diff --git "a/aaai/aaai2023.json" "b/aaai/aaai2023.json" new file mode 100644--- /dev/null +++ "b/aaai/aaai2023.json" @@ -0,0 +1,64608 @@ +[ + { + "id": "article-25365", + "title": "3D Assembly Completion", + "track": "main", + "status": "Technical", + "abstract": "Automatic assembly is a promising research topic in 3D computer vision and robotics. Existing works focus on generating assembly (e.g., IKEA furniture) from scratch with a set of parts, namely 3D part assembly. In practice, there are higher demands for the robot to take over and finish an incomplete assembly (e.g., a half-assembled IKEA furniture) with an off-the-shelf toolkit, especially in human-robot and multi-agent collaborations. Compared to 3D part assembly, it is more complicated in nature and remains unexplored yet. The robot must understand the incomplete structure, infer what parts are missing, single out the correct parts from the toolkit and finally, assemble them with appropriate poses to finish the incomplete assembly. Geometrically similar parts in the toolkit can interfere, and this problem will be exacerbated with more missing parts. To tackle this issue, we propose a novel task called 3D assembly completion. Given an incomplete assembly, it aims to find its missing parts from a toolkit and predict the 6-DoF poses to make the assembly complete. To this end, we propose FiT, a framework for Finishing the incomplete 3D assembly with Transformer. We employ the encoder to model the incomplete assembly into memories. Candidate parts interact with memories in a memory-query paradigm for final candidate classification and pose prediction. Bipartite part matching and symmetric transformation consistency are embedded to refine the completion. For reasonable evaluation and further reference, we design two standard toolkits of different difficulty, containing different compositions of candidate parts. 
We conduct extensive comparisons with several baseline methods and ablation studies, demonstrating the effectiveness of the proposed method.", + "primary_area": "computer vision iii", + "author": "Weihao Wang; Rufeng Zhang; Mingyu You; Hongjun Zhou; Bin He", + "authorids": "", + "aff": "College of Electronic and Information Engineering, Tongji University, Shanghai 201804 China; College of Electronic and Information Engineering, Tongji University, Shanghai 201804 China; College of Electronic and Information Engineering, Tongji University, Shanghai 201804 China; College of Electronic and Information Engineering, Tongji University, Shanghai 201804 China; College of Electronic and Information Engineering, Tongji University, Shanghai 201804 China", + "bibtex": "@article{Wang_Zhang_You_Zhou_He_2023, title={3D Assembly Completion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25365}, DOI={10.1609/aaai.v37i3.25365}, abstractNote={Automatic assembly is a promising research topic in 3D computer vision and robotics. Existing works focus on generating assembly (e.g., IKEA furniture) from scratch with a set of parts, namely 3D part assembly. In practice, there are higher demands for the robot to take over and finish an incomplete assembly (e.g., a half-assembled IKEA furniture) with an off-the-shelf toolkit, especially in human-robot and multi-agent collaborations. Compared to 3D part assembly, it is more complicated in nature and remains unexplored yet. The robot must understand the incomplete structure, infer what parts are missing, single out the correct parts from the toolkit and finally, assemble them with appropriate poses to finish the incomplete assembly. Geometrically similar parts in the toolkit can interfere, and this problem will be exacerbated with more missing parts. To tackle this issue, we propose a novel task called 3D assembly completion. 
Given an incomplete assembly, it aims to find its missing parts from a toolkit and predict the 6-DoF poses to make the assembly complete. To this end, we propose FiT, a framework for Finishing the incomplete 3D assembly with Transformer. We employ the encoder to model the incomplete assembly into memories. Candidate parts interact with memories in a memory-query paradigm for final candidate classification and pose prediction. Bipartite part matching and symmetric transformation consistency are embedded to refine the completion. For reasonable evaluation and further reference, we design two standard toolkits of different difficulty, containing different compositions of candidate parts. We conduct extensive comparisons with several baseline methods and ablation studies, demonstrating the effectiveness of the proposed method.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Weihao and Zhang, Rufeng and You, Mingyu and Zhou, Hongjun and He, Bin}, year={2023}, month={Jun.}, pages={2663-2671} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25365/25137", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25365", + "pdf_size": 1626815, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5711081723538097784&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "tongji.edu.cn;foxmail.com;tongji.edu.cn;tongji.edu.cn;tongji.edu.cn", + "email": "tongji.edu.cn;foxmail.com;tongji.edu.cn;tongji.edu.cn;tongji.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Tongji University", + "aff_unique_dep": "College of Electronic and Information Engineering", + "aff_unique_url": "https://www.tongji.edu.cn", + "aff_unique_abbr": "Tongji", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + 
"id": "article-25192", + "title": "3D Human Pose Lifting with Grid Convolution", + "track": "main", + "status": "Technical", + "abstract": "Existing lifting networks for regressing 3D human poses from 2D single-view poses are typically constructed with linear layers based on graph-structured representation learning. In sharp contrast to them, this paper presents Grid Convolution (GridConv), mimicking the wisdom of regular convolution operations in image space. GridConv is based on a novel Semantic Grid Transformation (SGT) which leverages a binary assignment matrix to map the irregular graph-structured human pose onto a regular weave-like grid pose representation joint by joint, enabling layer-wise feature learning with GridConv operations. We provide two ways to implement SGT, including handcrafted and learnable designs. Surprisingly, both designs turn out to achieve promising results and the learnable one is better, demonstrating the great potential of this new lifting representation learning formulation. To improve the ability of GridConv to encode contextual cues, we introduce an attention module over the convolutional kernel, making grid convolution operations input-dependent, spatial-aware and grid-specific. We show that our fully convolutional grid lifting network outperforms state-of-the-art methods with noticeable margins under (1) conventional evaluation on Human3.6M and (2) cross-evaluation on MPI-INF-3DHP. 
Code is available at https://github.com/OSVAI/GridConv.", + "primary_area": "computer vision i", + "author": "Yangyuxuan Kang; Yuyang Liu; Anbang Yao; Shandong Wang; Enhua Wu", + "authorids": "", + "aff": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Tsinghua University; Intel Labs China; Intel Labs China; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences + Faculty of Science and Technology, University of Macau", + "bibtex": "@article{Kang_Liu_Yao_Wang_Wu_2023, title={3D Human Pose Lifting with Grid Convolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25192}, DOI={10.1609/aaai.v37i1.25192}, abstractNote={Existing lifting networks for regressing 3D human poses from 2D single-view poses are typically constructed with linear layers based on graph-structured representation learning. In sharp contrast to them, this paper presents Grid Convolution (GridConv), mimicking the wisdom of regular convolution operations in image space. GridConv is based on a novel Semantic Grid Transformation (SGT) which leverages a binary assignment matrix to map the irregular graph-structured human pose onto a regular weave-like grid pose representation joint by joint, enabling layer-wise feature learning with GridConv operations. We provide two ways to implement SGT, including handcrafted and learnable designs. Surprisingly, both designs turn out to achieve promising results and the learnable one is better, demonstrating the great potential of this new lifting representation learning formulation. To improve the ability of GridConv to encode contextual cues, we introduce an attention module over the convolutional kernel, making grid convolution operations input-dependent, spatial-aware and grid-specific. 
We show that our fully convolutional grid lifting network outperforms state-of-the-art methods with noticeable margins under (1) conventional evaluation on Human3.6M and (2) cross-evaluation on MPI-INF-3DHP. Code is available at https://github.com/OSVAI/GridConv.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kang, Yangyuxuan and Liu, Yuyang and Yao, Anbang and Wang, Shandong and Wu, Enhua}, year={2023}, month={Jun.}, pages={1105-1113} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25192/24964", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25192", + "pdf_size": 3720252, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14665627154458435137&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ios.ac.cn;mails.tsinghua.edu.cn; fanbang.yao;intel.com;um.edu.mo", + "email": "ios.ac.cn;mails.tsinghua.edu.cn; fanbang.yao;intel.com;um.edu.mo", + "github": "https://github.com/OSV AI/GridConv", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;3;3;0+1+4", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Tsinghua University;Intel Corporation;University of Macau", + "aff_unique_dep": "Institute of Software;;;Intel Labs;Faculty of Science and Technology", + "aff_unique_url": "http://www.ios.ac.cn;http://www.ucas.ac.cn;https://www.tsinghua.edu.cn;https://www.intel.cn;https://www.um.edu.mo", + "aff_unique_abbr": "CAS;UCAS;THU;Intel;UM", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0+0+1", + "aff_country_unique": "China;Macau" + }, + { + "id": "article-25186", + "title": "3D-TOGO: Towards Text-Guided Cross-Category 3D Object Generation", + "track": "main", + "status": "Technical", + "abstract": "", + "primary_area": "computer vision i", + "author": "Zutao Jiang; Guansong Lu; Xiaodan Liang; Jihua Zhu; Wei Zhang; Xiaojun Chang; Hang Xu", + 
"authorids": "", + "aff": "School of Software Engineering, Xi\u2019an Jiaotong University; Huawei Noah\u2019s Ark Lab; Sun Yat-sen University + MBZUAI; School of Software Engineering, Xi\u2019an Jiaotong University; Huawei Noah\u2019s Ark Lab; ReLER, AAII, University of Technology Sydney; Huawei Noah\u2019s Ark Lab + PengCheng Laboratory", + "bibtex": "@article{Jiang_Lu_Liang_Zhu_Zhang_Chang_Xu_2023, title={3D-TOGO: Towards Text-Guided Cross-Category 3D Object Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25186}, DOI={10.1609/aaai.v37i1.25186}, abstractNote={<p><em>This article has been updated and an error has been fixed in published paper. An <a href="https://doi.org/10.1609/aaai.v37i13.27320">Erratum</a> to this article was published on 6 September 2023. </em></p>\n<p>Text-guided 3D object generation aims to generate 3D objects described by user-defined captions, which paves a flexible way to visualize what we imagined. Although some works have been devoted to solving this challenging task, these works either utilize some explicit 3D representations (e.g., mesh), which lack texture and require post-processing for rendering photo-realistic views; or require individual time-consuming optimization for every single case. Here, we make the first attempt to achieve generic text-guided cross-category 3D object generation via a new 3D-TOGO model, which integrates a text-to-views generation module and a views-to-3D generation module. The text-to-views generation module is designed to generate different views of the target 3D object given an input caption. prior-guidance, caption-guidance and view contrastive learning are proposed for achieving better view-consistency and caption similarity. Meanwhile, a pixelNeRF model is adopted for the views-to-3D generation module to obtain the implicit 3D neural representation from the previously-generated views. 
Our 3D-TOGO model generates 3D objects in the form of the neural radiance field with good texture and requires no time-cost optimization for every single caption. Besides, 3D-TOGO can control the category, color and shape of generated 3D objects with the input caption. Extensive experiments on the largest 3D object dataset (i.e., ABO) are conducted to verify that 3D-TOGO can better generate high-quality 3D objects according to the input captions across 98 different categories, in terms of PSNR, SSIM, LPIPS and CLIP-score, compared with text-NeRF and Dreamfields.</p>}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Zutao and Lu, Guansong and Liang, Xiaodan and Zhu, Jihua and Zhang, Wei and Chang, Xiaojun and Xu, Hang}, year={2023}, month={Sep.}, pages={1051-1059} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25186/27093", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25186", + "pdf_size": 1994705, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12198887688357333509&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;huawei.com;gmail.com;xjtu.edu.cn;huawei.com;uts.edu.au;gmail.com", + "email": "gmail.com;huawei.com;gmail.com;xjtu.edu.cn;huawei.com;uts.edu.au;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2+3;0;1;4;1+5", + "aff_unique_norm": "Xi'an Jiaotong University;Huawei;Sun Yat-sen University;Mohamed Bin Zayed University of Artificial Intelligence;University of Technology Sydney;PengCheng Laboratory", + "aff_unique_dep": "School of Software Engineering;Noah\u2019s Ark Lab;;;;", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.huawei.com;http://www.sysu.edu.cn/;https://www.mbzuai.ac.ae;https://www.uts.edu.au;http://www.pcl.ac.cn", + "aff_unique_abbr": "XJTU;Huawei;SYSU;MBZUAI;UTS;", + "aff_campus_unique_index": "0;;0;", + "aff_campus_unique": "Xi'an;", + 
"aff_country_unique_index": "0;0;0+1;0;0;2;0+0", + "aff_country_unique": "China;United Arab Emirates;Australia" + }, + { + "id": "article-25366", + "title": "A Benchmark and Asymmetrical-Similarity Learning for Practical Image Copy Detection", + "track": "main", + "status": "Technical", + "abstract": "Image copy detection (ICD) aims to determine whether a query image is an edited copy of any image from a reference set. Currently, there are very limited public benchmarks for ICD, while all overlook a critical challenge in real-world applications, i.e., the distraction from hard negative queries. Specifically, some queries are not edited copies but are inherently similar to some reference images. These hard negative queries are easily false recognized as edited copies, significantly compromising the ICD accuracy. This observation motivates us to build the first ICD benchmark featuring this characteristic. Based on existing ICD datasets, this paper constructs a new dataset by additionally adding 100,000 and 24, 252 hard negative pairs into the training and test set, respectively. Moreover, this paper further reveals a unique difficulty for solving the hard negative problem in ICD, i.e., there is a fundamental conflict between current metric learning and ICD. This conflict is: the metric learning adopts symmetric distance while the edited copy is an asymmetric (unidirectional) process, e.g., a partial crop is close to its holistic reference image and is an edited copy, while the latter cannot be the edited copy of the former (in spite the distance is equally small). This insight results in an Asymmetrical-Similarity Learning (ASL) method, which allows the similarity in two directions (the query \u2194 the reference image) to be different from each other. Experimental results show that ASL outperforms state-of-the-art methods by a clear margin, confirming that solving the symmetric-asymmetric conflict is critical for ICD. 
The NDEC dataset and code are available at https://github.com/WangWenhao0716/ASL.", + "primary_area": "computer vision iii", + "author": "Wenhao Wang; Yifan Sun; Yi Yang", + "authorids": "", + "aff": "ReLER, University of Technology Sydney + Baidu Research; Baidu Research; Zhejiang University", + "bibtex": "@article{Wang_Sun_Yang_2023, title={A Benchmark and Asymmetrical-Similarity Learning for Practical Image Copy Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25366}, DOI={10.1609/aaai.v37i3.25366}, abstractNote={Image copy detection (ICD) aims to determine whether a query image is an edited copy of any image from a reference set. Currently, there are very limited public benchmarks for ICD, while all overlook a critical challenge in real-world applications, i.e., the distraction from hard negative queries. Specifically, some queries are not edited copies but are inherently similar to some reference images. These hard negative queries are easily false recognized as edited copies, significantly compromising the ICD accuracy. This observation motivates us to build the first ICD benchmark featuring this characteristic. Based on existing ICD datasets, this paper constructs a new dataset by additionally adding 100,000 and 24, 252 hard negative pairs into the training and test set, respectively. Moreover, this paper further reveals a unique difficulty for solving the hard negative problem in ICD, i.e., there is a fundamental conflict between current metric learning and ICD. This conflict is: the metric learning adopts symmetric distance while the edited copy is an asymmetric (unidirectional) process, e.g., a partial crop is close to its holistic reference image and is an edited copy, while the latter cannot be the edited copy of the former (in spite the distance is equally small). 
This insight results in an Asymmetrical-Similarity Learning (ASL) method, which allows the similarity in two directions (the query \u2194 the reference image) to be different from each other. Experimental results show that ASL outperforms state-of-the-art methods by a clear margin, confirming that solving the symmetric-asymmetric conflict is critical for ICD. The NDEC dataset and code are available at https://github.com/WangWenhao0716/ASL.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Wenhao and Sun, Yifan and Yang, Yi}, year={2023}, month={Jun.}, pages={2672-2679} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25366/25138", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25366", + "pdf_size": 11834673, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11504322484679819116&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;baidu.com;zju.edu.cn", + "email": "gmail.com;baidu.com;zju.edu.cn", + "github": "https://github.com/WangWenhao0716/ASL", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;2", + "aff_unique_norm": "University of Technology Sydney;Baidu;Zhejiang University", + "aff_unique_dep": "ReLER;Baidu Research;", + "aff_unique_url": "https://www.uts.edu.au;https://research.baidu.com;https://www.zju.edu.cn", + "aff_unique_abbr": "UTS;Baidu;ZJU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Sydney;", + "aff_country_unique_index": "0+1;1;1", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-26681", + "title": "A Composite Multi-Attention Framework for Intraoperative Hypotension Early Warning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Intraoperative hypotension (IOH) events warning plays a crucial role in preventing postoperative complications, such as postoperative delirium and mortality. 
Despite significant efforts, two fundamental problems limit its wide clinical use. The well-established IOH event warning systems are often built on proprietary medical devices that may not be available in all hospitals. The warnings are also triggered mainly through a predefined IOH event that might not be suitable for all patients. This work proposes a composite multi-attention (CMA) framework to tackle these problems by conducting short-term predictions on user-definable IOH events using vital signals in a low sampling rate with demographic characteristics. Our framework leverages a multi-modal fusion network to make four vital signals and three demographic characteristics as input modalities. For each modality, a multi-attention mechanism is used for feature extraction for better model training. Experiments on two large-scale real-world data sets show that our method can achieve up to 94.1% accuracy on IOH events early warning while the signals sampling rate is reduced by 3000 times. Our proposal CMA can achieve a mean absolute error of 4.50 mm Hg in the most challenging 15-minute mean arterial pressure prediction task and the error reduction by 42.9% compared to existing solutions.", + "primary_area": "ai for social impact", + "author": "Feng Lu; Wei Li; Zhiqiang Zhou; Cheng Song; Yifei Sun; Yuwei Zhang; Yufei Ren; Xiaofei Liao; Hai Jin; Ailin Luo; Albert Y. 
Zomaya", + "authorids": "", + "aff": "National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; The Australia-China Joint Research Centre for Energy Informatics and Demand Response Technologies, Centre for Distributed and High Performance Computing, School of Computer Science, The University of Sydney, Australia; Tongji Hospital, Tongji Medical College, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Tongji Hospital, Tongji Medical College, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Tongji Hospital, Tongji Medical College, Huazhong University of 
Science and Technology, China; The Australia-China Joint Research Centre for Energy Informatics and Demand Response Technologies, Centre for Distributed and High Performance Computing, School of Computer Science, The University of Sydney, Australia", + "bibtex": "@article{Lu_Li_Zhou_Song_Sun_Zhang_Ren_Liao_Jin_Luo_Zomaya_2023, title={A Composite Multi-Attention Framework for Intraoperative Hypotension Early Warning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26681}, DOI={10.1609/aaai.v37i12.26681}, abstractNote={Intraoperative hypotension (IOH) events warning plays a crucial role in preventing postoperative complications, such as postoperative delirium and mortality. Despite significant efforts, two fundamental problems limit its wide clinical use. The well-established IOH event warning systems are often built on proprietary medical devices that may not be available in all hospitals. The warnings are also triggered mainly through a predefined IOH event that might not be suitable for all patients. This work proposes a composite multi-attention (CMA) framework to tackle these problems by conducting short-term predictions on user-definable IOH events using vital signals in a low sampling rate with demographic characteristics. Our framework leverages a multi-modal fusion network to make four vital signals and three demographic characteristics as input modalities. For each modality, a multi-attention mechanism is used for feature extraction for better model training. Experiments on two large-scale real-world data sets show that our method can achieve up to 94.1% accuracy on IOH events early warning while the signals sampling rate is reduced by 3000 times. 
Our proposal CMA can achieve a mean absolute error of 4.50 mm Hg in the most challenging 15-minute mean arterial pressure prediction task and the error reduction by 42.9% compared to existing solutions.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Feng and Li, Wei and Zhou, Zhiqiang and Song, Cheng and Sun, Yifei and Zhang, Yuwei and Ren, Yufei and Liao, Xiaofei and Jin, Hai and Luo, Ailin and Zomaya, Albert Y.}, year={2023}, month={Jun.}, pages={14374-14381} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26681/26453", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26681", + "pdf_size": 6883300, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14260058662257444230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "hust.edu.cn;sydney.edu.au;tjh.tjmu.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;tjh.tjmu.edu.cn;hust.edu.cn;hust.edu.cn;tjh.tjmu.edu.cn;sydney.edu.au", + "email": "hust.edu.cn;sydney.edu.au;tjh.tjmu.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;tjh.tjmu.edu.cn;hust.edu.cn;hust.edu.cn;tjh.tjmu.edu.cn;sydney.edu.au", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;1;0;0;0;0;0;0;0;0;1", + "aff_unique_norm": "Huazhong University of Science and Technology;The University of Sydney", + "aff_unique_dep": "School of Computer Science and Technology;School of Computer Science", + "aff_unique_url": "http://www.hust.edu.cn;https://www.sydney.edu.au", + "aff_unique_abbr": "HUST;USYD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;0;0;0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26709", + "title": "A Continual Pre-training Approach to Tele-Triaging Pregnant Women in Kenya", + "track": "aaai special track", + "status": "Technical", + "abstract": "Access to high-quality maternal health care services is limited in Kenya, 
which resulted in \u223c36,000 maternal and neonatal deaths in 2018. To tackle this challenge, Jacaranda Health (a non-profit organization working on maternal health in Kenya) developed PROMPTS, an SMS based tele-triage system for pregnant and puerperal women, which has more than 350,000 active users in Kenya. PROMPTS empowers pregnant women living far away from doctors and hospitals to send SMS messages to get quick answers (through human helpdesk agents) to questions about their medical symptoms and pregnancy status. Unfortunately, \u223c1.1 million SMS messages are received by PROMPTS every month, which makes it challenging for helpdesk agents to ensure that these messages can be interpreted correctly and evaluated by their level of emergency to ensure timely responses and/or treatments for women in need. This paper reports on a collaborative effort with Jacaranda Health to develop a state-of-the-art natural language processing (NLP) framework, TRIM-AI (TRIage for Mothers using AI), which can automatically predict the emergency level (or severity of medical condition) of a pregnant mother based on the content of their SMS messages. TRIM-AI leverages recent advances in multi-lingual pre-training and continual pre-training to tackle code-mixed SMS messages (between English and Swahili), and achieves a weighted F1 score of 0.774 on real-world datasets. TRIM-AI has been successfully deployed in the field since June 2022, and is being used by Jacaranda Health to prioritize the provision of services and care to pregnant women with the most critical medical conditions. 
Our preliminary A/B tests in the field show that TRIM-AI is \u223c17% more accurate at predicting high-risk medical conditions from SMS messages sent by pregnant Kenyan mothers, which reduces the helpdesk\u2019s workload by \u223c12%.", + "primary_area": "ai for social impact", + "author": "Wenbo Zhang; Hangzhi Guo; Prerna Ranganathan; Jay Patel; Sathyanath Rajasekharan; Nidhi Danayak; Manan Gupta; Amulya Yadav", + "authorids": "", + "aff": "Pennsylvania State University; Pennsylvania State University; Pennsylvania State University; Jacaranda Health; Jacaranda Health; Pennsylvania State University; Pennsylvania State University; Pennsylvania State University", + "bibtex": "@article{Zhang_Guo_Ranganathan_Patel_Rajasekharan_Danayak_Gupta_Yadav_2023, title={A Continual Pre-training Approach to Tele-Triaging Pregnant Women in Kenya}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26709}, DOI={10.1609/aaai.v37i12.26709}, abstractNote={Access to high-quality maternal health care services is limited in Kenya, which resulted in \u223c36,000 maternal and neonatal deaths in 2018. To tackle this challenge, Jacaranda Health (a non-profit organization working on maternal health in Kenya) developed PROMPTS, an SMS based tele-triage system for pregnant and puerperal women, which has more than 350,000 active users in Kenya. PROMPTS empowers pregnant women living far away from doctors and hospitals to send SMS messages to get quick answers (through human helpdesk agents) to questions about their medical symptoms and pregnancy status. Unfortunately, \u223c1.1 million SMS messages are received by PROMPTS every month, which makes it challenging for helpdesk agents to ensure that these messages can be interpreted correctly and evaluated by their level of emergency to ensure timely responses and/or treatments for women in need. 
This paper reports on a collaborative effort with Jacaranda Health to develop a state-of-the-art natural language processing (NLP) framework, TRIM-AI (TRIage for Mothers using AI), which can automatically predict the emergency level (or severity of medical condition) of a pregnant mother based on the content of their SMS messages. TRIM-AI leverages recent advances in multi-lingual pre-training and continual pre-training to tackle code-mixed SMS messages (between English and Swahili), and achieves a weighted F1 score of 0.774 on real-world datasets. TRIM-AI has been successfully deployed in the field since June 2022, and is being used by Jacaranda Health to prioritize the provision of services and care to pregnant women with the most critical medical conditions. Our preliminary A/B tests in the field show that TRIM-AI is \u223c17% more accurate at predicting high-risk medical conditions from SMS messages sent by pregnant Kenyan mothers, which reduces the helpdesk\u2019s workload by \u223c12%.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Wenbo and Guo, Hangzhi and Ranganathan, Prerna and Patel, Jay and Rajasekharan, Sathyanath and Danayak, Nidhi and Gupta, Manan and Yadav, Amulya}, year={2023}, month={Jun.}, pages={14620-14627} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26709/26481", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26709", + "pdf_size": 551103, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1989352764940688795&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff_domain": "psu.edu;psu.edu;psu.edu;jacarandahealth.org;jacarandahealth.org;psu.edu;psu.edu;psu.edu", + "email": "psu.edu;psu.edu;psu.edu;jacarandahealth.org;jacarandahealth.org;psu.edu;psu.edu;psu.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;1;0;0;0", + "aff_unique_norm": "Pennsylvania State University;Jacaranda 
Health", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.psu.edu;https://www.jacarandahealth.org", + "aff_unique_abbr": "PSU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;1;0;0;0", + "aff_country_unique": "United States;Kenya" + }, + { + "id": "article-26074", + "title": "A Coreset Learning Reality Check", + "track": "main", + "status": "Technical", + "abstract": "Subsampling algorithms are a natural approach to reduce data size before fitting models on massive datasets. In recent years, several works have proposed methods for subsampling rows from a data matrix while maintaining relevant information for classification. While these works are supported by theory and limited experiments, to date there has not been a comprehensive evaluation of these methods. In our work, we directly compare multiple methods for logistic regression drawn from the coreset and optimal subsampling literature and discover inconsistencies in their effectiveness. In many cases, methods do not outperform simple uniform subsampling.", + "primary_area": "machine learning ii", + "author": "Fred Lu; Edward Raff; James Holt", + "authorids": "", + "aff": "Booz Allen Hamilton + University of Maryland, Baltimore County + Laboratory for Physical Sciences; Booz Allen Hamilton + University of Maryland, Baltimore County + Laboratory for Physical Sciences; Laboratory for Physical Sciences", + "bibtex": "@article{Lu_Raff_Holt_2023, title={A Coreset Learning Reality Check}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26074}, DOI={10.1609/aaai.v37i7.26074}, abstractNote={Subsampling algorithms are a natural approach to reduce data size before fitting models on massive datasets. In recent years, several works have proposed methods for subsampling rows from a data matrix while maintaining relevant information for classification. 
While these works are supported by theory and limited experiments, to date there has not been a comprehensive evaluation of these methods. In our work, we directly compare multiple methods for logistic regression drawn from the coreset and optimal subsampling literature and discover inconsistencies in their effectiveness. In many cases, methods do not outperform simple uniform subsampling.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Fred and Raff, Edward and Holt, James}, year={2023}, month={Jun.}, pages={8940-8948} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26074/25846", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26074", + "pdf_size": 2460334, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17563874767333992263&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "bah.com;bah.com;lps.umd.edu", + "email": "bah.com;bah.com;lps.umd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1+2;0+1+2;2", + "aff_unique_norm": "Booz Allen Hamilton;University of Maryland, Baltimore County;Laboratory for Physical Sciences", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.boozallen.com;https://www.umbc.edu;", + "aff_unique_abbr": "BAH;UMBC;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Baltimore County", + "aff_country_unique_index": "0+0+0;0+0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26711", + "title": "A Crowd-AI Collaborative Duo Relational Graph Learning Framework towards Social Impact Aware Photo Classification", + "track": "aaai special track", + "status": "Technical", + "abstract": "In artificial intelligence (AI), negative social impact (NSI) represents the negative effect on the society as a result of mistakes conducted by AI agents. 
While the photo classification problem has been widely studied in the AI community, the NSI made by photo misclassification is largely ignored due to the lack of quantitative measurements of the NSI and effective approaches to reduce it. In this paper, we focus on an NSI-aware photo classification problem where the goal is to develop a novel crowd-AI collaborative learning framework that leverages online crowd workers to quantitatively estimate and effectively reduce the NSI of misclassified photos. Our problem is motivated by the limitations of current NSI-aware photo classification approaches that either 1) cannot accurately estimate NSI because they simply model NSI as the semantic difference between true and misclassified categories or 2) require costly human annotations to estimate NSI of pairwise class categories. To address such limitations, we develop SocialCrowd, a crowdsourcing-based NSI-aware photo classification framework that explicitly reduces the NSI of photo misclassification by designing a duo relational NSI-aware graph with the NSI estimated by online crowd workers. 
The evaluation results on two large-scale image datasets show that SocialCrowd not only reduces the NSI of photo misclassification but also improves the classification accuracy on both datasets.", + "primary_area": "ai for social impact", + "author": "Yang Zhang; Ziyi Kou; Lanyu Shang; Huimin Zeng; Zhenrui Yue; Dong Wang", + "authorids": "", + "aff": "School of Information Sciences, University of Illinois Urbana-Champaign; Department of Computer Science and Engineering, University of Notre Dame; School of Information Sciences, University of Illinois Urbana-Champaign; School of Information Sciences, University of Illinois Urbana-Champaign; School of Information Sciences, University of Illinois Urbana-Champaign; School of Information Sciences, University of Illinois Urbana-Champaign", + "bibtex": "@article{Zhang_Kou_Shang_Zeng_Yue_Wang_2023, title={A Crowd-AI Collaborative Duo Relational Graph Learning Framework towards Social Impact Aware Photo Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26711}, DOI={10.1609/aaai.v37i12.26711}, abstractNote={In artificial intelligence (AI), negative social impact (NSI) represents the negative effect on the society as a result of mistakes conducted by AI agents. While the photo classification problem has been widely studied in the AI community, the NSI made by photo misclassification is largely ignored due to the lack of quantitative measurements of the NSI and effective approaches to reduce it. In this paper, we focus on an NSI-aware photo classification problem where the goal is to develop a novel crowd-AI collaborative learning framework that leverages online crowd workers to quantitatively estimate and effectively reduce the NSI of misclassified photos. 
Our problem is motivated by the limitations of current NSI-aware photo classification approaches that either 1) cannot accurately estimate NSI because they simply model NSI as the semantic difference between true and misclassified categories or 2) require costly human annotations to estimate NSI of pairwise class categories. To address such limitations, we develop SocialCrowd, a crowdsourcing-based NSI-aware photo classification framework that explicitly reduces the NSI of photo misclassification by designing a duo relational NSI-aware graph with the NSI estimated by online crowd workers. The evaluation results on two large-scale image datasets show that SocialCrowd not only reduces the NSI of photo misclassification but also improves the classification accuracy on both datasets.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yang and Kou, Ziyi and Shang, Lanyu and Zeng, Huimin and Yue, Zhenrui and Wang, Dong}, year={2023}, month={Jun.}, pages={14637-14645} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26711/26483", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26711", + "pdf_size": 1201687, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:sSyi3mMq0XAJ:scholar.google.com/&scioq=A+Crowd-AI+Collaborative+Duo+Relational+Graph+Learning+Framework+towards+Social+Impact+Aware+Photo+Classification&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "illinois.edu;nd.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;nd.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0", + "aff_unique_norm": "University of Illinois Urbana-Champaign;University of Notre Dame", + "aff_unique_dep": "School of Information Sciences;Department of Computer Science and Engineering", + "aff_unique_url": "https://illinois.edu;https://www.nd.edu", 
+ "aff_unique_abbr": "UIUC;Notre Dame", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26017", + "title": "A Data Source for Reasoning Embodied Agents", + "track": "main", + "status": "Technical", + "abstract": "Recent progress in using machine learning models for reasoning tasks has been driven by novel model architectures, large-scale pre-training protocols, and dedicated reasoning datasets for fine-tuning. In this work, to further pursue these advances, we introduce a new data generator for machine reasoning that integrates with an embodied agent. The generated data consists of templated text queries and answers, matched with world-states encoded into a database. The world-states are a result of both world dynamics and the actions of the agent. We show the results of several baseline models on instantiations of train sets. These include pre-trained language models fine-tuned on a text-formatted representation of the database, and graph-structured Transformers operating on a knowledge-graph representation of the database. We find that these models can answer some questions about the world-state, but struggle with others. These results hint at new research directions in designing neural reasoning models and database representations. 
Code to generate the data and train the models will be released at github.com/facebookresearch/neuralmemory", + "primary_area": "machine learning ii", + "author": "Jack Lanchantin; Sainbayar Sukhbaatar; Gabriel Synnaeve; Yuxuan Sun; Kavya Srinet; Arthur Szlam", + "authorids": "", + "aff": "Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI", + "bibtex": "@article{Lanchantin_Sukhbaatar_Synnaeve_Sun_Srinet_Szlam_2023, title={A Data Source for Reasoning Embodied Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26017}, DOI={10.1609/aaai.v37i7.26017}, abstractNote={Recent progress in using machine learning models for reasoning tasks has been driven by novel model architectures, large-scale pre-training protocols, and dedicated reasoning datasets for fine-tuning. In this work, to further pursue these advances, we introduce a new data generator for machine reasoning that integrates with an embodied agent. The generated data consists of templated text queries and answers, matched with world-states encoded into a database. The world-states are a result of both world dynamics and the actions of the agent. We show the results of several baseline models on instantiations of train sets. These include pre-trained language models fine-tuned on a text-formatted representation of the database, and graph-structured Transformers operating on a knowledge-graph representation of the database. We find that these models can answer some questions about the world-state, but struggle with others. These results hint at new research directions in designing neural reasoning models and database representations. 
Code to generate the data and train the models will be released at github.com/facebookresearch/neuralmemory}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lanchantin, Jack and Sukhbaatar, Sainbayar and Synnaeve, Gabriel and Sun, Yuxuan and Srinet, Kavya and Szlam, Arthur}, year={2023}, month={Jun.}, pages={8438-8446} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26017/25789", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26017", + "pdf_size": 1072473, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3832761011527185747&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "meta.com;meta.com;meta.com;meta.com;meta.com;meta.com", + "email": "meta.com;meta.com;meta.com;meta.com;meta.com;meta.com", + "github": "github.com/facebookresearch/neuralmemory", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26843", + "title": "A Dataset and Baseline Approach for Identifying Usage States from Non-intrusive Power Sensing with MiDAS IoT-Based Sensors", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The state identification problem seeks to identify power usage patterns of any system, like buildings or factories, of interest. 
In this challenge paper, we make power usage dataset available from 8 institutions in manufacturing, education and medical institutions from the US and India, and an initial unsupervised machine learning based solution as a baseline for the community to accelerate research in this area.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Bharath Muppasani; Cheyyur Jaya Anand; Chinmayi Appajigowda; Biplav Srivastava; Lokesh Johri", + "authorids": "", + "aff": "AI Institute, University of South Carolina, Columbia, South Carolina, USA; Tantiv4, San Jose, California, USA; Tantiv4, San Jose, California, USA; AI Institute, University of South Carolina, Columbia, South Carolina, USA; Tantiv4, San Jose, California, USA", + "bibtex": "@article{Muppasani_Anand_Appajigowda_Srivastava_Johri_2024, title={A Dataset and Baseline Approach for Identifying Usage States from Non-intrusive Power Sensing with MiDAS IoT-Based Sensors}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26843}, DOI={10.1609/aaai.v37i13.26843}, abstractNote={The state identification problem seeks to identify power usage patterns of any system, like buildings or factories, of interest. 
In this challenge paper, we make power usage dataset available from 8 institutions in manufacturing, education and medical institutions from the US and India, and an initial unsupervised machine learning based solution as a baseline for the community to accelerate research in this area.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Muppasani, Bharath and Anand, Cheyyur Jaya and Appajigowda, Chinmayi and Srivastava, Biplav and Johri, Lokesh}, year={2024}, month={Jul.}, pages={15545-15550} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26843/26615", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26843", + "pdf_size": 427200, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14547310209807367270&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "email.sc.edu;tantiv4.com;tantiv4.com;sc.edu;tantiv4.com", + "email": "email.sc.edu;tantiv4.com;tantiv4.com;sc.edu;tantiv4.com", + "github": "https://github.com/ai4society/PowerIoT-State-Identification", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;1", + "aff_unique_norm": "University of South Carolina;Tantiv4", + "aff_unique_dep": "AI Institute;", + "aff_unique_url": "https://www.sc.edu;", + "aff_unique_abbr": "USC;", + "aff_campus_unique_index": "0;1;1;0;1", + "aff_campus_unique": "Columbia;San Jose", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27091", + "title": "A Dataset for Learning University STEM Courses at Scale and Generating Questions at a Human Level", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "We present a new dataset for learning to solve, explain, and generate university-level STEM questions from 27 courses across a dozen departments in seven universities. 
We scale up previous approaches to questions from courses in the departments of Mechanical Engineering, Materials Science and Engineering, Chemistry, Electrical Engineering, Computer Science, Physics, Earth Atmospheric and Planetary Sciences, Economics, Mathematics, Biological Engineering, Data Systems, and Society, and Statistics. We visualize similarities and differences between questions across courses. We demonstrate that a large foundation model is able to generate questions that are as appropriate and at the same difficulty level as human-written questions.", + "primary_area": "", + "author": "Iddo Drori; Sarah Zhang; Zad Chin; Reece Shuttleworth; Albert Lu; Linda Chen; Bereket Birbo; Michele He; Pedro Lantigua; Sunny Tran; Gregory Hunter; Bo Feng; Newman Cheng; Roman Wang; Yann Hicke; Saisamrit Surbehera; Arvind Raghavan; Alexander Siemenn; Nikhil Singh; Jayson Lynch; Avi Shporer; Nakul Verma; Tonio Buonassisi; Armando Solar-Lezama", + "authorids": "", + "aff": "Massachusetts Institute of Technology+Boston University+Columbia University; Massachusetts Institute of Technology; Harvard University; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Columbia University; Columbia University; Columbia University; Columbia University; Cornell University; Columbia University; Columbia University; Massachusetts Institute of Technology; Massachusetts Institute of Technology; University of Waterloo; Massachusetts Institute of Technology; Columbia University; Massachusetts Institute of Technology; Massachusetts Institute of Technology", + "bibtex": "@article{Drori_Zhang_Chin_Shuttleworth_Lu_Chen_Birbo_He_Lantigua_Tran_Hunter_Feng_Cheng_Wang_Hicke_Surbehera_Raghavan_Siemenn_Singh_Lynch_Shporer_Verma_Buonassisi_Solar-Lezama_2024, title={A Dataset for 
Learning University STEM Courses at Scale and Generating Questions at a Human Level}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27091}, DOI={10.1609/aaai.v37i13.27091}, abstractNote={We present a new dataset for learning to solve, explain, and generate university-level STEM questions from 27 courses across a dozen departments in seven universities. We scale up previous approaches to questions from courses in the departments of Mechanical Engineering, Materials Science and Engineering, Chemistry, Electrical Engineering, Computer Science, Physics, Earth Atmospheric and Planetary Sciences, Economics, Mathematics, Biological Engineering, Data Systems, and Society, and Statistics. We visualize similarities and differences between questions across courses. We demonstrate that a large foundation model is able to generate questions that are as appropriate and at the same difficulty level as human-written questions.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Drori, Iddo and Zhang, Sarah and Chin, Zad and Shuttleworth, Reece and Lu, Albert and Chen, Linda and Birbo, Bereket and He, Michele and Lantigua, Pedro and Tran, Sunny and Hunter, Gregory and Feng, Bo and Cheng, Newman and Wang, Roman and Hicke, Yann and Surbehera, Saisamrit and Raghavan, Arvind and Siemenn, Alexander and Singh, Nikhil and Lynch, Jayson and Shporer, Avi and Verma, Nakul and Buonassisi, Tonio and Solar-Lezama, Armando}, year={2024}, month={Jul.}, pages={15921-15929} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27091/26864", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27091", + "pdf_size": 718527, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4224947612406198935&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": 
"mit.edu;mit.edu;college.harvard.edu;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;columbia.edu;columbia.edu;columbia.edu;columbia.edu;cornell.edu;columbia.edu;columbia.edu;mit.edu;mit.edu;waterloo.ca;mit.edu;cs.columbia.edu;mit.edu;csail.mit.edu", + "email": "mit.edu;mit.edu;college.harvard.edu;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;columbia.edu;columbia.edu;columbia.edu;columbia.edu;cornell.edu;columbia.edu;columbia.edu;mit.edu;mit.edu;waterloo.ca;mit.edu;cs.columbia.edu;mit.edu;csail.mit.edu", + "github": "", + "project": "", + "author_num": 24, + "aff_unique_index": "0+1+2;0;3;0;0;0;0;0;0;0;2;2;2;2;4;2;2;0;0;5;0;2;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Boston University;Columbia University;Harvard University;Cornell University;University of Waterloo", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://web.mit.edu;https://www.bu.edu;https://www.columbia.edu;https://www.harvard.edu;https://www.cornell.edu;https://uwaterloo.ca", + "aff_unique_abbr": "MIT;BU;Columbia;Harvard;Cornell;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1;0;0;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-26556", + "title": "A Disentangled-Attention Based Framework with Persona-Aware Prompt Learning for Dialogue Generation", + "track": "main", + "status": "Technical", + "abstract": "Endowing dialogue agents with personas is the key to delivering more human-like conversations. However, existing persona-grounded dialogue systems still lack informative details of human conversations and tend to reply with inconsistent and generic responses. One of the main underlying causes is that pre-defined persona sentences are generally short and merely superficial descriptions of personal attributes, making appropriate persona selection and understanding non-trivial. 
Another challenge is that it is crucial to consider the context and the conversation flow to dynamically determine when to invoke different types of persona signals. To address these problems, we propose a disentangled-attention based pre-training architecture, which incorporates persona-aware prompt learning to bridge the connection between the selected persona and response generation. Our model first exploits the conversation flow to select context-relevant personas, and subsequently enriches the superficial persona descriptions with extra personality traits through persona-aware prompting. Finally, the decoder leverages a disentangled-attention mechanism to flexibly control the reliance on personas and dialogue contexts, and incorporates A*-like keyword-based heuristic estimates for controllable generation. Extensive experiments show that our approach can outperform strong baselines and deliver more consistent and engaging responses on the PERSONA-CHAT dataset.", + "primary_area": "speech natural language processing", + "author": "Pingsheng Liu; Zhengjie Huang; Xiechi Zhang; Linlin Wang; Gerard de Melo; Xin Lin; Liang Pang; Liang He", + "authorids": "", + "aff": "East China Normal University; East China Normal University; East China Normal University; East China Normal University+The Thirty-Seventh AAAI Conference on Artificial Intelligence (AAAI-23); Hasso Plattner Institute, University of Potsdam; East China Normal University; Institute of Computing Technology, CAS; East China Normal University", + "bibtex": "@article{Liu_Huang_Zhang_Wang_de Melo_Lin_Pang_He_2023, title={A Disentangled-Attention Based Framework with Persona-Aware Prompt Learning for Dialogue Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26556}, DOI={10.1609/aaai.v37i11.26556}, abstractNote={Endowing dialogue agents with personas is the key to delivering more human-like conversations. 
However, existing persona-grounded dialogue systems still lack informative details of human conversations and tend to reply with inconsistent and generic responses. One of the main underlying causes is that pre-defined persona sentences are generally short and merely superficial descriptions of personal attributes, making appropriate persona selection and understanding non-trivial. Another challenge is that it is crucial to consider the context and the conversation flow to dynamically determine when to invoke different types of persona signals. To address these problems, we propose a disentangled-attention based pre-training architecture, which incorporates persona-aware prompt learning to bridge the connection between the selected persona and response generation. Our model first exploits the conversation flow to select context-relevant personas, and subsequently enriches the superficial persona descriptions with extra personality traits through persona-aware prompting. Finally, the decoder leverages a disentangled-attention mechanism to flexibly control the reliance on personas and dialogue contexts, and incorporates A*-like keyword-based heuristic estimates for controllable generation. 
Extensive experiments show that our approach can outperform strong baselines and deliver more consistent and engaging responses on the PERSONA-CHAT dataset.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Pingsheng and Huang, Zhengjie and Zhang, Xiechi and Wang, Linlin and de Melo, Gerard and Lin, Xin and Pang, Liang and He, Liang}, year={2023}, month={Jun.}, pages={13255-13263} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26556/26328", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26556", + "pdf_size": 881437, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1266838369227879297&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn;demelo.org;cs.ecnu.edu.cn;ict.ac.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn;demelo.org;cs.ecnu.edu.cn;ict.ac.cn;cs.ecnu.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0+1;2;0;3;0", + "aff_unique_norm": "East China Normal University;AAAI Conference on Artificial Intelligence;Hasso Plattner Institute;Chinese Academy of Sciences", + "aff_unique_dep": ";;;Institute of Computing Technology", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.aaai.org;https://www.hpi.de;http://www.ict.cas.cn", + "aff_unique_abbr": "ECNU;AAAI;HPI;CAS", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Potsdam", + "aff_country_unique_index": "0;0;0;0+1;2;0;0;0", + "aff_country_unique": "China;United States;Germany" + }, + { + "id": "article-25635", + "title": "A Domain-Knowledge-Inspired Music Embedding Space and a Novel Attention Mechanism for Symbolic Music Modeling", + "track": "main", + "status": "Technical", + "abstract": "Following the success of the transformer architecture in the natural language domain, transformer-like architectures have been 
widely applied to the domain of symbolic music recently. Symbolic music and text, however, are two different modalities. Symbolic music contains multiple attributes, both absolute attributes (e.g., pitch) and relative attributes (e.g., pitch interval). These relative attributes shape human perception of musical motifs. These important relative attributes, however, are mostly ignored in existing symbolic music modelling methods with the main reason being the lack of a musically-meaningful embedding space where both the absolute and relative embeddings of the symbolic music tokens can be efficiently represented. In this paper, we propose the Fundamental Music Embedding (FME) for symbolic music based on a bias-adjusted sinusoidal encoding within which both the absolute and the relative attributes can be embedded and the fundamental musical properties (e.g., translational invariance) are explicitly preserved. Taking advantage of the proposed FME, we further propose a novel attention mechanism based on the relative index, pitch and onset embeddings (RIPO attention) such that the musical domain knowledge can be fully utilized for symbolic music modelling. Experiment results show that our proposed model: RIPO transformer which utilizes FME and RIPO attention outperforms the state-of-the-art transformers (i.e., music transformer, linear transformer) in a melody completion task. Moreover, using the RIPO transformer in a downstream music generation task, we notice that the notorious degeneration phenomenon no longer exists and the music generated by the RIPO transformer outperforms the music generated by state-of-the-art transformer models in both subjective and objective evaluations. 
The code of the proposed method is available online: github.com/guozixunnicolas/FundamentalMusicEmbedding.", + "primary_area": "domain s of application", + "author": "Zixun Guo; Jaeyong Kang; Dorien Herremans", + "authorids": "", + "aff": "Information Systems Technology and Design, Singapore University of Technology and Design, Singapore; Information Systems Technology and Design, Singapore University of Technology and Design, Singapore; Information Systems Technology and Design, Singapore University of Technology and Design, Singapore", + "bibtex": "@article{Guo_Kang_Herremans_2023, title={A Domain-Knowledge-Inspired Music Embedding Space and a Novel Attention Mechanism for Symbolic Music Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25635}, DOI={10.1609/aaai.v37i4.25635}, abstractNote={Following the success of the transformer architecture in the natural language domain, transformer-like architectures have been widely applied to the domain of symbolic music recently. Symbolic music and text, however, are two different modalities. Symbolic music contains multiple attributes, both absolute attributes (e.g., pitch) and relative attributes (e.g., pitch interval). These relative attributes shape human perception of musical motifs. These important relative attributes, however, are mostly ignored in existing symbolic music modelling methods with the main reason being the lack of a musically-meaningful embedding space where both the absolute and relative embeddings of the symbolic music tokens can be efficiently represented. In this paper, we propose the Fundamental Music Embedding (FME) for symbolic music based on a bias-adjusted sinusoidal encoding within which both the absolute and the relative attributes can be embedded and the fundamental musical properties (e.g., translational invariance) are explicitly preserved. 
Taking advantage of the proposed FME, we further propose a novel attention mechanism based on the relative index, pitch and onset embeddings (RIPO attention) such that the musical domain knowledge can be fully utilized for symbolic music modelling. Experiment results show that our proposed model: RIPO transformer which utilizes FME and RIPO attention outperforms the state-of-the-art transformers (i.e., music transformer, linear transformer) in a melody completion task. Moreover, using the RIPO transformer in a downstream music generation task, we notice that the notorious degeneration phenomenon no longer exists and the music generated by the RIPO transformer outperforms the music generated by state-of-the-art transformer models in both subjective and objective evaluations. The code of the proposed method is available online: github.com/guozixunnicolas/FundamentalMusicEmbedding.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Zixun and Kang, Jaeyong and Herremans, Dorien}, year={2023}, month={Jun.}, pages={5070-5077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25635/25407", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25635", + "pdf_size": 1311474, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5134801926232068313&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 8, + "aff_domain": "sutd.edu.sg;sutd.edu.sg;sutd.edu.sg", + "email": "sutd.edu.sg;sutd.edu.sg;sutd.edu.sg", + "github": "github.com/guozixunnicolas/FundamentalMusicEmbedding", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Singapore University of Technology and Design", + "aff_unique_dep": "Information Systems Technology and Design", + "aff_unique_url": "https://www.sutd.edu.sg", + "aff_unique_abbr": "SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": 
"Singapore" + }, + { + "id": "article-26626", + "title": "A Domain-Transfer Meta Task Design Paradigm for Few-Shot Slot Tagging", + "track": "main", + "status": "Technical", + "abstract": "Few-shot slot tagging is an important task in dialogue systems and attracts much attention of researchers. Most previous few-shot slot tagging methods utilize meta-learning procedure for training and strive to construct a large number of different meta tasks to simulate the testing situation of insufficient data. However, there is a widespread phenomenon of overlap slot between two domains in slot tagging. Traditional meta tasks ignore this special phenomenon and cannot simulate such realistic few-shot slot tagging scenarios. It violates the basic principle of meta-learning which the meta task is consistent with the real testing task, leading to historical information forgetting problem. In this paper, we introduce a novel domain-transfer meta task design paradigm to tackle this problem. We distribute a basic domain to each target domain based on the coincidence degree of slot labels between these two domains. Unlike classic meta tasks which only rely on small samples of target domain, our meta tasks aim to correctly infer the class of target domain query samples based on both abundant data in basic domain and scarce data in target domain. To accomplish our meta task, we propose a Task Adaptation Network to effectively transfer the historical information from the basic domain to the target domain. We carry out sufficient experiments on the benchmark slot tagging dataset SNIPS and the name entity recognition dataset NER. 
Results demonstrate that our proposed model outperforms previous methods and achieves the state-of-the-art performance.", + "primary_area": "speech natural language processing", + "author": "Fengyi Yang; Xi Zhou; Yating Yang; Bo Ma; Rui Dong; Abibulla Atawulla", + "authorids": "", + "aff": "Xinjiang Technical Institute of Physics & Chemistry, Chinese Academy of Sciences, Urumqi 830011, China + University of Chinese Academy of Sciences, Beijing 100049, China + Xinjiang Laboratory of Minority Speech and Language Information Processing, Urumqi 830011, China; Xinjiang Technical Institute of Physics & Chemistry, Chinese Academy of Sciences, Urumqi 830011, China + University of Chinese Academy of Sciences, Beijing 100049, China + Xinjiang Laboratory of Minority Speech and Language Information Processing, Urumqi 830011, China; Xinjiang Technical Institute of Physics & Chemistry, Chinese Academy of Sciences, Urumqi 830011, China + University of Chinese Academy of Sciences, Beijing 100049, China + Xinjiang Laboratory of Minority Speech and Language Information Processing, Urumqi 830011, China; Xinjiang Technical Institute of Physics & Chemistry, Chinese Academy of Sciences, Urumqi 830011, China + University of Chinese Academy of Sciences, Beijing 100049, China + Xinjiang Laboratory of Minority Speech and Language Information Processing, Urumqi 830011, China; Xinjiang Technical Institute of Physics & Chemistry, Chinese Academy of Sciences, Urumqi 830011, China + University of Chinese Academy of Sciences, Beijing 100049, China + Xinjiang Laboratory of Minority Speech and Language Information Processing, Urumqi 830011, China; Xinjiang Technical Institute of Physics & Chemistry, Chinese Academy of Sciences, Urumqi 830011, China + University of Chinese Academy of Sciences, Beijing 100049, China + Xinjiang Laboratory of Minority Speech and Language Information Processing, Urumqi 830011, China", + "bibtex": "@article{Yang_Zhou_Yang_Ma_Dong_Atawulla_2023, title={A Domain-Transfer 
Meta Task Design Paradigm for Few-Shot Slot Tagging}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26626}, DOI={10.1609/aaai.v37i11.26626}, abstractNote={Few-shot slot tagging is an important task in dialogue systems and attracts much attention of researchers. Most previous few-shot slot tagging methods utilize meta-learning procedure for training and strive to construct a large number of different meta tasks to simulate the testing situation of insufficient data. However, there is a widespread phenomenon of overlap slot between two domains in slot tagging. Traditional meta tasks ignore this special phenomenon and cannot simulate such realistic few-shot slot tagging scenarios. It violates the basic principle of meta-learning which the meta task is consistent with the real testing task, leading to historical information forgetting problem. In this paper, we introduce a novel domain-transfer meta task design paradigm to tackle this problem. We distribute a basic domain to each target domain based on the coincidence degree of slot labels between these two domains. Unlike classic meta tasks which only rely on small samples of target domain, our meta tasks aim to correctly infer the class of target domain query samples based on both abundant data in basic domain and scarce data in target domain. To accomplish our meta task, we propose a Task Adaptation Network to effectively transfer the historical information from the basic domain to the target domain. We carry out sufficient experiments on the benchmark slot tagging dataset SNIPS and the name entity recognition dataset NER. 
Results demonstrate that our proposed model outperforms previous methods and achieves the state-of-the-art performance.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Fengyi and Zhou, Xi and Yang, Yating and Ma, Bo and Dong, Rui and Atawulla, Abibulla}, year={2023}, month={Jun.}, pages={13887-13895} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26626/26398", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26626", + "pdf_size": 232230, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:aT8crj0nD4AJ:scholar.google.com/&scioq=A+Domain-Transfer+Meta+Task+Design+Paradigm+for+Few-Shot+Slot+Tagging&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "mails.ucas.edu.cn;ms.xjb.ac.cn;ms.xjb.ac.cn;ms.xjb.ac.cn;ms.xjb.ac.cn;mails.ucas.ac.cn", + "email": "mails.ucas.edu.cn;ms.xjb.ac.cn;ms.xjb.ac.cn;ms.xjb.ac.cn;ms.xjb.ac.cn;mails.ucas.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2;0+1+2;0+1+2", + "aff_unique_norm": "Xinjiang Technical Institute of Physics & Chemistry;University of Chinese Academy of Sciences;Xinjiang Laboratory of Minority Speech and Language Information Processing", + "aff_unique_dep": "Chinese Academy of Sciences;;Laboratory of Minority Speech and Language Information Processing", + "aff_unique_url": ";http://www.ucas.ac.cn;", + "aff_unique_abbr": ";UCAS;", + "aff_campus_unique_index": "0+1+0;0+1+0;0+1+0;0+1+0;0+1+0;0+1+0", + "aff_campus_unique": "Urumqi;Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26421", + "title": "A Dynamics and Task Decoupled Reinforcement Learning Architecture for High-Efficiency Dynamic Target Intercept", + "track": "main", + "status": "Technical", + "abstract": "Due to the flexibility and ease of control, unmanned aerial vehicles (UAVs) have been increasingly 
used in various scenarios and applications in recent years. Training UAVs with reinforcement learning (RL) for a specific task is often expensive in terms of time and computation. However, it is known that the main effort of the learning process is made to fit the low-level physical dynamics systems instead of the high-level task itself. In this paper, we study to apply UAVs in the dynamic target intercept (DTI) task, where the dynamics systems equipped by different UAV models are correspondingly distinct. To this end, we propose a dynamics and task decoupled RL architecture to address the inefficient learning procedure, where the RL module focuses on modeling the DTI task without involving physical dynamics, and the design of states, actions, and rewards are completely task-oriented while the dynamics control module can adaptively convert actions from the RL module to dynamics signals to control different UAVs without retraining the RL module. We show the efficiency and efficacy of our results in comparison and ablation experiments against state-of-the-art methods.", + "primary_area": "planning routing and scheduling", + "author": "Dora D. Liu; Liang Hu; Qi Zhang; Tangwei Ye; Usman Naseem; Zhong Yuan Lai", + "authorids": "", + "aff": "DeepBlue Academy of Sciences\u00b9,\u00b3; Tongji University\u00b2,\u00b9; University of Technology Sydney\u2074,\u00b9; DeepBlue Academy of Sciences\u00b9; University of Sydney\u2075; DeepBlue Academy of Sciences\u00b9", + "bibtex": "@article{Liu_Hu_Zhang_Ye_Naseem_Lai_2023, title={A Dynamics and Task Decoupled Reinforcement Learning Architecture for High-Efficiency Dynamic Target Intercept}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26421}, DOI={10.1609/aaai.v37i10.26421}, abstractNote={Due to the flexibility and ease of control, unmanned aerial vehicles (UAVs) have been increasingly used in various scenarios and applications in recent years. 
Training UAVs with reinforcement learning (RL) for a specific task is often expensive in terms of time and computation. However, it is known that the main effort of the learning process is made to fit the low-level physical dynamics systems instead of the high-level task itself. In this paper, we study to apply UAVs in the dynamic target intercept (DTI) task, where the dynamics systems equipped by different UAV models are correspondingly distinct. To this end, we propose a dynamics and task decoupled RL architecture to address the inefficient learning procedure, where the RL module focuses on modeling the DTI task without involving physical dynamics, and the design of states, actions, and rewards are completely task-oriented while the dynamics control module can adaptively convert actions from the RL module to dynamics signals to control different UAVs without retraining the RL module. We show the efficiency and efficacy of our results in comparison and ablation experiments against state-of-the-art methods.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Dora D. 
and Hu, Liang and Zhang, Qi and Ye, Tangwei and Naseem, Usman and Lai, Zhong Yuan}, year={2023}, month={Jun.}, pages={12049-12057} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26421/26193", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26421", + "pdf_size": 373010, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15427375323106243016&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "163.com;tongji.edu.cn;student.uts.edu.au;163.com;sydney.edu.au;yahoo.com", + "email": "163.com;tongji.edu.cn;student.uts.edu.au;163.com;sydney.edu.au;yahoo.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;3;0", + "aff_unique_norm": "DeepBlue Academy of Sciences;Tongji University;University of Technology Sydney;University of Sydney", + "aff_unique_dep": ";;;", + "aff_unique_url": ";https://www.tongji.edu.cn;https://www.uts.edu.au;https://www.sydney.edu.au", + "aff_unique_abbr": ";Tongji;UTS;USYD", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Sydney", + "aff_country_unique_index": "0;0;1;0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26196", + "title": "A Fair Generative Model Using LeCam Divergence", + "track": "main", + "status": "Technical", + "abstract": "We explore a fairness-related challenge that arises in generative models. The challenge is that biased training data with imbalanced demographics may yield a high asymmetry in size of generated samples across distinct groups. We focus on practically-relevant scenarios wherein demographic labels are not available and therefore the design of a fair generative model is non-straightforward. In this paper, we propose an optimization framework that regulates the unfairness under such practical settings via one statistical measure, LeCam (LC)-divergence. 
Specifically to quantify the degree of unfairness, we employ a balanced-yet-small reference dataset and then measure its distance with generated samples using the LC-divergence, which is shown to be particularly instrumental to a small size of the reference dataset. We take a variational optimization approach to implement the LC-based measure. Experiments on benchmark real datasets demonstrate that the proposed framework can significantly improve the fairness performance while maintaining realistic sample quality for a wide range of the reference set size all the way down to 1% relative to training set.", + "primary_area": "machine learning iii", + "author": "Soobin Um; Changho Suh", + "authorids": "", + "aff": "Graduate School of AI, KAIST; School of Electrical Engineering, KAIST", + "bibtex": "@article{Um_Suh_2023, title={A Fair Generative Model Using LeCam Divergence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26196}, DOI={10.1609/aaai.v37i8.26196}, abstractNote={We explore a fairness-related challenge that arises in generative models. The challenge is that biased training data with imbalanced demographics may yield a high asymmetry in size of generated samples across distinct groups. We focus on practically-relevant scenarios wherein demographic labels are not available and therefore the design of a fair generative model is non-straightforward. In this paper, we propose an optimization framework that regulates the unfairness under such practical settings via one statistical measure, LeCam (LC)-divergence. Specifically to quantify the degree of unfairness, we employ a balanced-yet-small reference dataset and then measure its distance with generated samples using the LC-divergence, which is shown to be particularly instrumental to a small size of the reference dataset. We take a variational optimization approach to implement the LC-based measure. 
Experiments on benchmark real datasets demonstrate that the proposed framework can significantly improve the fairness performance while maintaining realistic sample quality for a wide range of the reference set size all the way down to 1% relative to training set.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Um, Soobin and Suh, Changho}, year={2023}, month={Jun.}, pages={10034-10042} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26196/25968", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26196", + "pdf_size": 430473, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11028431160167075986&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "Graduate School of AI", + "aff_unique_url": "https://www.kaist.edu", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26653", + "title": "A Fair Incentive Scheme for Community Health Workers", + "track": "aaai special track", + "status": "Technical", + "abstract": "Community health workers (CHWs) play a crucial role in\nthe last mile delivery of essential health services to underserved\npopulations in low-income countries. Many nongovernmental\norganizations (NGOs) provide training and\nsupport to enable CHWs to deliver health services to their\ncommunities, with no charge to the recipients of the services.\nThis includes monetary compensation for the work that\nCHWs perform, which is broken down into a series of well defined\ntasks. 
In this work, we partner with a NGO D-Tree\nInternational to design a fair monetary compensation scheme\nfor tasks performed by CHWs in the semi-autonomous region\nof Zanzibar in Tanzania, Africa. In consultation with\nstakeholders, we interpret fairness as the equal opportunity\nto earn, which means that each CHW has the opportunity to\nearn roughly the same total payment over a given T month\nperiod, if the CHW reacts to the incentive scheme almost rationally.\nWe model this problem as a reward design problem\nfor a Markov Decision Process (MDP) formulation for the\nCHWs\u2019 earning. There is a need for the mechanism to be\nsimple so that it is understood by the CHWs, thus, we explore\nlinear and piecewise linear rewards in the CHWs\u2019 measured\nunits of work. We solve this design problem via a novel\npolicy-reward gradient result. Our experiments using two real\nworld parameters from the ground provide evidence of reasonable\nincentive output by our scheme.", + "primary_area": "ai for social impact", + "author": "Avinandan Bose; Tracey Li; Arunesh Sinha; Tien Mai", + "authorids": "", + "aff": "University of Washington; D-Tree International; Rutgers University; Singapore Management University", + "bibtex": "@article{Bose_Li_Sinha_Mai_2023, title={A Fair Incentive Scheme for Community Health Workers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26653}, DOI={10.1609/aaai.v37i12.26653}, abstractNote={Community health workers (CHWs) play a crucial role in\nthe last mile delivery of essential health services to underserved\npopulations in low-income countries. Many nongovernmental\norganizations (NGOs) provide training and\nsupport to enable CHWs to deliver health services to their\ncommunities, with no charge to the recipients of the services.\nThis includes monetary compensation for the work that\nCHWs perform, which is broken down into a series of well defined\ntasks. 
In this work, we partner with a NGO D-Tree\nInternational to design a fair monetary compensation scheme\nfor tasks performed by CHWs in the semi-autonomous region\nof Zanzibar in Tanzania, Africa. In consultation with\nstakeholders, we interpret fairness as the equal opportunity\nto earn, which means that each CHW has the opportunity to\nearn roughly the same total payment over a given T month\nperiod, if the CHW reacts to the incentive scheme almost rationally.\nWe model this problem as a reward design problem\nfor a Markov Decision Process (MDP) formulation for the\nCHWs\u2019 earning. There is a need for the mechanism to be\nsimple so that it is understood by the CHWs, thus, we explore\nlinear and piecewise linear rewards in the CHWs\u2019 measured\nunits of work. We solve this design problem via a novel\npolicy-reward gradient result. Our experiments using two real\nworld parameters from the ground provide evidence of reasonable\nincentive output by our scheme.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bose, Avinandan and Li, Tracey and Sinha, Arunesh and Mai, Tien}, year={2023}, month={Jun.}, pages={14127-14135} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26653/26425", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26653", + "pdf_size": 493232, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12599789836981416308&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "cs.washington.edu;d-tree.org;rutgers.edu;smu.edu.sg", + "email": "cs.washington.edu;d-tree.org;rutgers.edu;smu.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of Washington;D-Tree International;Rutgers University;Singapore Management University", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.washington.edu;https://www.dtreeinternational.org;https://www.rutgers.edu;https://www.smu.edu.sg", + "aff_unique_abbr": "UW;D-Tree;Rutgers;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "United States;Singapore" + }, + { + "id": "article-26440", + "title": "A Faster Practical Approximation Scheme for the Permanent", + "track": "main", + "status": "Technical", + "abstract": "The permanent of a matrix has numerous applications but is notoriously hard to compute. While nonnegative matrices admit polynomial approximation schemes based on rapidly mixing Markov chains, the known practical estimators of the permanent rely on importance or rejection sampling. We advance the rejection sampling approach, which provides probabilistic accuracy guarantees, unlike importance sampling. Specifically, we give a novel class of nesting upper bounds and a simple preprocessing method that, in comparison to previous works, enable faster sampling with better acceptance rate; we demonstrate order-of-magnitude improvements with both theoretical and empirical analyses. In addition, we display instances on which our approximation scheme is competitive against state-of-the-art importance sampling based estimators.", + "primary_area": "reasoning under uncertainty", + "author": "Juha Harviainen; Mikko Koivisto", + "authorids": "", + "aff": "University of Helsinki; University of Helsinki", + "bibtex": "@article{Harviainen_Koivisto_2023, title={A Faster Practical Approximation Scheme for the Permanent}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26440}, DOI={10.1609/aaai.v37i10.26440}, abstractNote={The permanent of a matrix has numerous applications but is notoriously hard to compute. While nonnegative matrices admit polynomial approximation schemes based on rapidly mixing Markov chains, the known practical estimators of the permanent rely on importance or rejection sampling. 
We advance the rejection sampling approach, which provides probabilistic accuracy guarantees, unlike importance sampling. Specifically, we give a novel class of nesting upper bounds and a simple preprocessing method that, in comparison to previous works, enable faster sampling with better acceptance rate; we demonstrate order-of-magnitude improvements with both theoretical and empirical analyses. In addition, we display instances on which our approximation scheme is competitive against state-of-the-art importance sampling based estimators.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Harviainen, Juha and Koivisto, Mikko}, year={2023}, month={Jun.}, pages={12216-12224} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26440/26212", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26440", + "pdf_size": 527959, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4244024096792551422&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "helsinki.fi;helsinki.fi", + "email": "helsinki.fi;helsinki.fi", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Helsinki", + "aff_unique_dep": "", + "aff_unique_url": "https://www.helsinki.fi", + "aff_unique_abbr": "UH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Finland" + }, + { + "id": "article-26984", + "title": "A Federated Learning Monitoring Tool for Self-Driving Car Simulation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We propose CARLA-FLMon, which can monitor the progress of running federated learning (FL) training in the open-source autonomous driving simulation software, CARLA. 
The purpose of CARLA-FLMon is to visually present the status and results of federated learning training, and to provide an extensible FL training environment with which FL training can be performed repeatedly with updated learning strategies through analysis. With CARLA-FLMon, we can determine what factors have positive or negative influences on learning by visualizing training data. Then, we can optimize the parameters of the FL training model to improve the accuracy of FL. With preliminary experiments of CARLA-FLMon on lane recognition, we demonstrate that CARLA-FLmon can increase the overall accuracy from 80.33% to 93.82% by identifying lowly-contributing clients and excluding them.", + "primary_area": "", + "author": "Taejoon Lee; Hyunsu Mun; Youngseok Lee", + "authorids": "", + "aff": "Dept. of Computer Science and Engineering, Chungnam National University; Dept. of Computer Science and Engineering, Chungnam National University; Dept. of Computer Science and Engineering, Chungnam National University", + "bibtex": "@article{Lee_Mun_Lee_2024, title={A Federated Learning Monitoring Tool for Self-Driving Car Simulation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26984}, DOI={10.1609/aaai.v37i13.26984}, abstractNote={We propose CARLA-FLMon, which can monitor the progress of running federated learning (FL) training in the open-source autonomous driving simulation software, CARLA. The purpose of CARLA-FLMon is to visually present the status and results of federated learning training, and to provide an extensible FL training environment with which FL training can be performed repeatedly with updated learning strategies through analysis. With CARLA-FLMon, we can determine what factors have positive or negative influences on learning by visualizing training data. Then, we can optimize the parameters of the FL training model to improve the accuracy of FL. 
With preliminary experiments of CARLA-FLMon on lane recognition, we demonstrate that CARLA-FLmon can increase the overall accuracy from 80.33% to 93.82% by identifying lowly-contributing clients and excluding them.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Taejoon and Mun, Hyunsu and Lee, Youngseok}, year={2024}, month={Jul.}, pages={16248-16249} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26984/26756", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26984", + "pdf_size": 2034343, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2587062571972998559&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "o.cnu.ac.kr;cnu.ac.kr;cnu.ac.kr", + "email": "o.cnu.ac.kr;cnu.ac.kr;cnu.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Chungnam National University", + "aff_unique_dep": "Dept. of Computer Science and Engineering", + "aff_unique_url": "http://www.cnu.ac.kr", + "aff_unique_abbr": "CNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26464", + "title": "A Formal Metareasoning Model of Concurrent Planning and Execution", + "track": "main", + "status": "Technical", + "abstract": "Agents that plan and act in the real world must deal with the fact that time passes as they are planning. When timing is tight, there may be insufficient time to complete the search for a plan before it is time to act. By commencing execution before search concludes, one gains time to search by making planning and execution concurrent. However, this incurs the risk of making incorrect action choices, especially if actions are irreversible. This tradeoff between opportunity and risk is the problem addressed in this paper. 
Our main contribution is to formally define this setting as an abstract metareasoning problem. We find that the abstract problem is intractable. However, we identify special cases that are solvable in polynomial time, develop greedy solution algorithms, and, through tests on instances derived from search problems, find several methods that achieve promising practical performance. This work lays the foundation for a principled time-aware executive that concurrently plans and executes.", + "primary_area": "search and optimization", + "author": "Amihay Elboher; Ava Bensoussan; Erez Karpas; Wheeler Ruml; Shahaf S. Shperberg; Eyal Shimony", + "authorids": "", + "aff": "Ben-Gurion University, Israel; Ben-Gurion University, Israel; Technion, Israel; University of New Hampshire, USA; Ben-Gurion University, Israel; Ben-Gurion University, Israel", + "bibtex": "@article{Elboher_Bensoussan_Karpas_Ruml_Shperberg_Shimony_2023, title={A Formal Metareasoning Model of Concurrent Planning and Execution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26464}, DOI={10.1609/aaai.v37i10.26464}, abstractNote={Agents that plan and act in the real world must deal with the fact that time passes as they are planning. When timing is tight, there may be insufficient time to complete the search for a plan before it is time to act. By commencing execution before search concludes, one gains time to search by making planning and execution concurrent. However, this incurs the risk of making incorrect action choices, especially if actions are irreversible. This tradeoff between opportunity and risk is the problem addressed in this paper. Our main contribution is to formally define this setting as an abstract metareasoning problem. We find that the abstract problem is intractable. 
However, we identify special cases that are solvable in polynomial time, develop greedy solution algorithms, and, through tests on instances derived from search problems, find several methods that achieve promising practical performance. This work lays the foundation for a principled time-aware executive that concurrently plans and executes.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Elboher, Amihay and Bensoussan, Ava and Karpas, Erez and Ruml, Wheeler and Shperberg, Shahaf S. and Shimony, Eyal}, year={2023}, month={Jun.}, pages={12427-12435} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26464/26236", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26464", + "pdf_size": 236002, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17479532874756609499&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "post.bgu.ac.il;post.bgu.ac.il;technion.ac.il;cs.unh.edu;post.bgu.ac.il;cs.bgu.ac.il", + "email": "post.bgu.ac.il;post.bgu.ac.il;technion.ac.il;cs.unh.edu;post.bgu.ac.il;cs.bgu.ac.il", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;0;0", + "aff_unique_norm": "Ben-Gurion University;Technion - Israel Institute of Technology;University of New Hampshire", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.bgu.ac.il;https://www.technion.ac.il/en/;https://www.unh.edu", + "aff_unique_abbr": "BGU;Technion;UNH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0;0", + "aff_country_unique": "Israel;United States" + }, + { + "id": "article-25511", + "title": "A Framework to Design Approximation Algorithms for Finding Diverse Solutions in Combinatorial Problems", + "track": "main", + "status": "Technical", + "abstract": "Finding a \\emph{single} best solution is the most common objective in combinatorial optimization problems. 
However, such a single solution may not be applicable to real-world problems as objective functions and constraints are only ``approximately'' formulated for original real-world problems. To solve this issue, finding \\emph{multiple} solutions is a natural direction, and diversity of solutions is an important concept in this context. Unfortunately, finding diverse solutions is much harder than finding a single solution. To cope with the difficulty, we investigate the approximability of finding diverse solutions. As a main result, we propose a framework to design approximation algorithms for finding diverse solutions, which yields several outcomes including constant-factor approximation algorithms for finding diverse matchings in graphs and diverse common bases in two matroids and PTASes for finding diverse minimum cuts and interval schedulings.", + "primary_area": "constraint satisfaction and optimization", + "author": "Tesshu Hanaka; Masashi Kiyomi; Yasuaki Kobayashi; Yusuke Kobayashi; Kazuhiro Kurita; Yota Otachi", + "authorids": "", + "aff": "Kyushu University, Fukuoka, Japan; Seikei University, Musashino-shi, Tokyo, Japan; Hokkaido University, Sapporo, Japan; Kyoto University, Kyoto, Japan; Natoya University, Nagoya, Japan; Natoya University, Nagoya, Japan", + "bibtex": "@article{Hanaka_Kiyomi_Kobayashi_Kobayashi_Kurita_Otachi_2023, title={A Framework to Design Approximation Algorithms for Finding Diverse Solutions in Combinatorial Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25511}, DOI={10.1609/aaai.v37i4.25511}, abstractNote={Finding a \\emph{single} best solution is the most common objective in combinatorial optimization problems. However, such a single solution may not be applicable to real-world problems as objective functions and constraints are only ``approximately\u2019\u2019 formulated for original real-world problems. 
To solve this issue, finding \\emph{multiple} solutions is a natural direction, and diversity of solutions is an important concept in this context. Unfortunately, finding diverse solutions is much harder than finding a single solution. To cope with the difficulty, we investigate the approximability of finding diverse solutions. As a main result, we propose a framework to design approximation algorithms for finding diverse solutions, which yields several outcomes including constant-factor approximation algorithms for finding diverse matchings in graphs and diverse common bases in two matroids and PTASes for finding diverse minimum cuts and interval schedulings.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hanaka, Tesshu and Kiyomi, Masashi and Kobayashi, Yasuaki and Kobayashi, Yusuke and Kurita, Kazuhiro and Otachi, Yota}, year={2023}, month={Jun.}, pages={3968-3976} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25511/25283", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25511", + "pdf_size": 180840, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15764893915815701969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "inf.kyushu-u.ac.jp;st.seikei.ac.jp;ist.hokudai.ac.jp;kurims.kyoto-u.ac.jp;i.nagoya-u.ac.jp;nagoya-u.jp", + "email": "inf.kyushu-u.ac.jp;st.seikei.ac.jp;ist.hokudai.ac.jp;kurims.kyoto-u.ac.jp;i.nagoya-u.ac.jp;nagoya-u.jp", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;4", + "aff_unique_norm": "Kyushu University;Seikei University;Hokkaido University;Kyoto University;Nagoya University", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.kyushu-u.ac.jp;https://www.seikei-u.ac.jp;https://www.hokudai.ac.jp;https://www.kyoto-u.ac.jp;", + "aff_unique_abbr": "Kyushu U;Seikei U;Hokkaido U;Kyoto U;", + "aff_campus_unique_index": "0;1;2;3;4;4", + "aff_campus_unique": 
"Fukuoka;Tokyo;Sapporo;Kyoto;Nagoya", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26474", + "title": "A Generalized Scalarization Method for Evolutionary Multi-Objective Optimization", + "track": "main", + "status": "Technical", + "abstract": "The decomposition-based multi-objective evolutionary algorithm (MOEA/D) transforms a multi-objective optimization problem (MOP) into a set of single-objective subproblems for collaborative optimization. Mismatches between subproblems and solutions can lead to severe performance degradation of MOEA/D. Most existing mismatch coping strategies only work when the L\u221e scalarization is used. A mismatch coping strategy that can use any Lp scalarization, even when facing MOPs with non-convex Pareto fronts, is of great significance for MOEA/D. This paper uses the global replacement (GR) as the backbone. We analyze how GR can no longer avoid mismatches when L\u221e is replaced by another Lp with p \u2208 [1, \u221e), and find that the Lp-based (1 \u2264 p < \u221e) subproblems having inconsistently large preference regions. When p is set to a small value, some middle subproblems have very small preference regions so that their direction vectors cannot pass through their corresponding preference regions. Therefore, we propose a generalized Lp (GLp) scalarization to ensure that the subproblem\u2019s direction vector passes through its preference region. Our theoretical analysis shows that GR can always avoid mismatches when using the GLp scalarization for any p \u2265 1. 
The experimental studies on various MOPs conform to the theoretical analysis.", + "primary_area": "search and optimization", + "author": "Ruihao Zheng; Zhenkun Wang", + "authorids": "", + "aff": "School of System Design and Intelligent Manufacturing, Southern University of Science and Technology, Shenzhen, China; School of System Design and Intelligent Manufacturing, Southern University of Science and Technology, Shenzhen, China", + "bibtex": "@article{Zheng_Wang_2023, title={A Generalized Scalarization Method for Evolutionary Multi-Objective Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26474}, DOI={10.1609/aaai.v37i10.26474}, abstractNote={The decomposition-based multi-objective evolutionary algorithm (MOEA/D) transforms a multi-objective optimization problem (MOP) into a set of single-objective subproblems for collaborative optimization. Mismatches between subproblems and solutions can lead to severe performance degradation of MOEA/D. Most existing mismatch coping strategies only work when the L\u221e scalarization is used. A mismatch coping strategy that can use any Lp scalarization, even when facing MOPs with non-convex Pareto fronts, is of great significance for MOEA/D. This paper uses the global replacement (GR) as the backbone. We analyze how GR can no longer avoid mismatches when L\u221e is replaced by another Lp with p \u2208 [1, \u221e), and find that the Lp-based (1 \u2264 p < \u221e) subproblems having inconsistently large preference regions. When p is set to a small value, some middle subproblems have very small preference regions so that their direction vectors cannot pass through their corresponding preference regions. Therefore, we propose a generalized Lp (GLp) scalarization to ensure that the subproblem\u2019s direction vector passes through its preference region. Our theoretical analysis shows that GR can always avoid mismatches when using the GLp scalarization for any p \u2265 1. 
The experimental studies on various MOPs conform to the theoretical analysis.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Ruihao and Wang, Zhenkun}, year={2023}, month={Jun.}, pages={12518-12525} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26474/26246", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26474", + "pdf_size": 1178793, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12894705653640941141&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.sustech.edu.cn;gmail.com", + "email": "mail.sustech.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Southern University of Science and Technology", + "aff_unique_dep": "School of System Design and Intelligent Manufacturing", + "aff_unique_url": "https://www.sustech.edu.cn", + "aff_unique_abbr": "SUSTech", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26173", + "title": "A Generalized Unbiased Risk Estimator for Learning with Augmented Classes", + "track": "main", + "status": "Technical", + "abstract": "In contrast to the standard learning paradigm where all classes can be observed in training data, learning with augmented classes (LAC) tackles the problem where augmented classes unobserved in the training data may emerge in the test phase. Previous research showed that given unlabeled data, an unbiased risk estimator (URE) can be derived, which can be minimized for LAC with theoretical guarantees. However, this URE is only restricted to the specific type of one-versus-rest loss functions for multi-class classification, making it not flexible enough when the loss needs to be changed with the dataset in practice. 
In this paper, we propose a generalized URE that can be equipped with arbitrary loss functions while maintaining the theoretical guarantees, given unlabeled data for LAC. To alleviate the issue of negative empirical risk commonly encountered by previous studies, we further propose a novel risk-penalty regularization term. Experiments demonstrate the effectiveness of our proposed method.", + "primary_area": "machine learning iii", + "author": "Senlin Shu; Shuo He; Haobo Wang; Hongxin Wei; Tao Xiang; Lei Feng", + "authorids": "", + "aff": "College of Computer Science, Chongqing University, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, China; College of Computer Science and Technology, Zhejiang University, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore; College of Computer Science, Chongqing University, China; College of Computer Science, Chongqing University, China", + "bibtex": "@article{Shu_He_Wang_Wei_Xiang_Feng_2023, title={A Generalized Unbiased Risk Estimator for Learning with Augmented Classes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26173}, DOI={10.1609/aaai.v37i8.26173}, abstractNote={In contrast to the standard learning paradigm where all classes can be observed in training data, learning with augmented classes (LAC) tackles the problem where augmented classes unobserved in the training data may emerge in the test phase. Previous research showed that given unlabeled data, an unbiased risk estimator (URE) can be derived, which can be minimized for LAC with theoretical guarantees. However, this URE is only restricted to the specific type of one-versus-rest loss functions for multi-class classification, making it not flexible enough when the loss needs to be changed with the dataset in practice. 
In this paper, we propose a generalized URE that can be equipped with arbitrary loss functions while maintaining the theoretical guarantees, given unlabeled data for LAC. To alleviate the issue of negative empirical risk commonly encountered by previous studies, we further propose a novel risk-penalty regularization term. Experiments demonstrate the effectiveness of our proposed method.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shu, Senlin and He, Shuo and Wang, Haobo and Wei, Hongxin and Xiang, Tao and Feng, Lei}, year={2023}, month={Jun.}, pages={9829-9836} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26173/25945", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26173", + "pdf_size": 13979895, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16099792673021537829&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.cqu.edu.cn;std.uestc.edu.cn;zju.edu.cn;e.ntu.edu.sg;cqu.edu.cn;cqu.edu.cn", + "email": "stu.cqu.edu.cn;std.uestc.edu.cn;zju.edu.cn;e.ntu.edu.sg;cqu.edu.cn;cqu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;0;0", + "aff_unique_norm": "Chongqing University;University of Electronic Science and Technology of China;Zhejiang University;Nanyang Technological University", + "aff_unique_dep": "College of Computer Science;School of Computer Science and Engineering;College of Computer Science and Technology;School of Computer Science and Engineering", + "aff_unique_url": "http://en.cqu.edu.cn/;http://www.uestc.edu.cn;http://www.zju.edu.cn;https://www.ntu.edu.sg", + "aff_unique_abbr": ";UESTC;ZJU;NTU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Singapore", + "aff_country_unique_index": "0;0;0;1;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26645", + "title": "A Generative Approach for Script Event Prediction via Contrastive Fine-Tuning", 
+ "track": "main", + "status": "Technical", + "abstract": "Script event prediction aims to predict the subsequent event given the context. This requires the capability to infer the correlations between events. Recent works have attempted to improve event correlation reasoning by using pretrained language models and incorporating external knowledge (e.g., discourse relations). Though promising results have been achieved, some challenges still remain. First, the pretrained language models adopted by current works ignore event-level knowledge, resulting in an inability to capture the correlations between events well. Second, modeling correlations between events with discourse relations is limited because it can only capture explicit correlations between events with discourse markers, and cannot capture many implicit correlations. To this end, we propose a novel generative approach for this task, in which a pretrained language model is fine-tuned with an event-centric pretraining objective and predicts the next event within a generative paradigm. Specifically, we first introduce a novel event-level blank infilling strategy as the learning objective to inject event-level knowledge into the pretrained language model, and then design a likelihood-based contrastive loss for fine-tuning the generative model. Instead of using an additional prediction layer, we perform prediction by using sequence likelihoods generated by the generative model. Our approach models correlations between events in a soft way without any external knowledge. The likelihood-based prediction eliminates the need to use additional networks to make predictions and is somewhat interpretable since it scores each word in the event. Experimental results on the multi-choice narrative cloze (MCNC) task demonstrate that our approach achieves better results than other state-of-the-art baselines. 
Our code will be available at https://github.com/zhufq00/mcnc.", + "primary_area": "speech natural language processing", + "author": "Fangqi Zhu; Jun Gao; Changlong Yu; Wei Wang; Chen Xu; Xin Mu; Min Yang; Ruifeng Xu", + "authorids": "", + "aff": "Harbin Institute of Technology, Shenzhen; Harbin Institute of Technology, Shenzhen; ; ; Beijing University of Technology; Peng Cheng Laboratory; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; Harbin Institute of Technology, Shenzhen+Peng Cheng Laboratory+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies", + "bibtex": "@article{Zhu_Gao_Yu_Wang_Xu_Mu_Yang_Xu_2023, title={A Generative Approach for Script Event Prediction via Contrastive Fine-Tuning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26645}, DOI={10.1609/aaai.v37i11.26645}, abstractNote={Script event prediction aims to predict the subsequent event given the context. This requires the capability to infer the correlations between events. Recent works have attempted to improve event correlation reasoning by using pretrained language models and incorporating external knowledge (e.g., discourse relations). Though promising results have been achieved, some challenges still remain. First, the pretrained language models adopted by current works ignore event-level knowledge, resulting in an inability to capture the correlations between events well. Second, modeling correlations between events with discourse relations is limited because it can only capture explicit correlations between events with discourse markers, and cannot capture many implicit correlations. To this end, we propose a novel generative approach for this task, in which a pretrained language model is fine-tuned with an event-centric pretraining objective and predicts the next event within a generative paradigm. 
Specifically, we first introduce a novel event-level blank infilling strategy as the learning objective to inject event-level knowledge into the pretrained language model, and then design a likelihood-based contrastive loss for fine-tuning the generative model. Instead of using an additional prediction layer, we perform prediction by using sequence likelihoods generated by the generative model. Our approach models correlations between events in a soft way without any external knowledge. The likelihood-based prediction eliminates the need to use additional networks to make predictions and is somewhat interpretable since it scores each word in the event. Experimental results on the multi-choice narrative cloze (MCNC) task demonstrate that our approach achieves better results than other state-of-the-art baselines. Our code will be available at https://github.com/zhufq00/mcnc.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Fangqi and Gao, Jun and Yu, Changlong and Wang, Wei and Xu, Chen and Mu, Xin and Yang, Min and Xu, Ruifeng}, year={2023}, month={Jun.}, pages={14056-14064} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26645/26417", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26645", + "pdf_size": 664048, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16894760446836266260&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com; ; ; ; ; ;hit.edu.cn", + "email": "gmail.com;gmail.com; ; ; ; ; ;hit.edu.cn", + "github": "https://github.com/zhufq00/mcnc", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;2;3;0+2+4", + "aff_unique_norm": "Harbin Institute of Technology;Beijing University of Technology;Peng Cheng Laboratory;Shenzhen Institute of Advanced Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies", + "aff_unique_dep": ";;;;Provincial Key Laboratory of 
Novel Security Intelligence Technologies", + "aff_unique_url": "http://en.hhit.edu.cn/;http://www.bjut.edu.cn;http://www.pcl.ac.cn;http://www.siat.cas.cn;", + "aff_unique_abbr": "HIT;BJUT;PCL;SIAT;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26004", + "title": "A Gift from Label Smoothing: Robust Training with Adaptive Label Smoothing via Auxiliary Classifier under Label Noise", + "track": "main", + "status": "Technical", + "abstract": "As deep neural networks can easily overfit noisy labels, robust training in the presence of noisy labels is becoming an important challenge in modern deep learning. While existing methods address this problem in various directions, they still produce unpredictable sub-optimal results since they rely on the posterior information estimated by the feature extractor corrupted by noisy labels. Lipschitz regularization successfully alleviates this problem by training a robust feature extractor, but it requires longer training time and expensive computations. Motivated by this, we propose a simple yet effective method, called ALASCA, which efficiently provides a robust feature extractor under label noise. ALASCA integrates two key ingredients: (1) adaptive label smoothing based on our theoretical analysis that label smoothing implicitly induces Lipschitz regularization, and (2) auxiliary classifiers that enable practical application of intermediate Lipschitz regularization with negligible computations. We conduct wide-ranging experiments for ALASCA and combine our proposed method with previous noise-robust methods on several synthetic and real-world datasets. 
Experimental results show that our framework consistently improves the robustness of feature extractors and the performance of existing baselines with efficiency.", + "primary_area": "machine learning ii", + "author": "Jongwoo Ko; Bongsoo Yi; Se-Young Yun", + "authorids": "", + "aff": "Kim Jaechul Graduate School of AI, KAIST; Department of Statistics and Operations Research, University of North Carolina at Chapel Hill; Kim Jaechul Graduate School of AI, KAIST", + "bibtex": "@article{Ko_Yi_Yun_2023, title={A Gift from Label Smoothing: Robust Training with Adaptive Label Smoothing via Auxiliary Classifier under Label Noise}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26004}, DOI={10.1609/aaai.v37i7.26004}, abstractNote={As deep neural networks can easily overfit noisy labels, robust training in the presence of noisy labels is becoming an important challenge in modern deep learning. While existing methods address this problem in various directions, they still produce unpredictable sub-optimal results since they rely on the posterior information estimated by the feature extractor corrupted by noisy labels. Lipschitz regularization successfully alleviates this problem by training a robust feature extractor, but it requires longer training time and expensive computations. Motivated by this, we propose a simple yet effective method, called ALASCA, which efficiently provides a robust feature extractor under label noise. ALASCA integrates two key ingredients: (1) adaptive label smoothing based on our theoretical analysis that label smoothing implicitly induces Lipschitz regularization, and (2) auxiliary classifiers that enable practical application of intermediate Lipschitz regularization with negligible computations. We conduct wide-ranging experiments for ALASCA and combine our proposed method with previous noise-robust methods on several synthetic and real-world datasets. 
Experimental results show that our framework consistently improves the robustness of feature extractors and the performance of existing baselines with efficiency.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ko, Jongwoo and Yi, Bongsoo and Yun, Se-Young}, year={2023}, month={Jun.}, pages={8325-8333} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26004/25776", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26004", + "pdf_size": 324941, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11309960870569569762&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "kaist.ac.kr;unc.edu;kaist.ac.kr", + "email": "kaist.ac.kr;unc.edu;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "KAIST;University of North Carolina at Chapel Hill", + "aff_unique_dep": "Kim Jaechul Graduate School of AI;Department of Statistics and Operations Research", + "aff_unique_url": "https://www.kaist.edu;https://www.unc.edu", + "aff_unique_abbr": "KAIST;UNC Chapel Hill", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Chapel Hill", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-26623", + "title": "A Graph Fusion Approach for Cross-Lingual Machine Reading Comprehension", + "track": "main", + "status": "Technical", + "abstract": "Although great progress has been made for Machine Reading Comprehension (MRC) in English, scaling out to a large number of languages remains a huge challenge due to the lack of large amounts of annotated training data in non-English languages. To address this challenge, some recent efforts of cross-lingual MRC employ machine translation to transfer knowledge from English to other languages, through either explicit alignment or implicit attention. 
For effective knowledge transition, it is beneficial to leverage both semantic and syntactic information. However, the existing methods fail to explicitly incorporate syntax information in model learning. Consequently, the models are not robust to errors in alignment and noises in attention. In this work, we propose a novel approach, which jointly models the cross-lingual alignment information and the mono-lingual syntax information using a graph. We develop a series of algorithms, including graph construction, learning, and pre-training. The experiments on two benchmark datasets for cross-lingual MRC show that our approach outperforms all strong baselines, which verifies the effectiveness of syntax information for cross-lingual MRC.", + "primary_area": "speech natural language processing", + "author": "Zenan Xu; Linjun Shou; Jian Pei; Ming Gong; Qinliang Su; Xiaojun Quan; Daxin Jiang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University; Microsoft Search Technology Center Asia (STCA); School of Computing Science, Simon Fraser University; Microsoft Search Technology Center Asia (STCA); School of Computer Science and Engineering, Sun Yat-sen University + Guangdong Key Laboratory of Big Data Analysis and Processing; School of Computer Science and Engineering, Sun Yat-sen University; Microsoft Search Technology Center Asia (STCA)", + "bibtex": "@article{Xu_Shou_Pei_Gong_Su_Quan_Jiang_2023, title={A Graph Fusion Approach for Cross-Lingual Machine Reading Comprehension}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26623}, DOI={10.1609/aaai.v37i11.26623}, abstractNote={Although great progress has been made for Machine Reading Comprehension (MRC) in English, scaling out to a large number of languages remains a huge challenge due to the lack of large amounts of annotated training data in non-English languages. 
To address this challenge, some recent efforts of cross-lingual MRC employ machine translation to transfer knowledge from English to other languages, through either explicit alignment or implicit attention. For effective knowledge transition, it is beneficial to leverage both semantic and syntactic information. However, the existing methods fail to explicitly incorporate syntax information in model learning. Consequently, the models are not robust to errors in alignment and noises in attention. In this work, we propose a novel approach, which jointly models the cross-lingual alignment information and the mono-lingual syntax information using a graph. We develop a series of algorithms, including graph construction, learning, and pre-training. The experiments on two benchmark datasets for cross-lingual MRC show that our approach outperforms all strong baselines, which verifies the effectiveness of syntax information for cross-lingual MRC.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Zenan and Shou, Linjun and Pei, Jian and Gong, Ming and Su, Qinliang and Quan, Xiaojun and Jiang, Daxin}, year={2023}, month={Jun.}, pages={13861-13868} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26623/26395", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26623", + "pdf_size": 347869, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4962251889467846912&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;microsoft.com;cs.sfu.ca;microsoft.com;mail.sysu.edu.cn;mail.sysu.edu.cn;microsoft.com", + "email": "mail2.sysu.edu.cn;microsoft.com;cs.sfu.ca;microsoft.com;mail.sysu.edu.cn;mail.sysu.edu.cn;microsoft.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;1;0+3;0;1", + "aff_unique_norm": "Sun Yat-sen University;Microsoft Corporation;Simon Fraser University;Guangdong Key Laboratory of Big Data 
Analysis and Processing", + "aff_unique_dep": "School of Computer Science and Engineering;Search Technology Center;School of Computing Science;Big Data Analysis and Processing", + "aff_unique_url": "http://www.sysu.edu.cn;https://www.microsoft.com;https://www.sfu.ca;", + "aff_unique_abbr": "SYSU;Microsoft STCA;SFU;", + "aff_campus_unique_index": "1;2;1;;1", + "aff_campus_unique": ";Asia;Burnaby", + "aff_country_unique_index": "0;1;2;1;0+0;0;1", + "aff_country_unique": "China;International;Canada" + }, + { + "id": "article-26994", + "title": "A Highly Efficient Marine Mammals Classifier Based on a Cross-Covariance Attended Compact Feed-Forward Sequential Memory Network (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Military active sonar and marine transportation are detrimental to the livelihood of marine mammals and the ecosystem. Early detection and classification of marine mammals using machine learning can help humans to mitigate the harm to marine mammals. This paper proposes a cross-covariance attended compact Feed-Forward Sequential Memory Network (CC-FSMN). The proposed framework shows improved efficiency over multiple convolutional neural network (CNN) backbones. It also maintains a relatively decent performance.", + "primary_area": "", + "author": "Xiangrui Liu; Julian Cheng", + "authorids": "", + "aff": "University of British Columbia, Kelowna, British Columbia, Canada; University of British Columbia, Kelowna, British Columbia, Canada", + "bibtex": "@article{Liu_Cheng_2024, title={A Highly Efficient Marine Mammals Classifier Based on a Cross-Covariance Attended Compact Feed-Forward Sequential Memory Network (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26994}, DOI={10.1609/aaai.v37i13.26994}, abstractNote={Military active sonar and marine transportation are detrimental to the livelihood of marine mammals and the ecosystem. 
Early detection and classification of marine mammals using machine learning can help humans to mitigate the harm to marine mammals. This paper proposes a cross-covariance attended compact Feed-Forward Sequential Memory Network (CC-FSMN). The proposed framework shows improved efficiency over multiple convolutional neural network (CNN) backbones. It also maintains a relatively decent performance.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Xiangrui and Cheng, Julian}, year={2024}, month={Jul.}, pages={16268-16269} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26994/26766", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26994", + "pdf_size": 91168, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16291906524092137072&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 2, + "aff_domain": "student.ubc.ca;ubc.ca", + "email": "student.ubc.ca;ubc.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of British Columbia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ubc.ca", + "aff_unique_abbr": "UBC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Kelowna", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26752", + "title": "A Holistic Approach to Undesired Content Detection in the Real World", + "track": "aaai special track", + "status": "Technical", + "abstract": "We present a holistic approach to building a robust and useful natural language classification system for real-world content moderation. The success of such a system relies on a chain of carefully designed and executed steps, including the design of content taxonomies and labeling instructions, data quality control, an active learning pipeline to capture rare events, and a variety of methods to make the model robust and to avoid overfitting. 
Our moderation system is trained to detect a broad set of categories of undesired content, including sexual content, hateful content, violence, self-harm, and harassment. This approach generalizes to a wide range of different content taxonomies and can be used to create high-quality content classifiers that outperform off-the-shelf models.", + "primary_area": "safe and robust ai", + "author": "Todor Markov; Chong Zhang; Sandhini Agarwal; Florentine Eloundou Nekoul; Theodore Lee; Steven Adler; Angela Jiang; Lilian Weng", + "authorids": "", + "aff": "OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI", + "bibtex": "@article{Markov_Zhang_Agarwal_Eloundou Nekoul_Lee_Adler_Jiang_Weng_2023, title={A Holistic Approach to Undesired Content Detection in the Real World}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26752}, DOI={10.1609/aaai.v37i12.26752}, abstractNote={We present a holistic approach to building a robust and useful natural language classification system for real-world content moderation. The success of such a system relies on a chain of carefully designed and executed steps, including the design of content taxonomies and labeling instructions, data quality control, an active learning pipeline to capture rare events, and a variety of methods to make the model robust and to avoid overfitting. Our moderation system is trained to detect a broad set of categories of undesired content, including sexual content, hateful content, violence, self-harm, and harassment. 
This approach generalizes to a wide range of different content taxonomies and can be used to create high-quality content classifiers that outperform off-the-shelf models.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Markov, Todor and Zhang, Chong and Agarwal, Sandhini and Eloundou Nekoul, Florentine and Lee, Theodore and Adler, Steven and Jiang, Angela and Weng, Lilian}, year={2023}, month={Jun.}, pages={15009-15018} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26752/26524", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26752", + "pdf_size": 370170, + "gs_citation": 249, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=573718946433614522&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "openai.com;openai.com;openai.com;openai.com;openai.com;openai.com;openai.com;openai.com", + "email": "openai.com;openai.com;openai.com;openai.com;openai.com;openai.com;openai.com;openai.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "OpenAI", + "aff_unique_dep": "", + "aff_unique_url": "https://openai.com", + "aff_unique_abbr": "OpenAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26593", + "title": "A Latent-Variable Model for Intrinsic Probing", + "track": "main", + "status": "Technical", + "abstract": "The success of pre-trained contextualized representations has prompted researchers to analyze them for the presence of linguistic information. 
\nIndeed, it is natural to assume that these pre-trained representations do encode some level of linguistic knowledge as they have brought about large empirical improvements on a wide variety of NLP tasks, which suggests they are learning true linguistic generalization.\nIn this work, we focus on intrinsic probing, an analysis technique where the goal is not only to identify whether a representation encodes a linguistic attribute but also to pinpoint where this attribute is encoded.\nWe propose a novel latent-variable formulation for constructing intrinsic probes and derive a tractable variational approximation to the log-likelihood.\nOur results show that our model is versatile and yields tighter mutual information estimates than two intrinsic probes previously proposed in the literature.\nFinally, we find empirical evidence that pre-trained representations \ndevelop a cross-lingually entangled notion of morphosyntax.", + "primary_area": "speech natural language processing", + "author": "Karolina Sta\u0144czak; Lucas Torroba Hennigen; Adina Williams; Ryan Cotterell; Isabelle Augenstein", + "authorids": "", + "aff": "University of Copenhagen; Massachusetts Institute of Technology; Meta AI; ETH Z\u00fcrich; University of Copenhagen", + "bibtex": "@article{Sta\u0144czak_Torroba Hennigen_Williams_Cotterell_Augenstein_2023, title={A Latent-Variable Model for Intrinsic Probing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26593}, DOI={10.1609/aaai.v37i11.26593}, abstractNote={The success of pre-trained contextualized representations has prompted researchers to analyze them for the presence of linguistic information. 
Indeed, it is natural to assume that these pre-trained representations do encode some level of linguistic knowledge as they have brought about large empirical improvements on a wide variety of NLP tasks, which suggests they are learning true linguistic generalization.\nIn this work, we focus on intrinsic probing, an analysis technique where the goal is not only to identify whether a representation encodes a linguistic attribute but also to pinpoint where this attribute is encoded.\nWe propose a novel latent-variable formulation for constructing intrinsic probes and derive a tractable variational approximation to the log-likelihood.\nOur results show that our model is versatile and yields tighter mutual information estimates than two intrinsic probes previously proposed in the literature.\nFinally, we find empirical evidence that pre-trained representations develop a cross-lingually entangled notion of morphosyntax.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sta\u0144czak, Karolina and Torroba Hennigen, Lucas and Williams, Adina and Cotterell, Ryan and Augenstein, Isabelle}, year={2023}, month={Jun.}, pages={13591-13599} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26593/26365", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26593", + "pdf_size": 361514, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15721904041711698622&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "di.ku.dk;mit.edu;meta.com;inf.ethz.ch;di.ku.dk", + "email": "di.ku.dk;mit.edu;meta.com;inf.ethz.ch;di.ku.dk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "University of Copenhagen;Massachusetts Institute of Technology;Meta Platforms, Inc.;ETH Z\u00fcrich", + "aff_unique_dep": ";;Meta AI;", + "aff_unique_url": "https://www.ku.dk;https://web.mit.edu;https://meta.com;https://www.ethz.ch", + 
"aff_unique_abbr": "UCPH;MIT;Meta;ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;2;0", + "aff_country_unique": "Denmark;United States;Switzerland" + }, + { + "id": "article-25307", + "title": "A Learnable Radial Basis Positional Embedding for Coordinate-MLPs", + "track": "main", + "status": "Technical", + "abstract": "We propose a novel method to enhance the performance of coordinate-MLPs (also referred to as neural fields) by learning instance-specific positional embeddings. End-to-end optimization of positional embedding parameters along with network weights leads to poor generalization performance. Instead, we develop a generic framework to learn the positional embedding based on the classic graph-Laplacian regularization, which can implicitly balance the trade-off between memorization and generalization. This framework is then used to propose a novel positional embedding scheme, where the hyperparameters are learned per coordinate (i.e instance) to deliver optimal performance. We show that the proposed embedding achieves better performance with higher stability compared to the well-established random Fourier features (RFF). Further, we demonstrate that the proposed embedding scheme yields stable gradients, enabling seamless integration into deep architectures as intermediate layers.", + "primary_area": "computer vision ii", + "author": "Sameera Ramasinghe; Simon Lucey", + "authorids": "", + "aff": "Amazon; University of Adelaide", + "bibtex": "@article{Ramasinghe_Lucey_2023, title={A Learnable Radial Basis Positional Embedding for Coordinate-MLPs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25307}, DOI={10.1609/aaai.v37i2.25307}, abstractNote={We propose a novel method to enhance the performance of coordinate-MLPs (also referred to as neural fields) by learning instance-specific positional embeddings. 
End-to-end optimization of positional embedding parameters along with network weights leads to poor generalization performance. Instead, we develop a generic framework to learn the positional embedding based on the classic graph-Laplacian regularization, which can implicitly balance the trade-off between memorization and generalization. This framework is then used to propose a novel positional embedding scheme, where the hyperparameters are learned per coordinate (i.e instance) to deliver optimal performance. We show that the proposed embedding achieves better performance with higher stability compared to the well-established random Fourier features (RFF). Further, we demonstrate that the proposed embedding scheme yields stable gradients, enabling seamless integration into deep architectures as intermediate layers.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ramasinghe, Sameera and Lucey, Simon}, year={2023}, month={Jun.}, pages={2137-2145} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25307/25079", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25307", + "pdf_size": 12665622, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17865737339423922308&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "amazon.com; ", + "email": "amazon.com; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Amazon.com, Inc.;University of Adelaide", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.amazon.com;https://www.adelaide.edu.au", + "aff_unique_abbr": "Amazon;Adelaide", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United States;Australia" + }, + { + "id": "article-25075", + "title": "A Machine with Short-Term, Episodic, and Semantic Memory Systems", + "track": "main", + "status": "Technical", + "abstract": 
"Inspired by the cognitive science theory of the explicit human memory systems, we have modeled an agent with short-term, episodic, and semantic memory systems, each of which is modeled with a knowledge graph. To evaluate this system and analyze the behavior of this agent, we designed and released our own reinforcement learning agent environment, \u201cthe Room\u201d, where an agent has to learn how to encode, store, and retrieve memories to maximize its return by answering questions. We show that our deep Q-learning based agent successfully learns whether a short-term memory should be forgotten, or rather be stored in the episodic or semantic memory systems. Our experiments indicate that an agent with human-like memory systems can outperform an agent without this memory structure in the environment.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Taewoon Kim; Michael Cochez; Vincent Francois-Lavet; Mark Neerincx; Piek Vossen", + "authorids": "", + "aff": "Vrije Universiteit Amsterdam; Vrije Universiteit Amsterdam; Vrije Universiteit Amsterdam; Technische Universiteit Delft; Vrije Universiteit Amsterdam", + "bibtex": "@article{Kim_Cochez_Francois-Lavet_Neerincx_Vossen_2023, title={A Machine with Short-Term, Episodic, and Semantic Memory Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25075}, DOI={10.1609/aaai.v37i1.25075}, abstractNote={Inspired by the cognitive science theory of the explicit human memory systems, we have modeled an agent with short-term, episodic, and semantic memory systems, each of which is modeled with a knowledge graph. To evaluate this system and analyze the behavior of this agent, we designed and released our own reinforcement learning agent environment, \u201cthe Room\u201d, where an agent has to learn how to encode, store, and retrieve memories to maximize its return by answering questions. 
We show that our deep Q-learning based agent successfully learns whether a short-term memory should be forgotten, or rather be stored in the episodic or semantic memory systems. Our experiments indicate that an agent with human-like memory systems can outperform an agent without this memory structure in the environment.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Taewoon and Cochez, Michael and Francois-Lavet, Vincent and Neerincx, Mark and Vossen, Piek}, year={2023}, month={Jun.}, pages={48-56} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25075/24847", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25075", + "pdf_size": 449525, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14069036524015669619&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "vu.nl;vu.nl;vu.nl;tudelft.nl;vu.nl", + "email": "vu.nl;vu.nl;vu.nl;tudelft.nl;vu.nl", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Vrije Universiteit Amsterdam;Delft University of Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.vu.nl;https://www.tudelft.nl", + "aff_unique_abbr": "VU Amsterdam;TUDelft", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Delft", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "article-26133", + "title": "A Model-Agnostic Heuristics for Selective Classification", + "track": "main", + "status": "Technical", + "abstract": "Selective classification (also known as classification with reject option) conservatively extends a classifier with a selection function to determine whether or not a prediction should be accepted (i.e., trusted, used, deployed). 
This is a highly relevant issue in socially sensitive tasks, such as credit scoring.\nState-of-the-art approaches rely on Deep Neural Networks (DNNs) that train at the same time both the classifier and the selection function. These approaches are model-specific and computationally expensive. \nWe propose a model-agnostic approach, as it can work with any base probabilistic binary classification algorithm, and it can be scalable to large tabular datasets if the base classifier is so. The proposed algorithm, called SCROSS, exploits a cross-fitting strategy and theoretical results for quantile estimation to build the selection function. Experiments on real-world data show that SCROSS improves over existing methods.", + "primary_area": "machine learning iii", + "author": "Andrea Pugnana; Salvatore Ruggieri", + "authorids": "", + "aff": "Scuola Normale Superiore, Pisa, Italy; University of Pisa, Pisa, Italy", + "bibtex": "@article{Pugnana_Ruggieri_2023, title={A Model-Agnostic Heuristics for Selective Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26133}, DOI={10.1609/aaai.v37i8.26133}, abstractNote={Selective classification (also known as classification with reject option) conservatively extends a classifier with a selection function to determine whether or not a prediction should be accepted (i.e., trusted, used, deployed). This is a highly relevant issue in socially sensitive tasks, such as credit scoring.\nState-of-the-art approaches rely on Deep Neural Networks (DNNs) that train at the same time both the classifier and the selection function. These approaches are model-specific and computationally expensive. We propose a model-agnostic approach, as it can work with any base probabilistic binary classification algorithm, and it can be scalable to large tabular datasets if the base classifier is so. 
The proposed algorithm, called SCROSS, exploits a cross-fitting strategy and theoretical results for quantile estimation to build the selection function. Experiments on real-world data show that SCROSS improves over existing methods.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pugnana, Andrea and Ruggieri, Salvatore}, year={2023}, month={Jun.}, pages={9461-9469} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26133/25905", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26133", + "pdf_size": 165662, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4011826789001082516&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sns.it;unipi.it", + "email": "sns.it;unipi.it", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Scuola Normale Superiore;University of Pisa", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.sns.it;https://www.unipi.it", + "aff_unique_abbr": "SNS;UNIP", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pisa", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26908", + "title": "A Multi-User Virtual World with Music Recommendations and Mood-Based Virtual Effects", + "track": "eaai symposium human aware ai in sound and music", + "status": "Technical", + "abstract": "The SEND/RETURN (S/R) project is created to explore the efficacy of content-based music recommendations alongside a uniquely generated Unreal Engine 5 (UE5) virtual environment based on audio features. S/R employs both a k-means clustering algorithm using audio features and a fast pattern matching (FPM) algorithm using 30-second audio signals to find similar-sounding songs to recommend to users. 
The feature values of the recommended song are then communicated via HTTP to the UE5 virtual environment, which changes a number of effects in real-time. All of this is being replicated from a listen-server to other clients to create a multiplayer audio session. S/R successfully creates a lightweight online environment that replicates song information to all clients and suggests new songs that alter the world around you. In this work, we extend S/R by training a convolutional neural network using Mel-spectrograms of 30-second audio samples to predict the mood of a song. This model can then orchestrate the post-processing effect in the UE5 virtual environment. The developed convolutional model had a validation accuracy of 67.5% in predicting 4 moods ('calm', 'energetic', 'happy', 'sad').", + "primary_area": "", + "author": "Charats Burch; Robert Sprowl; Mehmet Ergezer", + "authorids": "", + "aff": "Wentworth Institute of Technology; Wentworth Institute of Technology; Wentworth Institute of Technology", + "bibtex": "@article{Burch_Sprowl_Ergezer_2024, title={A Multi-User Virtual World with Music Recommendations and Mood-Based Virtual Effects}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26908}, DOI={10.1609/aaai.v37i13.26908}, abstractNote={The SEND/RETURN (S/R) project is created to explore the efficacy of content-based music recommendations alongside a uniquely generated Unreal Engine 5 (UE5) virtual environment based on audio features. S/R employs both a k-means clustering algorithm using audio features and a fast pattern matching (FPM) algorithm using 30-second audio signals to find similar-sounding songs to recommend to users. The feature values of the recommended song are then communicated via HTTP to the UE5 virtual environment, which changes a number of effects in real-time. All of this is being replicated from a listen-server to other clients to create a multiplayer audio session. 
S/R successfully creates a lightweight online environment that replicates song information to all clients and suggests new songs that alter the world around you. In this work, we extend S/R by training a convolutional neural network using Mel-spectrograms of 30-second audio samples to predict the mood of a song. This model can then orchestrate the post-processing effect in the UE5 virtual environment. The developed convolutional model had a validation accuracy of 67.5% in predicting 4 moods (\u2019calm\u2019, \u2019energetic\u2019, \u2019happy\u2019, \u2019sad\u2019).}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Burch, Charats and Sprowl, Robert and Ergezer, Mehmet}, year={2024}, month={Jul.}, pages={16063-16069} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26908/26680", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26908", + "pdf_size": 4717207, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:2eSB8wACBX8J:scholar.google.com/&scioq=A+Multi-User+Virtual+World+with+Music+Recommendations+and+Mood-Based+Virtual+Effects&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "wit.edu;wit.edu;wit.edu", + "email": "wit.edu;wit.edu;wit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Wentworth Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.wit.edu", + "aff_unique_abbr": "WIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26999", + "title": "A Mutually Enhanced Bidirectional Approach for Jointly Mining User Demand and Sentiment (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "User demand mining aims to identify the implicit demand from the e-commerce reviews, which are 
always irregular, vague and diverse. Existing sentiment analysis research mainly focuses on aspect-opinion-sentiment triplet extraction, while the deeper user demands remain unexplored. In this paper, we formulate a novel research question of jointly mining aspect-opinion-sentiment-demand, and propose a Mutually Enhanced Bidirectional Extraction (MEMB) framework for capturing the dynamic interaction among different types of information. Finally, experiments on Chinese e-commerce data demonstrate the efficacy of the proposed model.", + "primary_area": "", + "author": "Xue Mao; Haoda Qian; Minjie Yuan; Qiudan Li", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China; Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China; Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China; Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China", + "bibtex": "@article{Mao_Qian_Yuan_Li_2024, title={A Mutually Enhanced Bidirectional Approach for Jointly Mining User Demand and Sentiment (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26999}, DOI={10.1609/aaai.v37i13.26999}, abstractNote={User demand mining aims to identify the implicit demand from the e-commerce reviews, which are always irregular, vague and diverse. Existing sentiment analysis research mainly focuses on aspect-opinion-sentiment triplet extraction, while the deeper user demands remain unexplored. 
In this paper, we formulate a novel research question of jointly mining aspect-opinion-sentiment-demand, and propose a Mutually Enhanced Bidirectional Extraction (MEMB) framework for capturing the dynamic interaction among different types of information. Finally, experiments on Chinese e-commerce data demonstrate the efficacy of the proposed model.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Xue and Qian, Haoda and Yuan, Minjie and Li, Qiudan}, year={2024}, month={Jul.}, pages={16278-16279} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26999/26771", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26999", + "pdf_size": 76684, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:Q5FTCC8uYHwJ:scholar.google.com/&scioq=A+Mutually+Enhanced+Bidirectional+Approach+for+Jointly+Mining+User+Demand+and+Sentiment+(Student+Abstract)&hl=en&as_sdt=0,14", + "gs_version_total": 2, + "aff_domain": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26638", + "title": "A Neural Span-Based Continual Named Entity Recognition Model", + "track": "main", + "status": "Technical", + "abstract": "Named Entity Recognition (NER) models capable of Continual Learning (CL) are realistically valuable in areas where entity types continuously increase (e.g., personal assistants). 
Meanwhile the learning paradigm of NER advances to new patterns such as the span-based methods. However, its potential to CL has not been fully explored. In this paper, we propose SpanKL, a simple yet effective Span-based model with Knowledge distillation (KD) to preserve memories and multi-Label prediction to prevent conflicts in CL-NER. Unlike prior sequence labeling approaches, the inherently independent modeling in span and entity level with the designed coherent optimization on SpanKL promotes its learning at each incremental step and mitigates the forgetting. Experiments on synthetic CL datasets derived from OntoNotes and Few-NERD show that SpanKL significantly outperforms previous SoTA in many aspects, and obtains the smallest gap from CL to the upper bound revealing its high practiced value. The code is available at https://github.com/Qznan/SpanKL.", + "primary_area": "speech natural language processing", + "author": "Yunan Zhang; Qingcai Chen", + "authorids": "", + "aff": "Harbin Institute of Technology (Shenzhen), Shenzhen, China; Harbin Institute of Technology (Shenzhen), Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China", + "bibtex": "@article{Zhang_Chen_2023, title={A Neural Span-Based Continual Named Entity Recognition Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26638}, DOI={10.1609/aaai.v37i11.26638}, abstractNote={Named Entity Recognition (NER) models capable of Continual Learning (CL) are realistically valuable in areas where entity types continuously increase (e.g., personal assistants). Meanwhile the learning paradigm of NER advances to new patterns such as the span-based methods. However, its potential to CL has not been fully explored. In this paper, we propose SpanKL, a simple yet effective Span-based model with Knowledge distillation (KD) to preserve memories and multi-Label prediction to prevent conflicts in CL-NER. 
Unlike prior sequence labeling approaches, the inherently independent modeling in span and entity level with the designed coherent optimization on SpanKL promotes its learning at each incremental step and mitigates the forgetting. Experiments on synthetic CL datasets derived from OntoNotes and Few-NERD show that SpanKL significantly outperforms previous SoTA in many aspects, and obtains the smallest gap from CL to the upper bound revealing its high practiced value. The code is available at https://github.com/Qznan/SpanKL.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yunan and Chen, Qingcai}, year={2023}, month={Jun.}, pages={13993-14001} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26638/26410", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26638", + "pdf_size": 2174859, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4255912977537209789&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "outlook.com;hit.edu.cn", + "email": "outlook.com;hit.edu.cn", + "github": "https://github.com/Qznan/SpanKL", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "http://en.hhit.edu.cn/;", + "aff_unique_abbr": "HIT;", + "aff_campus_unique_index": "0;0+0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26832", + "title": "A New Challenge in Policy Evaluation", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "This paper proposes a new challenge in policy evaluation: to improve the online data efficiency of Monte Carlo methods via information extracted from offline data while maintaining the unbiasedness of Monte Carlo methods.", + "primary_area": "", + "author": "Shangtong Zhang", + 
"authorids": "", + "aff": "University of Virginia", + "bibtex": "@article{Zhang_2024, title={A New Challenge in Policy Evaluation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26832}, DOI={10.1609/aaai.v37i13.26832}, abstractNote={This paper proposes a new challenge in policy evaluation: to improve the online data efficiency of Monte Carlo methods via information extracted from offline data while maintaining the unbiasedness of Monte Carlo methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Shangtong}, year={2024}, month={Jul.}, pages={15465-15465} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26832/26604", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26832", + "pdf_size": 44789, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff_domain": "virginia.edu", + "email": "virginia.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Virginia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.virginia.edu", + "aff_unique_abbr": "UVA", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25606", + "title": "A Noise-Tolerant Differentiable Learning Approach for Single Occurrence Regular Expression with Interleaving", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of learning a single occurrence regular expression with interleaving (SOIRE) from a set of text strings possibly with noise. SOIRE fully supports interleaving and covers a large portion of regular expressions used in practice. Learning SOIREs is challenging because it requires heavy computation and text strings usually contain noise in practice. Most of the previous studies only learn restricted SOIREs and are not robust on noisy data. 
To tackle these issues, we propose a noise-tolerant differentiable learning approach SOIREDL for SOIRE. We design a neural network to simulate SOIRE matching and theoretically prove that certain assignments of the set of parameters learnt by the neural network, called faithful encodings, are one-to-one corresponding to SOIREs for a bounded size. Based on this correspondence, we interpret the target SOIRE from an assignment of the set of parameters of the neural network by exploring the nearest faithful encodings. Experimental results show that SOIREDL outperforms the state-of-the-art approaches, especially on noisy data.", + "primary_area": "data mining and knowledge management", + "author": "Rongzhen Ye; Tianqu Zhuang; Hai Wan; Jianfeng Du; Weilin Luo; Pingjia Liang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; Guangzhou Key Laboratory of Multilingual Intelligent Processing, Guangdong University of Foreign Studies; School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University", + "bibtex": "@article{Ye_Zhuang_Wan_Du_Luo_Liang_2023, title={A Noise-Tolerant Differentiable Learning Approach for Single Occurrence Regular Expression with Interleaving}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25606}, DOI={10.1609/aaai.v37i4.25606}, abstractNote={We study the problem of learning a single occurrence regular expression with interleaving (SOIRE) from a set of text strings possibly with noise. SOIRE fully supports interleaving and covers a large portion of regular expressions used in practice. Learning SOIREs is challenging because it requires heavy computation and text strings usually contain noise in practice. 
Most of the previous studies only learn restricted SOIREs and are not robust on noisy data. To tackle these issues, we propose a noise-tolerant differentiable learning approach SOIREDL for SOIRE. We design a neural network to simulate SOIRE matching and theoretically prove that certain assignments of the set of parameters learnt by the neural network, called faithful encodings, are one-to-one corresponding to SOIREs for a bounded size. Based on this correspondence, we interpret the target SOIRE from an assignment of the set of parameters of the neural network by exploring the nearest faithful encodings. Experimental results show that SOIREDL outperforms the state-of-the-art approaches, especially on noisy data.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Rongzhen and Zhuang, Tianqu and Wan, Hai and Du, Jianfeng and Luo, Weilin and Liang, Pingjia}, year={2023}, month={Jun.}, pages={4809-4817} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25606/25378", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25606", + "pdf_size": 576541, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4908825800146557003&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn;gdufs.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn;gdufs.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "Sun Yat-sen University;Guangdong University of Foreign Studies", + "aff_unique_dep": "School of Computer Science and Engineering;Guangzhou Key Laboratory of Multilingual Intelligent Processing", + "aff_unique_url": "http://www.sysu.edu.cn;http://www.gdufs.edu.cn", + "aff_unique_abbr": "SYSU;GDUFS", + "aff_campus_unique_index": "1", + "aff_campus_unique": 
";Guangzhou", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25691", + "title": "A Pair-Approximation Method for Modelling the Dynamics of Multi-Agent Stochastic Games", + "track": "main", + "status": "Technical", + "abstract": "Developing a dynamical model for learning in games has attracted much recent interest. In stochastic games, agents need to make decisions in multiple states, and transitions between states, in turn, influence the dynamics of strategies. While previous works typically focus either on 2-agent stochastic games or on normal form games under an infinite-agent setting, we aim at formally modelling the learning dynamics in stochastic games under the infinite-agent setting. With a novel use of pair-approximation method, we develop a formal model for myopic Q-learning in stochastic games with symmetric state transition. We verify the descriptive power of our model (a partial differential equation) across various games through comparisons with agent-based simulation results. Based on our proposed model, we can gain qualitative and quantitative insights into the influence of transition probabilities on the dynamics of strategies. 
In particular, we illustrate that a careful design of transition probabilities can help players overcome the social dilemmas and promote cooperation, even if agents are myopic learners.", + "primary_area": "game theory and economic paradigms", + "author": "Chen Chu; Zheng Yuan; Shuyue Hu; Chunjiang Mu; Zhen Wang", + "authorids": "", + "aff": "School of Statistics and Mathematics, Yunnan University of Finance and Economics + School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University; School of Statistics and Mathematics, Yunnan University of Finance and Economics; Shanghai Artificial Intelligence Laboratory; School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University + School of Cybersecurity, Northwestern Polytechnical University; School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University + School of Cybersecurity, Northwestern Polytechnical University", + "bibtex": "@article{Chu_Yuan_Hu_Mu_Wang_2023, title={A Pair-Approximation Method for Modelling the Dynamics of Multi-Agent Stochastic Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25691}, DOI={10.1609/aaai.v37i5.25691}, abstractNote={Developing a dynamical model for learning in games has attracted much recent interest. In stochastic games, agents need to make decisions in multiple states, and transitions between states, in turn, influence the dynamics of strategies. While previous works typically focus either on 2-agent stochastic games or on normal form games under an infinite-agent setting, we aim at formally modelling the learning dynamics in stochastic games under the infinite-agent setting. With a novel use of pair-approximation method, we develop a formal model for myopic Q-learning in stochastic games with symmetric state transition. 
We verify the descriptive power of our model (a partial differential equation) across various games through comparisons with agent-based simulation results. Based on our proposed model, we can gain qualitative and quantitative insights into the influence of transition probabilities on the dynamics of strategies. In particular, we illustrate that a careful design of transition probabilities can help players overcome the social dilemmas and promote cooperation, even if agents are myopic learners.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chu, Chen and Yuan, Zheng and Hu, Shuyue and Mu, Chunjiang and Wang, Zhen}, year={2023}, month={Jun.}, pages={5565-5572} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25691/25463", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25691", + "pdf_size": 3168068, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5780987607055622653&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": ";;pjlab.org.cn; ;nwpu.edu.cn", + "email": ";;pjlab.org.cn; ;nwpu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;2;1+1;1+1", + "aff_unique_norm": "Yunnan University of Finance and Economics;Northwestern Polytechnical University;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": "School of Statistics and Mathematics;School of Artificial Intelligence, OPtics and ElectroNics (iOPEN);", + "aff_unique_url": ";https://www.nwpu.edu.cn;http://www.shailab.org/", + "aff_unique_abbr": ";NWPU;Shanghai AI Lab", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25837", + "title": "A Parameterized Theory of PAC Learning", + "track": "main", + "status": "Technical", + "abstract": "Probably Approximately Correct (i.e., PAC) learning is a core concept of sample 
complexity theory, and efficient PAC learnability is often seen as a natural counterpart to the class P in classical computational complexity. But while the nascent theory of parameterized complexity has allowed us to push beyond the P-NP \"dichotomy\" in classical computational complexity and identify the exact boundaries of tractability for numerous problems, there is no analogue in the domain of sample complexity that could push beyond efficient PAC learnability.\n\nAs our core contribution, we fill this gap by developing a theory of parameterized PAC learning which allows us to shed new light on several recent PAC learning results that incorporated elements of parameterized complexity. Within the theory, we identify not one but two notions of fixed-parameter learnability that both form distinct counterparts to the class FPT - the core concept at the center of the parameterized complexity paradigm - and develop the machinery required to exclude fixed-parameter learnability. We then showcase the applications of this theory to identify refined boundaries of tractability for CNF and DNF learning as well as for a range of learning problems on graphs.", + "primary_area": "machine learning i", + "author": "Cornelius Brand; Robert Ganian; Kirill Simonov", + "authorids": "", + "aff": "Algorithms and Complexity Group, TU Wien, Austria; Algorithms and Complexity Group, TU Wien, Austria; Chair for Algorithm Engineering, Hasso Plattner Institute, Germany", + "bibtex": "@article{Brand_Ganian_Simonov_2023, title={A Parameterized Theory of PAC Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25837}, DOI={10.1609/aaai.v37i6.25837}, abstractNote={Probably Approximately Correct (i.e., PAC) learning is a core concept of sample complexity theory, and efficient PAC learnability is often seen as a natural counterpart to the class P in classical computational complexity. 
But while the nascent theory of parameterized complexity has allowed us to push beyond the P-NP \"dichotomy\" in classical computational complexity and identify the exact boundaries of tractability for numerous problems, there is no analogue in the domain of sample complexity that could push beyond efficient PAC learnability. As our core contribution, we fill this gap by developing a theory of parameterized PAC learning which allows us to shed new light on several recent PAC learning results that incorporated elements of parameterized complexity. Within the theory, we identify not one but two notions of fixed-parameter learnability that both form distinct counterparts to the class FPT - the core concept at the center of the parameterized complexity paradigm - and develop the machinery required to exclude fixed-parameter learnability. We then showcase the applications of this theory to identify refined boundaries of tractability for CNF and DNF learning as well as for a range of learning problems on graphs.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brand, Cornelius and Ganian, Robert and Simonov, Kirill}, year={2023}, month={Jun.}, pages={6834-6841} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25837/25609", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25837", + "pdf_size": 233339, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2558792459200599243&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ac.tuwien.ac.at;ac.tuwien.ac.at;hpi.de", + "email": "ac.tuwien.ac.at;ac.tuwien.ac.at;hpi.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "TU Wien;Hasso Plattner Institute", + "aff_unique_dep": "Algorithms and Complexity Group;Chair for Algorithm Engineering", + "aff_unique_url": "https://www.tuwien.ac.at;https://www.hpi.de", + "aff_unique_abbr": "TU Wien;HPI", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Austria;Germany" + }, + { + "id": "article-27013", + "title": "A Probabilistic Graph Diffusion Model for Source Localization (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Source localization, as a reverse problem of graph diffusion, is important for many applications such as rumor tracking, detecting computer viruses, and finding epidemic spreaders. However, it is still under-explored due to the inherent uncertainty of the diffusion process: after a long period of propagation, the same diffusion process may start with diverse sources. Most existing solutions utilize deterministic models and therefore cannot describe the diffusion uncertainty of sources. Moreover, current probabilistic approaches are hard to conduct smooth transformations with variational inference. To overcome the limitations, we propose a probabilistic framework using continuous normalizing flows with invertible transformations and graph neural networks to explicitly model the uncertainty of the diffusion source. 
Experimental results on two real-world datasets demonstrate the effectiveness of our model over strong baselines.", + "primary_area": "", + "author": "Tangjiang Qian; Xovee Xu; Zhe Xiao; Ting Zhong; Fan Zhou", + "authorids": "", + "aff": "University of Electronic Science and Technology of China, Chengdu, Sichuan 610054, China; University of Electronic Science and Technology of China, Chengdu, Sichuan 610054, China; Science and Technology on Communication Networks Laboratory, Shijiazhuang, Hebei 050050, China; Kashi Institute of Electronics and Information Industry, Kashi, Xinjiang 844000, China; University of Electronic Science and Technology of China, Chengdu, Sichuan 610054, China", + "bibtex": "@article{Qian_Xu_Xiao_Zhong_Zhou_2024, title={A Probabilistic Graph Diffusion Model for Source Localization (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27013}, DOI={10.1609/aaai.v37i13.27013}, abstractNote={Source localization, as a reverse problem of graph diffusion, is important for many applications such as rumor tracking, detecting computer viruses, and finding epidemic spreaders. However, it is still under-explored due to the inherent uncertainty of the diffusion process: after a long period of propagation, the same diffusion process may start with diverse sources. Most existing solutions utilize deterministic models and therefore cannot describe the diffusion uncertainty of sources. Moreover, current probabilistic approaches are hard to conduct smooth transformations with variational inference. To overcome the limitations, we propose a probabilistic framework using continuous normalizing flows with invertible transformations and graph neural networks to explicitly model the uncertainty of the diffusion source. 
Experimental results on two real-world datasets demonstrate the effectiveness of our model over strong baselines.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qian, Tangjiang and Xu, Xovee and Xiao, Zhe and Zhong, Ting and Zhou, Fan}, year={2024}, month={Jul.}, pages={16306-16307} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27013/26785", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27013", + "pdf_size": 347309, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15016135354398767791&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "std.uestc.edu.cn;ieee.org;126.com;uestc.edu.cn;uestc.edu.cn", + "email": "std.uestc.edu.cn;ieee.org;126.com;uestc.edu.cn;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Science and Technology on Communication Networks Laboratory;Kashi Institute of Electronics and Information Industry", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.uestc.edu.cn/;;", + "aff_unique_abbr": "UESTC;;", + "aff_campus_unique_index": "0;0;2;0", + "aff_campus_unique": "Chengdu;;Kashi", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26460", + "title": "A Proof That Using Crossover Can Guarantee Exponential Speed-Ups in Evolutionary Multi-Objective Optimisation", + "track": "main", + "status": "Technical", + "abstract": "Evolutionary algorithms are popular algorithms for multiobjective optimisation (also called Pareto optimisation) as they use a population to store trade-offs between different objectives. Despite their popularity, the theoretical foundation of multiobjective evolutionary optimisation (EMO) is still in its early development. 
Fundamental questions such as the benefits of the crossover operator are still not fully understood.\n\nWe provide a theoretical analysis of well-known EMO algorithms GSEMO and NSGA-II to showcase the possible advantages of crossover. We propose a class of problems on which these EMO algorithms using crossover find the Pareto set in expected polynomial time. In sharp contrast, they and many other EMO algorithms without crossover require exponential time to even find a single Pareto-optimal point. This is the first example of an exponential performance gap through the use of crossover for the widely used NSGA-II algorithm.", + "primary_area": "search and optimization", + "author": "Duc-Cuong Dang; Andre Opris; Bahare Salehi; Dirk Sudholt", + "authorids": "", + "aff": "Chair of Algorithms for Intelligent Systems, University of Passau, Passau, Germany; Chair of Algorithms for Intelligent Systems, University of Passau, Passau, Germany; Chair of Algorithms for Intelligent Systems, University of Passau, Passau, Germany + School of Electrical and Computer Engineering, Shiraz University, Shiraz, Iran; Chair of Algorithms for Intelligent Systems, University of Passau, Passau, Germany", + "bibtex": "@article{Dang_Opris_Salehi_Sudholt_2023, title={A Proof That Using Crossover Can Guarantee Exponential Speed-Ups in Evolutionary Multi-Objective Optimisation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26460}, DOI={10.1609/aaai.v37i10.26460}, abstractNote={Evolutionary algorithms are popular algorithms for multiobjective optimisation (also called Pareto optimisation) as they use a population to store trade-offs between different objectives. Despite their popularity, the theoretical foundation of multiobjective evolutionary optimisation (EMO) is still in its early development. Fundamental questions such as the benefits of the crossover operator are still not fully understood. 
We provide a theoretical analysis of well-known EMO algorithms GSEMO and NSGA-II to showcase the possible advantages of crossover. We propose a class of problems on which these EMO algorithms using crossover find the Pareto set in expected polynomial time. In sharp contrast, they and many other EMO algorithms without crossover require exponential time to even find a single Pareto-optimal point. This is the first example of an exponential performance gap through the use of crossover for the widely used NSGA-II algorithm.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dang, Duc-Cuong and Opris, Andre and Salehi, Bahare and Sudholt, Dirk}, year={2023}, month={Jun.}, pages={12390-12398} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26460/26232", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26460", + "pdf_size": 166340, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18074814677619140351&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "uni-passau.de;uni-passau.de;gmail.com;uni-passau.de", + "email": "uni-passau.de;uni-passau.de;gmail.com;uni-passau.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "University of Passau;Shiraz University", + "aff_unique_dep": "Chair of Algorithms for Intelligent Systems;School of Electrical and Computer Engineering", + "aff_unique_url": "https://www.uni-passau.de;", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "0;0;0+1;0", + "aff_campus_unique": "Passau;Shiraz", + "aff_country_unique_index": "0;0;0+1;0", + "aff_country_unique": "Germany;Iran" + }, + { + "id": "article-25621", + "title": "A Provable Framework of Learning Graph Embeddings via Summarization", + "track": "main", + "status": "Technical", + "abstract": "Given a large graph, can we learn its node embeddings from a smaller summary graph? 
What is the relationship between embeddings learned from original graphs and their summary graphs? Graph representation learning plays an important role in many graph mining applications, but learning em-beddings of large-scale graphs remains a challenge. Recent works try to alleviate it via graph summarization, which typ-ically includes the three steps: reducing the graph size by combining nodes and edges into supernodes and superedges,learning the supernode embedding on the summary graph and then restoring the embeddings of the original nodes. How-ever, the justification behind those steps is still unknown.\nIn this work, we propose GELSUMM, a well-formulated graph embedding learning framework based on graph sum-marization, in which we show the theoretical ground of learn-ing from summary graphs and the restoration with the three well-known graph embedding approaches in a closed form.Through extensive experiments on real-world datasets, we demonstrate that our methods can learn graph embeddings with matching or better performance on downstream tasks.This work provides theoretical analysis for learning node em-beddings via summarization and helps explain and under-stand the mechanism of the existing works.", + "primary_area": "data mining and knowledge management", + "author": "Houquan Zhou; Shenghua Liu; Danai Koutra; Huawei Shen; Xueqi Cheng", + "authorids": "", + "aff": "Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; University of Michigan; Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences", + "bibtex": "@article{Zhou_Liu_Koutra_Shen_Cheng_2023, title={A Provable Framework of Learning Graph Embeddings via Summarization}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25621}, DOI={10.1609/aaai.v37i4.25621}, abstractNote={Given a large graph, can we learn its node embeddings from a smaller summary graph? What is the relationship between embeddings learned from original graphs and their summary graphs? Graph representation learning plays an important role in many graph mining applications, but learning em-beddings of large-scale graphs remains a challenge. Recent works try to alleviate it via graph summarization, which typ-ically includes the three steps: reducing the graph size by combining nodes and edges into supernodes and superedges,learning the supernode embedding on the summary graph and then restoring the embeddings of the original nodes. How-ever, the justification behind those steps is still unknown.\nIn this work, we propose GELSUMM, a well-formulated graph embedding learning framework based on graph sum-marization, in which we show the theoretical ground of learn-ing from summary graphs and the restoration with the three well-known graph embedding approaches in a closed form.Through extensive experiments on real-world datasets, we demonstrate that our methods can learn graph embeddings with matching or better performance on downstream tasks.This work provides theoretical analysis for learning node em-beddings via summarization and helps explain and under-stand the mechanism of the existing works.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Houquan and Liu, Shenghua and Koutra, Danai and Shen, Huawei and Cheng, Xueqi}, year={2023}, month={Jun.}, pages={4946-4953} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25621/25393", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25621", + "pdf_size": 673607, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11661730289258206547&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": 
"ict.ac.cn;ict.ac.cn;umich.edu.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;umich.edu.cn;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;University of Michigan", + "aff_unique_dep": "Institute of Computing Technology;;", + "aff_unique_url": "http://www.ict.ac.cn;http://www.ucas.ac.cn;https://www.umich.edu", + "aff_unique_abbr": "CAS;UCAS;UM", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;0+0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26516", + "title": "A Question-Answering Approach to Key Value Pair Extraction from Form-Like Document Images", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we present a new question-answering (QA) based key-value pair extraction approach, called KVPFormer, to robustly extracting key-value relationships between entities from form-like document images. Specifically, KVPFormer first identifies key entities from all entities in an image with a Transformer encoder, then takes these key entities as questions and feeds them into a Transformer decoder to predict their corresponding answers (i.e., value entities) in parallel. To achieve higher answer prediction accuracy, we propose a coarse-to-fine answer prediction approach further, which first extracts multiple answer candidates for each identified question in the coarse stage and then selects the most likely one among these candidates in the fine stage. In this way, the learning difficulty of answer prediction can be effectively reduced so that the prediction accuracy can be improved. Moreover, we introduce a spatial compatibility attention bias into the self-attention/cross-attention mechanism for KVPFormer to better model the spatial interactions between entities. 
With these new techniques, our proposed KVPFormer achieves state-of-the-art results on FUNSD and XFUND datasets, outperforming the previous best-performing method by 7.2% and 13.2% in F1 score, respectively.", + "primary_area": "speech natural language processing", + "author": "Kai Hu; Zhuoyuan Wu; Zhuoyao Zhong; Weihong Lin; Lei Sun; Qiang Huo", + "authorids": "", + "aff": "Department of EEIS., University of Science and Technology of China, Hefei, China + Microsoft Research Asia, Beijing, China; Peking University Shenzhen Graduate School, Shenzhen, China + Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China", + "bibtex": "@article{Hu_Wu_Zhong_Lin_Sun_Huo_2023, title={A Question-Answering Approach to Key Value Pair Extraction from Form-Like Document Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26516}, DOI={10.1609/aaai.v37i11.26516}, abstractNote={In this paper, we present a new question-answering (QA) based key-value pair extraction approach, called KVPFormer, to robustly extracting key-value relationships between entities from form-like document images. Specifically, KVPFormer first identifies key entities from all entities in an image with a Transformer encoder, then takes these key entities as questions and feeds them into a Transformer decoder to predict their corresponding answers (i.e., value entities) in parallel. To achieve higher answer prediction accuracy, we propose a coarse-to-fine answer prediction approach further, which first extracts multiple answer candidates for each identified question in the coarse stage and then selects the most likely one among these candidates in the fine stage. In this way, the learning difficulty of answer prediction can be effectively reduced so that the prediction accuracy can be improved. 
Moreover, we introduce a spatial compatibility attention bias into the self-attention/cross-attention mechanism for KVPFormer to better model the spatial interactions between entities. With these new techniques, our proposed KVPFormer achieves state-of-the-art results on FUNSD and XFUND datasets, outperforming the previous best-performing method by 7.2% and 13.2% in F1 score, respectively.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Kai and Wu, Zhuoyuan and Zhong, Zhuoyao and Lin, Weihong and Sun, Lei and Huo, Qiang}, year={2023}, month={Jun.}, pages={12899-12906} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26516/26288", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26516", + "pdf_size": 223732, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14727136296587094696&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.ustc.edu.cn;pku.edu.cn;gmail.com;gmail.com;gmail.com;microsoft.com", + "email": "mail.ustc.edu.cn;pku.edu.cn;gmail.com;gmail.com;gmail.com;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2+1;1;1;1;1", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research Asia;Peking University", + "aff_unique_dep": "Department of EEIS;Research;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;http://www.pku.edu.cn", + "aff_unique_abbr": "USTC;MSRA;PKU", + "aff_campus_unique_index": "0+1;2+1;1;1;1;1", + "aff_campus_unique": "Hefei;Beijing;Shenzhen", + "aff_country_unique_index": "0+0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26976", + "title": "A Reinforcement Learning Badminton Environment for Simulating Player Tactics (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Recent techniques for analyzing 
sports precisely has stimulated various approaches to improve player performance and fan engagement.\nHowever, existing approaches are only able to evaluate offline performance since testing in real-time matches requires exhaustive costs and cannot be replicated.\nTo test in a safe and reproducible simulator, we focus on turn-based sports and introduce a badminton environment by simulating rallies with different angles of view and designing the states, actions, and training procedures.\nThis benefits not only coaches and players by simulating past matches for tactic investigation, but also researchers from rapidly evaluating their novel algorithms.\nOur code is available at https://github.com/wywyWang/CoachAI-Projects/tree/main/Strategic%20Environment.", + "primary_area": "", + "author": "Li-Chun Huang; Nai-Zen Hsueh; Yen-Che Chien; Wei-Yao Wang; Kuang-Da Wang; Wen-Chih Peng", + "authorids": "", + "aff": "National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan", + "bibtex": "@article{Huang_Hsueh_Chien_Wang_Wang_Peng_2024, title={A Reinforcement Learning Badminton Environment for Simulating Player Tactics (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26976}, DOI={10.1609/aaai.v37i13.26976}, abstractNote={Recent techniques for analyzing sports precisely has stimulated various approaches to improve player performance and fan engagement.\nHowever, existing approaches are only able to evaluate offline performance since testing in real-time matches requires exhaustive costs and cannot be replicated.\nTo test in a safe and reproducible simulator, we focus on turn-based sports and introduce a badminton environment by simulating rallies with 
different angles of view and designing the states, actions, and training procedures.\nThis benefits not only coaches and players by simulating past matches for tactic investigation, but also researchers from rapidly evaluating their novel algorithms.\nOur code is available at https://github.com/wywyWang/CoachAI-Projects/tree/main/Strategic%20Environment.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Li-Chun and Hsueh, Nai-Zen and Chien, Yen-Che and Wang, Wei-Yao and Wang, Kuang-Da and Peng, Wen-Chih}, year={2024}, month={Jul.}, pages={16232-16233} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26976/26748", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26976", + "pdf_size": 87289, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16402370885316056559&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;nycu.edu.tw;nycu.edu.tw;nycu.edu.tw;nctu.edu.tw;cs.nycu.edu.tw", + "email": "gmail.com;nycu.edu.tw;nycu.edu.tw;nycu.edu.tw;nctu.edu.tw;cs.nycu.edu.tw", + "github": "https://github.com/wywyWang/CoachAI-Projects/tree/main/Strategic%20Environment", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "National Yang Ming Chiao Tung University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nycu.edu.tw", + "aff_unique_abbr": "NYCU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Hsinchu", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-26753", + "title": "A Risk-Sensitive Approach to Policy Optimization", + "track": "aaai special track", + "status": "Technical", + "abstract": "Standard deep reinforcement learning (DRL) aims to maximize expected reward, considering collected experiences equally in formulating a policy. 
This differs from human decision-making, where gains and losses are valued differently and outlying outcomes are given increased consideration. It also fails to capitalize on opportunities to improve safety and/or performance through the incorporation of distributional context. Several approaches to distributional DRL have been investigated, with one popular strategy being to evaluate the projected distribution of returns for possible actions. We propose a more direct approach whereby risk-sensitive objectives, specified in terms of the cumulative distribution function (CDF) of the distribution of full-episode rewards, are optimized. This approach allows for outcomes to be weighed based on relative quality, can be used for both continuous and discrete action spaces, and may naturally be applied in both constrained and unconstrained settings. We show how to compute an asymptotically consistent estimate of the policy gradient for a broad class of risk-sensitive objectives via sampling, subsequently incorporating variance reduction and regularization measures to facilitate effective on-policy learning. We then demonstrate that the use of moderately \"pessimistic\" risk profiles, which emphasize scenarios where the agent performs poorly, leads to enhanced exploration and a continual focus on addressing deficiencies. We test the approach using different risk profiles in six OpenAI Safety Gym environments, comparing to state of the art on-policy methods. Without cost constraints, we find that pessimistic risk profiles can be used to reduce cost while improving total reward accumulation. With cost constraints, they are seen to provide higher positive rewards than risk-neutral approaches at the prescribed allowable cost.", + "primary_area": "safe and robust ai", + "author": "Jared Markowitz; Ryan W. 
Gardner; Ashley Llorens; Raman Arora; I-Jeng Wang", + "authorids": "", + "aff": "Johns Hopkins University Applied Physics Laboratory; Johns Hopkins University Applied Physics Laboratory; Microsoft Corporation; Johns Hopkins University; Johns Hopkins University Applied Physics Laboratory", + "bibtex": "@article{Markowitz_Gardner_Llorens_Arora_Wang_2023, title={A Risk-Sensitive Approach to Policy Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26753}, DOI={10.1609/aaai.v37i12.26753}, abstractNote={Standard deep reinforcement learning (DRL) aims to maximize expected reward, considering collected experiences equally in formulating a policy. This differs from human decision-making, where gains and losses are valued differently and outlying outcomes are given increased consideration. It also fails to capitalize on opportunities to improve safety and/or performance through the incorporation of distributional context. Several approaches to distributional DRL have been investigated, with one popular strategy being to evaluate the projected distribution of returns for possible actions. We propose a more direct approach whereby risk-sensitive objectives, specified in terms of the cumulative distribution function (CDF) of the distribution of full-episode rewards, are optimized. This approach allows for outcomes to be weighed based on relative quality, can be used for both continuous and discrete action spaces, and may naturally be applied in both constrained and unconstrained settings. We show how to compute an asymptotically consistent estimate of the policy gradient for a broad class of risk-sensitive objectives via sampling, subsequently incorporating variance reduction and regularization measures to facilitate effective on-policy learning. 
We then demonstrate that the use of moderately "pessimistic" risk profiles, which emphasize scenarios where the agent performs poorly, leads to enhanced exploration and a continual focus on addressing deficiencies. We test the approach using different risk profiles in six OpenAI Safety Gym environments, comparing to state of the art on-policy methods. Without cost constraints, we find that pessimistic risk profiles can be used to reduce cost while improving total reward accumulation. With cost constraints, they are seen to provide higher positive rewards than risk-neutral approaches at the prescribed allowable cost.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Markowitz, Jared and Gardner, Ryan W. and Llorens, Ashley and Arora, Raman and Wang, I-Jeng}, year={2023}, month={Jun.}, pages={15019-15027} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26753/26525", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26753", + "pdf_size": 12296921, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11488471258203981120&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "jhuapl.edu; ; ; ; ", + "email": "jhuapl.edu; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Johns Hopkins University;Microsoft Corporation", + "aff_unique_dep": "Applied Physics Laboratory;", + "aff_unique_url": "https://www.jhuapl.edu;https://www.microsoft.com", + "aff_unique_abbr": "JHU APL;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26838", + "title": "A Robust and Scalable Stacked Ensemble for Day-Ahead Forecasting of Distribution Network Losses", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Accurate day-ahead nominations of grid losses in 
electrical distribution networks are important to reduce the societal cost of these losses. We present a modification of the CatBoost ensemble-based system for day-ahead grid loss prediction detailed in Dalal et al. (2020), making four main changes. Base models predict on the log-space of the target, to ensure non-negative predictions. The model ensemble is changed to include different model types, for increased ensemble variance. Feature engineering is applied to consumption and weather forecasts, to improve base model performance. Finally, a non-negative least squares-based stacking method that uses as many available models as possible for each prediction is introduced, to achieve an improved model selection that is robust to missing data.\nWhen deployed for over three months in 2022, the resulting system reduced mean absolute error by 10.7% compared to the system from Dalal et al. (2020), a reduction from 5.05 to 4.51 MW. With no tuning of machine learning parameters, the system was also extended to three new grids, where it achieved similar relative error as on the old grids. 
Our system is robust and easily scalable, and our proposed stacking method could provide improved performance in applications outside grid loss.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Gunnar Grotmol; Eivind Hovdeg\u00e5rd Furdal; Nisha Dalal; Are L\u00f8kken Ottesen; Ella-Lovise Hammervold R\u00f8rvik; Martin M\u00f8ln\u00e5; Gleb Sizov; Odd Erik Gundersen", + "authorids": "", + "aff": "Aneo AS, Trondheim, Norway + Norwegian University of Science and Technology, Trondheim, Norway; Aneo AS, Trondheim, Norway + Norwegian University of Science and Technology, Trondheim, Norway; Aneo AS, Trondheim, Norway; Aneo AS, Trondheim, Norway; Aneo AS, Trondheim, Norway; Aneo AS, Trondheim, Norway; Aneo AS, Trondheim, Norway; Aneo AS, Trondheim, Norway + Norwegian University of Science and Technology, Trondheim, Norway", + "bibtex": "@article{Grotmol_Furdal_Dalal_Ottesen_R\u00f8rvik_M\u00f8ln\u00e5_Sizov_Gundersen_2024, title={A Robust and Scalable Stacked Ensemble for Day-Ahead Forecasting of Distribution Network Losses}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26838}, DOI={10.1609/aaai.v37i13.26838}, abstractNote={Accurate day-ahead nominations of grid losses in electrical distribution networks are important to reduce the societal cost of these losses. We present a modification of the CatBoost ensemble-based system for day-ahead grid loss prediction detailed in Dalal et al. (2020), making four main changes. Base models predict on the log-space of the target, to ensure non-negative predictions. The model ensemble is changed to include different model types, for increased ensemble variance. Feature engineering is applied to consumption and weather forecasts, to improve base model performance. 
Finally, a non-negative least squares-based stacking method that uses as many available models as possible for each prediction is introduced, to achieve an improved model selection that is robust to missing data.\nWhen deployed for over three months in 2022, the resulting system reduced mean absolute error by 10.7% compared to the system from Dalal et al. (2020), a reduction from 5.05 to 4.51 MW. With no tuning of machine learning parameters, the system was also extended to three new grids, where it achieved similar relative error as on the old grids. Our system is robust and easily scalable, and our proposed stacking method could provide improved performance in applications outside grid loss.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Grotmol, Gunnar and Furdal, Eivind Hovdeg\u00e5rd and Dalal, Nisha and Ottesen, Are L\u00f8kken and R\u00f8rvik, Ella-Lovise Hammervold and M\u00f8ln\u00e5, Martin and Sizov, Gleb and Gundersen, Odd Erik}, year={2024}, month={Jul.}, pages={15503-15511} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26838/26610", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26838", + "pdf_size": 363074, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15738204136638587904&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ntnu.no;gmail.com;aneo.com;aneo.com;aneo.com;aneo.com;aneo.com;ntnu.no", + "email": "ntnu.no;gmail.com;aneo.com;aneo.com;aneo.com;aneo.com;aneo.com;ntnu.no", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0;0;0;0;0;0+1", + "aff_unique_norm": "Aneo AS;Norwegian University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": ";https://www.ntnu.no", + "aff_unique_abbr": ";NTNU", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Trondheim", + "aff_country_unique_index": "0+0;0+0;0;0;0;0;0;0+0", + "aff_country_unique": "Norway" + }, 
+ { + "id": "article-26493", + "title": "A Scope Sensitive and Result Attentive Model for Multi-Intent Spoken Language Understanding", + "track": "main", + "status": "Technical", + "abstract": "Multi-Intent Spoken Language Understanding (SLU), a novel and more complex scenario of SLU, is attracting increasing attention. Unlike traditional SLU, each intent in this scenario has its specific scope. Semantic information outside the scope even hinders the prediction, which tremendously increases the difficulty of intent detection. More seriously, guiding slot filling with these inaccurate intent labels suffers error propagation problems, resulting in unsatisfied overall performance. To solve these challenges, in this paper, we propose a novel Scope-Sensitive Result Attention Network (SSRAN) based on Transformer, which contains a Scope Recognizer (SR) and a Result Attention Network (RAN). SR assignments scope information to each token, reducing the distraction of out-of-scope tokens. RAN effectively utilizes the bidirectional interaction between SF and ID results, mitigating the error propagation problem. 
Experiments on two public datasets indicate that our model significantly improves SLU performance (5.4% and 2.1% on Overall accuracy) over the state-of-the-art baseline.", + "primary_area": "speech natural language processing", + "author": "Lizhi Cheng; Wenmian Yang; Weijia Jia", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Nanyang Technological University; BNU-UIC Institute of Artificial Intelligence and Future Networks, Beijing Normal University (Zhuhai), Guangdong Key Lab of AI and Multi-Modal Data Processing, BNU-HKBU United International College", + "bibtex": "@article{Cheng_Yang_Jia_2023, title={A Scope Sensitive and Result Attentive Model for Multi-Intent Spoken Language Understanding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26493}, DOI={10.1609/aaai.v37i11.26493}, abstractNote={Multi-Intent Spoken Language Understanding (SLU), a novel and more complex scenario of SLU, is attracting increasing attention. Unlike traditional SLU, each intent in this scenario has its specific scope. Semantic information outside the scope even hinders the prediction, which tremendously increases the difficulty of intent detection. More seriously, guiding slot filling with these inaccurate intent labels suffers error propagation problems, resulting in unsatisfied overall performance. To solve these challenges, in this paper, we propose a novel Scope-Sensitive Result Attention Network (SSRAN) based on Transformer, which contains a Scope Recognizer (SR) and a Result Attention Network (RAN). SR assignments scope information to each token, reducing the distraction of out-of-scope tokens. RAN effectively utilizes the bidirectional interaction between SF and ID results, mitigating the error propagation problem. 
Experiments on two public datasets indicate that our model significantly improves SLU performance (5.4% and 2.1% on Overall accuracy) over the state-of-the-art baseline.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Lizhi and Yang, Wenmian and Jia, Weijia}, year={2023}, month={Jun.}, pages={12691-12699} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26493/26265", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26493", + "pdf_size": 261483, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18368785765312492083&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;ntu.edu.sg;bnu.edu.cn", + "email": "sjtu.edu.cn;ntu.edu.sg;bnu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Shanghai Jiao Tong University;Nanyang Technological University;Beijing Normal University", + "aff_unique_dep": ";;Institute of Artificial Intelligence and Future Networks", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.ntu.edu.sg;http://www.bnu.edu.cn", + "aff_unique_abbr": "SJTU;NTU;BNU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Zhuhai", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25074", + "title": "A Semi-parametric Model for Decision Making in High-Dimensional Sensory Discrimination Tasks", + "track": "main", + "status": "Technical", + "abstract": "Psychometric functions typically characterize binary sensory decisions along a single stimulus dimension. However, real-life sensory tasks vary along a greater variety of dimensions (e.g. color, contrast and luminance for visual stimuli). 
Approaches to characterizing high-dimensional sensory spaces either require strong parametric assumptions about these additional contextual dimensions, or fail to leverage known properties of classical psychometric curves. We overcome both limitations by introducing a semi-parametric model of sensory discrimination that applies traditional psychophysical models along a stimulus intensity dimension, but puts Gaussian process (GP) priors on the parameters of these models with respect to the remaining dimensions. By combining the flexibility of the GP with the deep literature on parametric psychophysics, our semi-parametric models achieve good performance with much less data than baselines on both synthetic and real-world, high-dimensional psychophysics datasets. We additionally show strong performance in a Bayesian active learning setting, and present a novel active learning paradigm for the semi-parametric model.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Stephen Keeley; Benjamin Letham; Craig Sanders; Chase Tymms; Michael Shvartsman", + "authorids": "", + "aff": "Department of Natural Sciences, Fordham University, USA+Meta; Meta; Meta; Meta; Meta", + "bibtex": "@article{Keeley_Letham_Sanders_Tymms_Shvartsman_2023, title={A Semi-parametric Model for Decision Making in High-Dimensional Sensory Discrimination Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25074}, DOI={10.1609/aaai.v37i1.25074}, abstractNote={Psychometric functions typically characterize binary sensory decisions along a single stimulus dimension. However, real-life sensory tasks vary along a greater variety of dimensions (e.g. color, contrast and luminance for visual stimuli). Approaches to characterizing high-dimensional sensory spaces either require strong parametric assumptions about these additional contextual dimensions, or fail to leverage known properties of classical psychometric curves. 
We overcome both limitations by introducing a semi-parametric model of sensory discrimination that applies traditional psychophysical models along a stimulus intensity dimension, but puts Gaussian process (GP) priors on the parameters of these models with respect to the remaining dimensions. By combining the flexibility of the GP with the deep literature on parametric psychophysics, our semi-parametric models achieve good performance with much less data than baselines on both synthetic and real-world, high-dimensional psychophysics datasets. We additionally show strong performance in a Bayesian active learning setting, and present a novel active learning paradigm for the semi-parametric model.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Keeley, Stephen and Letham, Benjamin and Sanders, Craig and Tymms, Chase and Shvartsman, Michael}, year={2023}, month={Jun.}, pages={40-47} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25074/24846", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25074", + "pdf_size": 1896830, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13949803962931839511&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "fordham.edu;meta.com;meta.com;meta.com;meta.com", + "email": "fordham.edu;meta.com;meta.com;meta.com;meta.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1;1", + "aff_unique_norm": "Fordham University;Meta Platforms, Inc.", + "aff_unique_dep": "Department of Natural Sciences;", + "aff_unique_url": "https://www.fordham.edu;https://meta.com", + "aff_unique_abbr": "Fordham;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26745", + "title": "A Semidefinite Relaxation Based Branch-and-Bound Method for Tight Neural Network Verification", + 
"track": "aaai special track", + "status": "Technical", + "abstract": "We introduce a novel method based on semidefinite program (SDP) for the tight and efficient verification of neural networks. The proposed SDP relaxation advances the present state of the art in SDP-based neural network verification by adding a set of linear constraints based on eigenvectors. We extend this novel SDP relaxation by combining it with a branch-and-bound method that can provably close the relaxation gap up to zero. We show formally that the proposed approach leads to a provably tighter solution than the present state of the art. We report experimental results showing that the proposed method outperforms baselines in terms of verified accuracy while retaining an acceptable computational overhead.", + "primary_area": "safe and robust ai", + "author": "Jianglin Lan; Benedikt Br\u00fcckner; Alessio Lomuscio", + "authorids": "", + "aff": "James Watt School of Engineering, University of Glasgow, UK; Department of Computing, Imperial College London, UK; Department of Computing, Imperial College London, UK", + "bibtex": "@article{Lan_Br\u00fcckner_Lomuscio_2023, title={A Semidefinite Relaxation Based Branch-and-Bound Method for Tight Neural Network Verification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26745}, DOI={10.1609/aaai.v37i12.26745}, abstractNote={We introduce a novel method based on semidefinite program (SDP) for the tight and efficient verification of neural networks. The proposed SDP relaxation advances the present state of the art in SDP-based neural network verification by adding a set of linear constraints based on eigenvectors. We extend this novel SDP relaxation by combining it with a branch-and-bound method that can provably close the relaxation gap up to zero. We show formally that the proposed approach leads to a provably tighter solution than the present state of the art. 
We report experimental results showing that the proposed method outperforms baselines in terms of verified accuracy while retaining an acceptable computational overhead.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lan, Jianglin and Br\u00fcckner, Benedikt and Lomuscio, Alessio}, year={2023}, month={Jun.}, pages={14946-14954} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26745/26517", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26745", + "pdf_size": 222200, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4074455101214656020&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "glasgow.ac.uk;imperial.ac.uk;imperial.ac.uk", + "email": "glasgow.ac.uk;imperial.ac.uk;imperial.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "University of Glasgow;Imperial College London", + "aff_unique_dep": "James Watt School of Engineering;Department of Computing", + "aff_unique_url": "https://www.gla.ac.uk;https://www.imperial.ac.uk", + "aff_unique_abbr": "UoG;Imperial", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Glasgow;London", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25759", + "title": "A Set of Control Points Conditioned Pedestrian Trajectory Prediction", + "track": "main", + "status": "Technical", + "abstract": "Predicting the trajectories of pedestrians in crowded conditions is an important task for applications like autonomous navigation systems. Previous studies have tackled this problem using two strategies. They (1) infer all future steps recursively, or (2) predict the potential destinations of pedestrians at once and interpolate the intermediate steps to arrive there. 
However, these strategies often suffer from the accumulated errors of the recursive inference, or restrictive assumptions about social relations in the intermediate path. In this paper, we present a graph convolutional network-based trajectory prediction. Firstly, we propose a control point prediction that divides the future path into three sections and infers the intermediate destinations of pedestrians to reduce the accumulated error. To do this, we construct multi-relational weighted graphs to account for their physical and complex social relations. We then introduce a trajectory refinement step based on a spatio-temporal and multi-relational graph. By considering the social interactions between neighbors, better prediction results are achievable. In experiments, the proposed network achieves state-of-the-art performance on various real-world trajectory prediction benchmarks.", + "primary_area": "intelligent robotics", + "author": "Inhwan Bae; Hae-Gon Jeon", + "authorids": "", + "aff": "Gwangju Institute of Science and Technology (GIST); Gwangju Institute of Science and Technology (GIST)", + "bibtex": "@article{Bae_Jeon_2023, title={A Set of Control Points Conditioned Pedestrian Trajectory Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25759}, DOI={10.1609/aaai.v37i5.25759}, abstractNote={Predicting the trajectories of pedestrians in crowded conditions is an important task for applications like autonomous navigation systems. Previous studies have tackled this problem using two strategies. They (1) infer all future steps recursively, or (2) predict the potential destinations of pedestrians at once and interpolate the intermediate steps to arrive there. However, these strategies often suffer from the accumulated errors of the recursive inference, or restrictive assumptions about social relations in the intermediate path. In this paper, we present a graph convolutional network-based trajectory prediction. 
Firstly, we propose a control point prediction that divides the future path into three sections and infers the intermediate destinations of pedestrians to reduce the accumulated error. To do this, we construct multi-relational weighted graphs to account for their physical and complex social relations. We then introduce a trajectory refinement step based on a spatio-temporal and multi-relational graph. By considering the social interactions between neighbors, better prediction results are achievable. In experiments, the proposed network achieves state-of-the-art performance on various real-world trajectory prediction benchmarks.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bae, Inhwan and Jeon, Hae-Gon}, year={2023}, month={Jun.}, pages={6155-6165} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25759/25531", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25759", + "pdf_size": 2426199, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3210843190882818752&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gm.gist.ac.kr;gist.ac.kr", + "email": "gm.gist.ac.kr;gist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Gwangju Institute of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gist.ac.kr", + "aff_unique_abbr": "GIST", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Gwangju", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25460", + "title": "A Simple Baseline for Multi-Camera 3D Object Detection", + "track": "main", + "status": "Technical", + "abstract": "3D object detection with surrounding cameras has been a promising direction for autonomous driving. In this paper, we present SimMOD, a Simple baseline for Multi-camera Object Detection, to solve the problem. 
To incorporate multiview information as well as build upon previous efforts on monocular 3D object detection, the framework is built on sample-wise object proposals and designed to work in a twostage manner. First, we extract multi-scale features and generate the perspective object proposals on each monocular image. Second, the multi-view proposals are aggregated and then iteratively refined with multi-view and multi-scale visual features in the DETR3D-style. The refined proposals are endto-end decoded into the detection results. To further boost the performance, we incorporate the auxiliary branches alongside the proposal generation to enhance the feature learning. Also, we design the methods of target filtering and teacher forcing to promote the consistency of two-stage training. We conduct extensive experiments on the 3D object detection benchmark of nuScenes to demonstrate the effectiveness of SimMOD and achieve competitive performance. Code will be available at https://github.com/zhangyp15/SimMOD.", + "primary_area": "computer vision iii", + "author": "Yunpeng Zhang; Wenzhao Zheng; Zheng Zhu; Guan Huang; Jiwen Lu; Jie Zhou", + "authorids": "", + "aff": "PhiGent Robotics; Department of Automation, Tsinghua University; PhiGent Robotics; PhiGent Robotics; Department of Automation, Tsinghua University; Department of Automation, Tsinghua University", + "bibtex": "@article{Zhang_Zheng_Zhu_Huang_Lu_Zhou_2023, title={A Simple Baseline for Multi-Camera 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25460}, DOI={10.1609/aaai.v37i3.25460}, abstractNote={3D object detection with surrounding cameras has been a promising direction for autonomous driving. In this paper, we present SimMOD, a Simple baseline for Multi-camera Object Detection, to solve the problem. 
To incorporate multiview information as well as build upon previous efforts on monocular 3D object detection, the framework is built on sample-wise object proposals and designed to work in a twostage manner. First, we extract multi-scale features and generate the perspective object proposals on each monocular image. Second, the multi-view proposals are aggregated and then iteratively refined with multi-view and multi-scale visual features in the DETR3D-style. The refined proposals are endto-end decoded into the detection results. To further boost the performance, we incorporate the auxiliary branches alongside the proposal generation to enhance the feature learning. Also, we design the methods of target filtering and teacher forcing to promote the consistency of two-stage training. We conduct extensive experiments on the 3D object detection benchmark of nuScenes to demonstrate the effectiveness of SimMOD and achieve competitive performance. Code will be available at https://github.com/zhangyp15/SimMOD.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yunpeng and Zheng, Wenzhao and Zhu, Zheng and Huang, Guan and Lu, Jiwen and Zhou, Jie}, year={2023}, month={Jun.}, pages={3507-3515} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25460/25232", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25460", + "pdf_size": 4423035, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12857822394874735061&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "phigent.ai;mails.tsinghua.edu.cn;ieee.org;phigent.ai;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "phigent.ai;mails.tsinghua.edu.cn;ieee.org;phigent.ai;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/zhangyp15/SimMOD", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;1;1", + "aff_unique_norm": "PhiGent Robotics;Tsinghua University", + "aff_unique_dep": 
";Department of Automation", + "aff_unique_url": ";https://www.tsinghua.edu.cn", + "aff_unique_abbr": ";THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1;1", + "aff_country_unique": ";China" + }, + { + "id": "article-26436", + "title": "A Simple Unified Approach to Testing High-Dimensional Conditional Independences for Categorical and Ordinal Data", + "track": "main", + "status": "Technical", + "abstract": "Conditional independence (CI) tests underlie many approaches to model testing and structure learning in causal inference. Most existing CI tests for categorical and ordinal data stratify the sample by the conditioning variables, perform simple independence tests in each stratum, and combine the results. Unfortunately, the statistical power of this approach degrades rapidly as the number of conditioning variables increases. Here we propose a simple unified CI test for ordinal and categorical data that maintains reasonable calibration and power in high dimensions. We show that our test outperforms existing baselines in model testing and structure learning for dense directed graphical models while being comparable for sparse models. 
Our approach could be attractive for causal model testing because it is easy to implement, can be used with non-parametric or parametric probability models, has the symmetry property, and has reasonable computational requirements.", + "primary_area": "reasoning under uncertainty", + "author": "Ankur Ankan; Johannes Textor", + "authorids": "", + "aff": "Data Science, Institute for Computing and Information Sciences, Radboud University, Nijmegen, The Netherlands; Data Science, Institute for Computing and Information Sciences, Radboud University, Nijmegen, The Netherlands", + "bibtex": "@article{Ankan_Textor_2023, title={A Simple Unified Approach to Testing High-Dimensional Conditional Independences for Categorical and Ordinal Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26436}, DOI={10.1609/aaai.v37i10.26436}, abstractNote={Conditional independence (CI) tests underlie many approaches to model testing and structure learning in causal inference. Most existing CI tests for categorical and ordinal data stratify the sample by the conditioning variables, perform simple independence tests in each stratum, and combine the results. Unfortunately, the statistical power of this approach degrades rapidly as the number of conditioning variables increases. Here we propose a simple unified CI test for ordinal and categorical data that maintains reasonable calibration and power in high dimensions. We show that our test outperforms existing baselines in model testing and structure learning for dense directed graphical models while being comparable for sparse models. 
Our approach could be attractive for causal model testing because it is easy to implement, can be used with non-parametric or parametric probability models, has the symmetry property, and has reasonable computational requirements.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ankan, Ankur and Textor, Johannes}, year={2023}, month={Jun.}, pages={12180-12188} }", +    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26436/26208", +    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26436", +    "pdf_size": 326140, +    "gs_citation": 6, +    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12084307324971903402&as_sdt=5,48&sciodt=0,48&hl=en", +    "gs_version_total": 5, +    "aff_domain": "ru.nl;ru.nl", +    "email": "ru.nl;ru.nl", +    "github": "", +    "project": "", +    "author_num": 2, +    "aff_unique_index": "0;0", +    "aff_unique_norm": "Radboud University", +    "aff_unique_dep": "Institute for Computing and Information Sciences", +    "aff_unique_url": "https://www.ru.nl", +    "aff_unique_abbr": "RU", +    "aff_campus_unique_index": "0;0", +    "aff_campus_unique": "Nijmegen", +    "aff_country_unique_index": "0;0", +    "aff_country_unique": "The Netherlands" +  }, +  { +    "id": "article-26515", +    "title": "A Simple Yet Effective Subsequence-Enhanced Approach for Cross-Domain NER", +    "track": "main", +    "status": "Technical", +    "abstract": "Cross-domain named entity recognition (NER), aiming to address the limitation of labeled resources in the target domain, is a challenging yet important task. Most existing studies alleviate the data discrepancy across different domains at the coarse level via combining NER with language modelings or introducing domain-adaptive pre-training (DAPT). Notably, source and target domains tend to share more fine-grained local information within denser subsequences than global information within the whole sequence, such that subsequence features are easier to transfer, which has not been explored well. 
Besides, compared to token-level representation, subsequence-level information can help the model distinguish different meanings of the same word in different domains. In this paper, we propose to incorporate subsequence-level features for promoting the cross-domain NER. In detail, we first utilize a pre-trained encoder to extract the global information. Then, we re-express each sentence as a group of subsequences and propose a novel bidirectional memory recurrent unit (BMRU) to capture features from the subsequences. Finally, an adaptive coupling unit (ACU) is proposed to combine global information and subsequence features for predicting entity labels. Experimental results on several benchmark datasets illustrate the effectiveness of our model, which achieves considerable improvements.", + "primary_area": "speech natural language processing", + "author": "Jinpeng Hu; DanDan Guo; Yang Liu; Zhuo Li; Zhihong Chen; Xiang Wan; Tsung-Hui Chang", + "authorids": "", + "aff": "Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, Guangdong, China; The Chinese University of Hong Kong, Shenzhen; Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, Guangdong, China; Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, Guangdong, China; Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, Guangdong, China; Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, Guangdong, China + Pazhou Lab, Guangzhou, 510330, China; Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, Guangdong, China + The Chinese University of Hong Kong, Shenzhen", + "bibtex": "@article{Hu_Guo_Liu_Li_Chen_Wan_Chang_2023, title={A Simple Yet Effective Subsequence-Enhanced Approach for Cross-Domain NER}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26515}, 
DOI={10.1609/aaai.v37i11.26515}, abstractNote={Cross-domain named entity recognition (NER), aiming to address the limitation of labeled resources in the target domain, is a challenging yet important task. Most existing studies alleviate the data discrepancy across different domains at the coarse level via combining NER with language modelings or introducing domain-adaptive pre-training (DAPT). Notably, source and target domains tend to share more fine-grained local information within denser subsequences than global information within the whole sequence, such that subsequence features are easier to transfer, which has not been explored well. Besides, compared to token-level representation, subsequence-level information can help the model distinguish different meanings of the same word in different domains. In this paper, we propose to incorporate subsequence-level features for promoting the cross-domain NER. In detail, we first utilize a pre-trained encoder to extract the global information. Then, we re-express each sentence as a group of subsequences and propose a novel bidirectional memory recurrent unit (BMRU) to capture features from the subsequences. Finally, an adaptive coupling unit (ACU) is proposed to combine global information and subsequence features for predicting entity labels. 
Experimental results on several benchmark datasets illustrate the effectiveness of our model, which achieves considerable improvements.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Jinpeng and Guo, DanDan and Liu, Yang and Li, Zhuo and Chen, Zhihong and Wan, Xiang and Chang, Tsung-Hui}, year={2023}, month={Jun.}, pages={12890-12898} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26515/26287", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26515", + "pdf_size": 355772, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13308412589196834678&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;sribd.cn;link.cuhk.edu.cn", + "email": "link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;sribd.cn;link.cuhk.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0+1;0+0", + "aff_unique_norm": "The Chinese University of Hong Kong;Pazhou Lab", + "aff_unique_dep": "Shenzhen Research Institute of Big Data;", + "aff_unique_url": "https://www.cuhk.edu.cn;", + "aff_unique_abbr": "CUHK;", + "aff_campus_unique_index": "0;0;0;0;0;0+1;0+0", + "aff_campus_unique": "Shenzhen;Guangzhou", + "aff_country_unique_index": "0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26592", + "title": "A Speaker Turn-Aware Multi-Task Adversarial Network for Joint User Satisfaction Estimation and Sentiment Analysis", + "track": "main", + "status": "Technical", + "abstract": "User Satisfaction Estimation is an important task and increasingly being applied in goal-oriented dialogue systems to estimate whether the user is satisfied with the service. 
It is observed that whether the user\u2019s needs are met often triggers various sentiments, which can be pertinent to the successful estimation of user satisfaction, and vice versa. Thus, User Satisfaction Estimation (USE) and Sentiment Analysis (SA) should be treated as a joint, collaborative effort, considering the strong connections between the sentiment states of speakers and the user satisfaction. Existing joint learning frameworks mainly unify the two highly pertinent tasks over cascade or shared-bottom implementations, however they fail to distinguish task-specific and common features, which will produce sub-optimal utterance representations for downstream tasks. In this paper, we propose a novel Speaker Turn-Aware Multi-Task Adversarial Network (STMAN) for dialogue-level USE and utterance-level SA. Specifically, we first introduce a multi-task adversarial strategy which trains a task discriminator to make utterance representation more task-specific, and then utilize a speaker-turn aware multi-task interaction strategy to extract the common features which are complementary to each task. 
Extensive experiments conducted on two real-world service dialogue datasets show that our model outperforms several state-of-the-art methods.", + "primary_area": "speech natural language processing", + "author": "Kaisong Song; Yangyang Kang; Jiawei Liu; Xurui Li; Changlong Sun; Xiaozhong Liu", + "authorids": "", + "aff": "Alibaba Group, Hangzhou, China + Northeastern University, Shenyang, China; Alibaba Group, Hangzhou, China; Wuhan University, Wuhan, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Worcester Polytechnic Institute, Worcester, USA", + "bibtex": "@article{Song_Kang_Liu_Li_Sun_Liu_2023, title={A Speaker Turn-Aware Multi-Task Adversarial Network for Joint User Satisfaction Estimation and Sentiment Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26592}, DOI={10.1609/aaai.v37i11.26592}, abstractNote={User Satisfaction Estimation is an important task and increasingly being applied in goal-oriented dialogue systems to estimate whether the user is satisfied with the service. It is observed that whether the user\u2019s needs are met often triggers various sentiments, which can be pertinent to the successful estimation of user satisfaction, and vice versa. Thus, User Satisfaction Estimation (USE) and Sentiment Analysis (SA) should be treated as a joint, collaborative effort, considering the strong connections between the sentiment states of speakers and the user satisfaction. Existing joint learning frameworks mainly unify the two highly pertinent tasks over cascade or shared-bottom implementations, however they fail to distinguish task-specific and common features, which will produce sub-optimal utterance representations for downstream tasks. In this paper, we propose a novel Speaker Turn-Aware Multi-Task Adversarial Network (STMAN) for dialogue-level USE and utterance-level SA. 
Specifically, we first introduce a multi-task adversarial strategy which trains a task discriminator to make utterance representation more task-specific, and then utilize a speaker-turn aware multi-task interaction strategy to extract the common features which are complementary to each task. Extensive experiments conducted on two real-world service dialogue datasets show that our model outperforms several state-of-the-art methods.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Kaisong and Kang, Yangyang and Liu, Jiawei and Li, Xurui and Sun, Changlong and Liu, Xiaozhong}, year={2023}, month={Jun.}, pages={13582-13590} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26592/26364", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26592", + "pdf_size": 427421, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10005996675494733279&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;whu.edu.cn;alibaba-inc.com;taobao.com;wpi.edu", + "email": "alibaba-inc.com;alibaba-inc.com;whu.edu.cn;alibaba-inc.com;taobao.com;wpi.edu", + "github": "", + "project": "https://www.taobao.com/", + "author_num": 6, + "aff_unique_index": "0+1;0;2;0;0;3", + "aff_unique_norm": "Alibaba Group;Northeastern University;Wuhan University;Worcester Polytechnic Institute", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.alibaba.com;http://www.neu.edu.cn/;http://www.whu.edu.cn/;https://www.wpi.edu", + "aff_unique_abbr": "Alibaba;;WHU;WPI", + "aff_campus_unique_index": "0+1;0;2;0;0;3", + "aff_campus_unique": "Hangzhou;Shenyang;Wuhan;Worcester", + "aff_country_unique_index": "0+0;0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25777", + "title": "A Structural Complexity Analysis of Synchronous Dynamical Systems", + "track": "main", + "status": "Technical", + "abstract": "Synchronous dynamical 
systems are well-established models that have been used to capture a range of phenomena in networks, including opinion diffusion, spread of disease and product adoption. We study the three most notable problems in synchronous dynamical systems: whether the system will transition to a target configuration from a starting configuration, whether the system will reach convergence from a starting configuration, and whether the system is guaranteed to converge from every possible starting configuration. While all three problems were known to be intractable in the classical sense, we initiate the study of their exact boundaries of tractability from the perspective of structural parameters of the network by making use of the more fine-grained parameterized complexity paradigm. \n\nAs our first result, we consider treewidth - as the most prominent and ubiquitous structural parameter - and show that all three problems remain intractable even on instances of constant treewidth. We complement this negative finding with fixed-parameter algorithms for the former two problems parameterized by treedepth, a well-studied restriction of treewidth. 
While it is possible to rule out a similar algorithm for convergence guarantee under treedepth, we conclude with a fixed-parameter algorithm for this last problem when parameterized by treedepth and the maximum in-degree.", + "primary_area": "knowledge representation and reasoning", + "author": "Eduard Eiben; Robert Ganian; Thekla Hamm; Viktoriia Korchemna", + "authorids": "", + "aff": "Department of Computer Science, Royal Holloway, University of London, UK; Algorithms and Complexity Group, TU Wien, Austria; Utrecht University, Netherlands; Algorithms and Complexity Group, TU Wien, Austria", + "bibtex": "@article{Eiben_Ganian_Hamm_Korchemna_2023, title={A Structural Complexity Analysis of Synchronous Dynamical Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25777}, DOI={10.1609/aaai.v37i5.25777}, abstractNote={Synchronous dynamical systems are well-established models that have been used to capture a range of phenomena in networks, including opinion diffusion, spread of disease and product adoption. We study the three most notable problems in synchronous dynamical systems: whether the system will transition to a target configuration from a starting configuration, whether the system will reach convergence from a starting configuration, and whether the system is guaranteed to converge from every possible starting configuration. While all three problems were known to be intractable in the classical sense, we initiate the study of their exact boundaries of tractability from the perspective of structural parameters of the network by making use of the more fine-grained parameterized complexity paradigm. As our first result, we consider treewidth - as the most prominent and ubiquitous structural parameter - and show that all three problems remain intractable even on instances of constant treewidth. 
We complement this negative finding with fixed-parameter algorithms for the former two problems parameterized by treedepth, a well-studied restriction of treewidth. While it is possible to rule out a similar algorithm for convergence guarantee under treedepth, we conclude with a fixed-parameter algorithm for this last problem when parameterized by treedepth and the maximum in-degree.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Eiben, Eduard and Ganian, Robert and Hamm, Thekla and Korchemna, Viktoriia}, year={2023}, month={Jun.}, pages={6313-6321} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25777/25549", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25777", + "pdf_size": 165180, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:r2JBGCGY7UkJ:scholar.google.com/&scioq=A+Structural+Complexity+Analysis+of+Synchronous+Dynamical+Systems&hl=en&as_sdt=0,33", + "gs_version_total": 9, + "aff_domain": "rhul.ac.uk;ac.wien.ac.at;uu.nl;ac.wien.ac.at", + "email": "rhul.ac.uk;ac.wien.ac.at;uu.nl;ac.wien.ac.at", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "University of London;TU Wien;Utrecht University", + "aff_unique_dep": "Department of Computer Science;Algorithms and Complexity Group;", + "aff_unique_url": "https://www.royalholloway.ac.uk;https://www.tuwien.ac.at;https://www.uu.nl", + "aff_unique_abbr": "RHUL;TU Wien;UU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Royal Holloway;", + "aff_country_unique_index": "0;1;2;1", + "aff_country_unique": "United Kingdom;Austria;Netherlands" + }, + { + "id": "article-26875", + "title": "A Study of Students\u2019 Learning of Computing through an LP-Based Integrated Curriculum for Middle Schools", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "There has been a consensus on integrating Computing into the teaching 
and learning of STEM (Science, Technology, Engineering and Math) subjects in K-12 (Kindergarten to 12th grade in the US education system). However, rigorous study on the impact of an integrated curriculum on students' learning in computing and/or the STEM subject(s) is still rare. In this paper, we report our research on how well an integrated curriculum helps middle school students learn Computing through the microgenetic analysis methods.", + "primary_area": "", + "author": "Joshua Archer; Rory Eckel; Joshua Hawkins; Jianlan Wang; Darrel Musslewhite; Yuanlin Zhang", + "authorids": "", + "aff": "Texas Tech University; Texas Tech University; Texas Tech University; Texas Tech University; Laura Bush Middle School; Texas Tech University", + "bibtex": "@article{Archer_Eckel_Hawkins_Wang_Musslewhite_Zhang_2024, title={A Study of Students\u2019 Learning of Computing through an LP-Based Integrated Curriculum for Middle Schools}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26875}, DOI={10.1609/aaai.v37i13.26875}, abstractNote={There has been a consensus on integrating Computing into the teaching and learning of STEM (Science, Technology, Engineering and Math) subjects in K-12 (Kindergarten to 12th grade in the US education system). However, rigorous study on the impact of an integrated curriculum on students\u2019 learning in computing and/or the STEM subject(s) is still rare. 
In this paper, we report our research on how well an integrated curriculum helps middle school students learn Computing through the microgenetic analysis methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Archer, Joshua and Eckel, Rory and Hawkins, Joshua and Wang, Jianlan and Musslewhite, Darrel and Zhang, Yuanlin}, year={2024}, month={Jul.}, pages={15790-15797} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26875/26647", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26875", + "pdf_size": 117993, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:uBxOLZzu3fMJ:scholar.google.com/&scioq=A+Study+of+Students%E2%80%99+Learning+of+Computing+through+an+LP-Based+Integrated+Curriculum+for+Middle+Schools&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "ttu.edu;gmail.com;gmail.com;ttu.edu;yahoo.com;ttu.edu", + "email": "ttu.edu;gmail.com;gmail.com;ttu.edu;yahoo.com;ttu.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Texas Tech University;Laura Bush Middle School", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ttu.edu;", + "aff_unique_abbr": "TTU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26255", + "title": "A Survey on Model Compression and Acceleration for Pretrained Language Models", + "track": "main", + "status": "Technical", + "abstract": "Despite achieving state-of-the-art performance on many NLP tasks, the high energy cost and long inference delay prevent Transformer-based pretrained language models (PLMs) from seeing broader adoption including for edge and mobile computing. 
Efficient NLP research aims to comprehensively consider computation, time and carbon emission for the entire life-cycle of NLP, including data preparation, model training and inference. In this survey, we focus on the inference stage and review the current state of model compression and acceleration for pretrained language models, including benchmarks, metrics and methodology.", + "primary_area": "machine learning iv", + "author": "Canwen Xu; Julian McAuley", + "authorids": "", + "aff": "University of California, San Diego; University of California, San Diego", + "bibtex": "@article{Xu_McAuley_2023, title={A Survey on Model Compression and Acceleration for Pretrained Language Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26255}, DOI={10.1609/aaai.v37i9.26255}, abstractNote={Despite achieving state-of-the-art performance on many NLP tasks, the high energy cost and long inference delay prevent Transformer-based pretrained language models (PLMs) from seeing broader adoption including for edge and mobile computing. Efficient NLP research aims to comprehensively consider computation, time and carbon emission for the entire life-cycle of NLP, including data preparation, model training and inference. 
In this survey, we focus on the inference stage and review the current state of model compression and acceleration for pretrained language models, including benchmarks, metrics and methodology.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Canwen and McAuley, Julian}, year={2023}, month={Jun.}, pages={10566-10575} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26255/26027", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26255", + "pdf_size": 208302, + "gs_citation": 79, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14106048406325296467&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, San Diego", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26248", + "title": "A Tale of Two Latent Flows: Learning Latent Space Normalizing Flow with Short-Run Langevin Flow for Approximate Inference", + "track": "main", + "status": "Technical", + "abstract": "We study a normalizing flow in the latent space of a top-down generator model, in which the normalizing flow model plays the role of the informative prior model of the generator. 
We propose to jointly learn the latent space normalizing flow prior model and the top-down generator model by a Markov chain Monte Carlo (MCMC)-based maximum likelihood algorithm, where a short-run Langevin sampling from the intractable posterior distribution is performed to infer the latent variables for each observed example, so that the parameters of the normalizing flow prior and the generator can be updated with the inferred latent variables. We show that, under the scenario of non-convergent short-run MCMC, the finite step Langevin dynamics is a flow-like approximate inference model and the learning objective actually follows the perturbation of the maximum likelihood estimation (MLE). We further point out that the learning framework seeks to (i) match the latent space normalizing flow and the aggregated posterior produced by the short-run Langevin flow, and (ii) bias the model from MLE such that the short-run Langevin flow inference is close to the true posterior. Empirical results of extensive experiments validate the effectiveness of the proposed latent space normalizing flow model in the tasks of image generation, image reconstruction, anomaly detection, supervised image inpainting and unsupervised image recovery.", + "primary_area": "machine learning iv", + "author": "Jianwen Xie; Yaxuan Zhu; Yifei Xu; Dingcheng Li; Ping Li", + "authorids": "", + "aff": "Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research", + "bibtex": "@article{Xie_Zhu_Xu_Li_Li_2023, title={A Tale of Two Latent Flows: Learning Latent Space Normalizing Flow with Short-Run Langevin Flow for Approximate Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26248}, DOI={10.1609/aaai.v37i9.26248}, abstractNote={We study a normalizing flow in the latent space of a top-down generator model, in which the normalizing flow 
model plays the role of the informative prior model of the generator. We propose to jointly learn the latent space normalizing flow prior model and the top-down generator model by a Markov chain Monte Carlo (MCMC)-based maximum likelihood algorithm, where a short-run Langevin sampling from the intractable posterior distribution is performed to infer the latent variables for each observed example, so that the parameters of the normalizing flow prior and the generator can be updated with the inferred latent variables. We show that, under the scenario of non-convergent short-run MCMC, the finite step Langevin dynamics is a flow-like approximate inference model and the learning objective actually follows the perturbation of the maximum likelihood estimation (MLE). We further point out that the learning framework seeks to (i) match the latent space normalizing flow and the aggregated posterior produced by the short-run Langevin flow, and (ii) bias the model from MLE such that the short-run Langevin flow inference is close to the true posterior. 
Empirical results of extensive experiments validate the effectiveness of the proposed latent space normalizing flow model in the tasks of image generation, image reconstruction, anomaly detection, supervised image inpainting and unsupervised image recovery.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Jianwen and Zhu, Yaxuan and Xu, Yifei and Li, Dingcheng and Li, Ping}, year={2023}, month={Jun.}, pages={10499-10509} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26248/26020", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26248", + "pdf_size": 2981350, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9043613179918945984&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Baidu Research", + "aff_unique_dep": "Cognitive Computing Lab", + "aff_unique_url": "https://baidu.com", + "aff_unique_abbr": "Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27059", + "title": "A Tool for Generating Controllable Variations of Musical Themes Using Variational Autoencoders with Latent Space Regularisation", + "track": "demonstrations", + "status": "Technical", + "abstract": "A common musical composition practice is to develop musical pieces using variations of musical themes. In this study, we present an interactive tool which can generate variations of musical themes in real-time using a variational autoencoder model. Our tool is controllable using semantically meaningful musical attributes via latent space regularisation technique to increase the explainability of the model. 
The tool is integrated into an industry standard digital audio workstation - Ableton Live - using the Max4Live device framework and can run locally on an average personal CPU rather than requiring a costly GPU cluster. In this way we demonstrate how cutting-edge AI research can be integrated into the existing workflows of professional and practising musicians for use in the real-world beyond the research lab.", +    "primary_area": "", +    "author": "Berker Banar; Nick Bryan-Kinns; Simon Colton", +    "authorids": "", +    "aff": "School of Electronic Engineering and Computer Science, Queen Mary University of London, UK; School of Electronic Engineering and Computer Science, Queen Mary University of London, UK; School of Electronic Engineering and Computer Science, Queen Mary University of London, UK", +    "bibtex": "@article{Banar_Bryan-Kinns_Colton_2024, title={A Tool for Generating Controllable Variations of Musical Themes Using Variational Autoencoders with Latent Space Regularisation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27059}, DOI={10.1609/aaai.v37i13.27059}, abstractNote={A common musical composition practice is to develop musical pieces using variations of musical themes. In this study, we present an interactive tool which can generate variations of musical themes in real-time using a variational autoencoder model. Our tool is controllable using semantically meaningful musical attributes via latent space regularisation technique to increase the explainability of the model. The tool is integrated into an industry standard digital audio workstation - Ableton Live - using the Max4Live device framework and can run locally on an average personal CPU rather than requiring a costly GPU cluster. 
In this way we demonstrate how cutting-edge AI research can be integrated into the existing workflows of professional and practising musicians for use in the real-world beyond the research lab.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Banar, Berker and Bryan-Kinns, Nick and Colton, Simon}, year={2024}, month={Jul.}, pages={16401-16403} }", +    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27059/26831", +    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27059", +    "pdf_size": 116934, +    "gs_citation": 10, +    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5867249658375630564&as_sdt=400005&sciodt=0,14&hl=en", +    "gs_version_total": 4, +    "aff_domain": "qmul.ac.uk;qmul.ac.uk;qmul.ac.uk", +    "email": "qmul.ac.uk;qmul.ac.uk;qmul.ac.uk", +    "github": "https://bit.ly/3ULwmxZ", +    "project": "", +    "author_num": 3, +    "aff_unique_index": "0;0;0", +    "aff_unique_norm": "Queen Mary University of London", +    "aff_unique_dep": "School of Electronic Engineering and Computer Science", +    "aff_unique_url": "https://www.qmul.ac.uk", +    "aff_unique_abbr": "QMUL", +    "aff_campus_unique_index": "0;0;0", +    "aff_campus_unique": "London", +    "aff_country_unique_index": "0;0;0", +    "aff_country_unique": "United Kingdom" +  }, +  { +    "id": "article-26488", +    "title": "A Vector Quantized Approach for Text to Speech Synthesis on Real-World Spontaneous Speech", +    "track": "main", +    "status": "Technical", +    "abstract": "Recent Text-to-Speech (TTS) systems trained on reading or acted corpora have achieved near human-level naturalness. The diversity of human speech, however, often goes beyond the coverage of these corpora. We believe the ability to handle such diversity is crucial for AI systems to achieve human-level communication. Our work explores the use of more abundant real-world data for building speech synthesizers. We train TTS systems using real-world speech from YouTube and podcasts. 
We observe the mismatch between training and inference alignments in mel-spectrogram based autoregressive models, leading to unintelligible synthesis, and demonstrate that learned discrete codes within multiple code groups effectively resolves this issue. We introduce our MQTTS system whose architecture is designed for multiple code generation and monotonic alignment, along with the use of a clean silence prompt to improve synthesis quality. We conduct ablation analyses to identify the efficacy of our methods. We show that MQTTS outperforms existing TTS systems in several objective and subjective measures.", + "primary_area": "speech natural language processing", + "author": "Li-Wei Chen; Shinji Watanabe; Alexander Rudnicky", + "authorids": "", + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "bibtex": "@article{Chen_Watanabe_Rudnicky_2023, title={A Vector Quantized Approach for Text to Speech Synthesis on Real-World Spontaneous Speech}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26488}, DOI={10.1609/aaai.v37i11.26488}, abstractNote={Recent Text-to-Speech (TTS) systems trained on reading or acted corpora have achieved near human-level naturalness. The diversity of human speech, however, often goes beyond the coverage of these corpora. We believe the ability to handle such diversity is crucial for AI systems to achieve human-level communication. Our work explores the use of more abundant real-world data for building speech synthesizers. We train TTS systems using real-world speech from YouTube and podcasts. We observe the mismatch between training and inference alignments in mel-spectrogram based autoregressive models, leading to unintelligible synthesis, and demonstrate that learned discrete codes within multiple code groups effectively resolves this issue. 
We introduce our MQTTS system whose architecture is designed for multiple code generation and monotonic alignment, along with the use of a clean silence prompt to improve synthesis quality. We conduct ablation analyses to identify the efficacy of our methods. We show that MQTTS outperforms existing TTS systems in several objective and subjective measures.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Li-Wei and Watanabe, Shinji and Rudnicky, Alexander}, year={2023}, month={Jun.}, pages={12644-12652} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26488/26260", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26488", + "pdf_size": 1719561, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1779712084933914260&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/b04901014/MQTTS", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26814", + "title": "AAAI New Faculty Highlights: General and Scalable Optimization for Robust AI", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Deep neural networks (DNNs) can easily be manipulated (by an adversary) to output drastically different predictions and can be done so in a controlled and directed way. This process is known as adversarial attack and is considered one of the major hurdles in using DNNs in high-stakes and real-world applications. 
Although developing methods to secure DNNs against adversaries is now a primary research focus, it suffers from limitations such as lack of optimization generality and lack of optimization scalability. My research highlights will offer a holistic understanding of optimization foundations for robust AI, peer into their emerging challenges, and present recent solutions developed by my research group.", + "primary_area": "", + "author": "Sijia Liu", + "authorids": "", + "aff": "Department of Computer Science & Engineering, Michigan State University, MI, USA+MIT-IBM Watson AI Lab, IBM Research, USA", + "bibtex": "@article{Liu_2024, title={AAAI New Faculty Highlights: General and Scalable Optimization for Robust AI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26814}, DOI={10.1609/aaai.v37i13.26814}, abstractNote={Deep neural networks (DNNs) can easily be manipulated (by an adversary) to output drastically different predictions and can be done so in a controlled and directed way. This process is known as adversarial attack and is considered one of the major hurdles in using DNNs in high-stakes and real-world applications. Although developing methods to secure DNNs against adversaries is now a primary research focus, it suffers from limitations such as lack of optimization generality and lack of optimization scalability. 
My research highlights will offer a holistic understanding of optimization foundations for robust AI, peer into their emerging challenges, and present recent solutions developed by my research group.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Sijia}, year={2024}, month={Jul.}, pages={15447-15447} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26814/26586", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26814", + "pdf_size": 44582, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:Cw7qZI2sS_4J:scholar.google.com/&scioq=AAAI+New+Faculty+Highlights:+General+and+Scalable+Optimization+for+Robust+AI&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "msu.edu", + "email": "msu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+1", + "aff_unique_norm": "Michigan State University;MIT-IBM Watson AI Lab", + "aff_unique_dep": "Department of Computer Science & Engineering;AI Lab", + "aff_unique_url": "https://www.msu.edu;", + "aff_unique_abbr": "MSU;MIT-IBM AI Lab", + "aff_campus_unique_index": "0", + "aff_campus_unique": "East Lansing;", + "aff_country_unique_index": "0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26456", + "title": "AC-Band: A Combinatorial Bandit-Based Approach to Algorithm Configuration", + "track": "main", + "status": "Technical", + "abstract": "We study the algorithm configuration (AC) problem, in which one seeks to find an optimal parameter configuration of a given target algorithm in an automated way. Although this field of research has experienced much progress recently regarding approaches satisfying strong theoretical guarantees, there is still a gap between the practical performance of these approaches and the heuristic state-of-the-art approaches. 
Recently, there has been significant progress in designing AC approaches that satisfy strong theoretical guarantees. However, a significant gap still remains between the practical performance of these approaches and state-of-the-art heuristic methods. To this end, we introduce AC-Band, a general approach for the AC problem based on multi-armed bandits that provides theoretical guarantees while exhibiting strong practical performance. We show that AC-Band requires significantly less computation time than other AC approaches providing theoretical guarantees while still yielding high-quality configurations.", + "primary_area": "search and optimization", + "author": "Jasmin Brandt; Elias Schede; Bj\u00f6rn Haddenhorst; Viktor Bengs; Eyke H\u00fcllermeier; Kevin Tierney", + "authorids": "", + "aff": "Department of Computer Science, Paderborn University, Germany; Decision and Operation Technologies Group, Bielefeld University, Germany; Department of Computer Science, Paderborn University, Germany; Institute of Informatics, LMU Munich, Germany + Munich Center for Machine Learning (MCML), Germany; Institute of Informatics, LMU Munich, Germany + Munich Center for Machine Learning (MCML), Germany; Decision and Operation Technologies Group, Bielefeld University, Germany", + "bibtex": "@article{Brandt_Schede_Haddenhorst_Bengs_H\u00fcllermeier_Tierney_2023, title={AC-Band: A Combinatorial Bandit-Based Approach to Algorithm Configuration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26456}, DOI={10.1609/aaai.v37i10.26456}, abstractNote={We study the algorithm configuration (AC) problem, in which one seeks to find an optimal parameter configuration of a given target algorithm in an automated way. Although this field of research has experienced much progress recently regarding approaches satisfying strong theoretical guarantees, there is still a gap between the practical performance of these approaches and the heuristic state-of-the-art approaches. 
Recently, there has been significant progress in designing AC approaches that satisfy strong theoretical guarantees. However, a significant gap still remains between the practical performance of these approaches and state-of-the-art heuristic methods. To this end, we introduce AC-Band, a general approach for the AC problem based on multi-armed bandits that provides theoretical guarantees while exhibiting strong practical performance. We show that AC-Band requires significantly less computation time than other AC approaches providing theoretical guarantees while still yielding high-quality configurations.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brandt, Jasmin and Schede, Elias and Haddenhorst, Bj\u00f6rn and Bengs, Viktor and H\u00fcllermeier, Eyke and Tierney, Kevin}, year={2023}, month={Jun.}, pages={12355-12363} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26456/26228", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26456", + "pdf_size": 246309, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=158408290071201255&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "upb.de;uni-bielefeld.de;upb.de;ifi.lmu.de;ifi.lmu.de;uni-bielefeld.de", + "email": "upb.de;uni-bielefeld.de;upb.de;ifi.lmu.de;ifi.lmu.de;uni-bielefeld.de", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2+3;2+3;1", + "aff_unique_norm": "Paderborn University;Bielefeld University;LMU Munich;Munich Center for Machine Learning", + "aff_unique_dep": "Department of Computer Science;Decision and Operation Technologies Group;Institute of Informatics;Center for Machine Learning", + "aff_unique_url": "https://www.uni-paderborn.de;https://www.uni-bielefeld.de;https://www.lmu.de;https://www.munich-center-for-machine-learning.de", + "aff_unique_abbr": ";;LMU;MCML", + "aff_campus_unique_index": "1+1;1+1", + "aff_campus_unique": ";Munich", + 
"aff_country_unique_index": "0;0;0;0+0;0+0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-27045", + "title": "ACCD: An Adaptive Clustering-Based Collusion Detector in Crowdsourcing (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Crowdsourcing is a popular method for crowd workers to collaborate on tasks. However, workers coordinate and share answers during the crowdsourcing process. The term for this is \"collusion\". Copies from others and repeated submissions are detrimental to the quality of the assignments. The majority of the existing research on collusion detection is limited to ground truth problems (e.g., labeling tasks) and requires a predetermined threshold to be established in advance. In this paper, we aim to detect collusion behavior of workers in an adaptive way, and propose an Adaptive Clustering Based Collusion Detection approach (ACCD) for a broad range of task types and data types solved via crowdsourcing (e.g., continuous rating with or without distributions). Extensive experiments on both real-world and synthetic datasets show the superiority of ACCD over state-of-the-art approaches.", + "primary_area": "", + "author": "Ruoyu Xu; Gaoxiang Li; Wei Jin; Austin Chen; Victor S. Sheng", + "authorids": "", + "aff": "Computer Science Departement, Texas Tech University; Computer Science Departement, Texas Tech University; Computer Science and Engineering Departement, University of North Texas; Lubbock High School; Computer Science Departement, Texas Tech University", + "bibtex": "@article{Xu_Li_Jin_Chen_Sheng_2024, title={ACCD: An Adaptive Clustering-Based Collusion Detector in Crowdsourcing (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27045}, DOI={10.1609/aaai.v37i13.27045}, abstractNote={Crowdsourcing is a popular method for crowd workers to collaborate on tasks. 
However, workers coordinate and share answers during the crowdsourcing process. The term for this is "collusion". Copies from others and repeated submissions are detrimental to the quality of the assignments. The majority of the existing research on collusion detection is limited to ground truth problems (e.g., labeling tasks) and requires a predetermined threshold to be established in advance. In this paper, we aim to detect collusion behavior of workers in an adaptive way, and propose an Adaptive Clustering Based Collusion Detection approach (ACCD) for a broad range of task types and data types solved via crowdsourcing (e.g., continuous rating with or without distributions). Extensive experiments on both real-world and synthetic datasets show the superiority of ACCD over state-of-the-art approaches.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Ruoyu and Li, Gaoxiang and Jin, Wei and Chen, Austin and Sheng, Victor S.}, year={2024}, month={Jul.}, pages={16370-16371} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27045/26817", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27045", + "pdf_size": 256663, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:A_viEFBP1hAJ:scholar.google.com/&scioq=ACCD:+An+Adaptive+Clustering-Based+Collusion+Detector+in+Crowdsourcing+(Student+Abstract)&hl=en&as_sdt=0,44", + "gs_version_total": 3, + "aff_domain": "ttu.edu;ttu.edu;unt.edu;gmail.com;ttu.edu", + "email": "ttu.edu;ttu.edu;unt.edu;gmail.com;ttu.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Texas Tech University;University of North Texas;Lubbock High School", + "aff_unique_dep": "Computer Science Departement;Computer Science and Engineering Departement;", + "aff_unique_url": "https://www.ttu.edu;https://www.unt.edu;", + "aff_unique_abbr": "TTU;UNT;", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26028", + "title": "ACE: Cooperative Multi-Agent Q-learning with Bidirectional Action-Dependency", + "track": "main", + "status": "Technical", + "abstract": "Multi-agent reinforcement learning (MARL) suffers from the non-stationarity problem, which is the ever-changing targets at every iteration when multiple agents update their policies at the same time. Starting from first principle, in this paper, we manage to solve the non-stationarity problem by proposing bidirectional action-dependent Q-learning (ACE). Central to the development of ACE is the sequential decision making process wherein only one agent is allowed to take action at one time. Within this process, each agent maximizes its value function given the actions taken by the preceding agents at the inference stage. In the learning phase, each agent minimizes the TD error that is dependent on how the subsequent agents have reacted to their chosen action. Given the design of bidirectional dependency, ACE effectively turns a multi-agent MDP into a single-agent MDP. We implement the ACE framework by identifying the proper network representation to formulate the action dependency, so that the sequential decision process is computed implicitly in one forward pass. To validate ACE, we compare it with strong baselines on two MARL benchmarks. Empirical experiments demonstrate that ACE outperforms the state-of-the-art algorithms on Google Research Football and StarCraft Multi-Agent Challenge by a large margin. In particular, on SMAC tasks, ACE achieves 100% success rate on almost all the hard and super hard maps. 
We further study extensive research problems regarding ACE, including extension, generalization and practicability.", + "primary_area": "machine learning ii", + "author": "Chuming Li; Jie Liu; Yinmin Zhang; Yuhong Wei; Yazhe Niu; Yaodong Yang; Yu Liu; Wanli Ouyang", + "authorids": "", + "aff": "The University of Sydney + SenseTime Computer Vision Group, Australia; Shanghai Artificial Intelligence Laboratory; The University of Sydney + SenseTime Computer Vision Group, Australia; SenseTime Group LTD; Shanghai Artificial Intelligence Laboratory + SenseTime Group LTD; Institute for AI, Peking University; Shanghai Artificial Intelligence Laboratory + SenseTime Group LTD; The University of Sydney + SenseTime Computer Vision Group, Australia", + "bibtex": "@article{Li_Liu_Zhang_Wei_Niu_Yang_Liu_Ouyang_2023, title={ACE: Cooperative Multi-Agent Q-learning with Bidirectional Action-Dependency}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26028}, DOI={10.1609/aaai.v37i7.26028}, abstractNote={Multi-agent reinforcement learning (MARL) suffers from the non-stationarity problem, which is the ever-changing targets at every iteration when multiple agents update their policies at the same time. Starting from first principle, in this paper, we manage to solve the non-stationarity problem by proposing bidirectional action-dependent Q-learning (ACE). Central to the development of ACE is the sequential decision making process wherein only one agent is allowed to take action at one time. Within this process, each agent maximizes its value function given the actions taken by the preceding agents at the inference stage. In the learning phase, each agent minimizes the TD error that is dependent on how the subsequent agents have reacted to their chosen action. Given the design of bidirectional dependency, ACE effectively turns a multi-agent MDP into a single-agent MDP. 
We implement the ACE framework by identifying the proper network representation to formulate the action dependency, so that the sequential decision process is computed implicitly in one forward pass. To validate ACE, we compare it with strong baselines on two MARL benchmarks. Empirical experiments demonstrate that ACE outperforms the state-of-the-art algorithms on Google Research Football and StarCraft Multi-Agent Challenge by a large margin. In particular, on SMAC tasks, ACE achieves 100% success rate on almost all the hard and super hard maps. We further study extensive research problems regarding ACE, including extension, generalization and practicability.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Chuming and Liu, Jie and Zhang, Yinmin and Wei, Yuhong and Niu, Yazhe and Yang, Yaodong and Liu, Yu and Ouyang, Wanli}, year={2023}, month={Jun.}, pages={8536-8544} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26028/25800", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26028", + "pdf_size": 436180, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6861084426804042328&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "uni.sydney.edu.au;pjlab.org.cn;sydney.edu.au;sensetime.com;sensetime.com;pku.edu.cn;gmail.com;sydney.edu.au", + "email": "uni.sydney.edu.au;pjlab.org.cn;sydney.edu.au;sensetime.com;sensetime.com;pku.edu.cn;gmail.com;sydney.edu.au", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;2;0+1;3;2+3;4;2+3;0+1", + "aff_unique_norm": "University of Sydney;SenseTime;Shanghai Artificial Intelligence Laboratory;SenseTime Group;Peking University", + "aff_unique_dep": ";Computer Vision Group;;;Institute for AI", + "aff_unique_url": "https://www.sydney.edu.au;https://www.sensetime.com;http://www.shailab.org/;https://www.sensetime.com;http://www.pku.edu.cn", + "aff_unique_abbr": 
"USYD;SenseTime;Shanghai AI Lab;SenseTime;PKU", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;0+0;1;1+1;1;1+1;0+0", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-25382", + "title": "ACL-Net: Semi-supervised Polyp Segmentation via Affinity Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Automatic polyp segmentation from colonoscopy images is an essential prerequisite for the development of computer-assisted therapy. However, the complex semantic information and the blurred edges of polyps make segmentation extremely difficult. In this paper, we propose a novel semi-supervised polyp segmentation framework using affinity contrastive learning (ACL-Net), which is implemented between student and teacher networks to consistently refine the pseudo-labels for semi-supervised polyp segmentation. By aligning the affinity maps between the two branches, a better polyp region activation can be obtained to fully exploit the appearance-level context encoded in the feature maps, thereby improving the capability of capturing not only global localization and shape context, but also the local textural and boundary details. By utilizing the rich inter-image affinity context and establishing a global affinity context based on the memory bank, a cross-image affinity aggregation (CAA) module is also implemented to further refine the affinity aggregation between the two branches. By continuously and adaptively refining pseudo-labels with optimized affinity, we can improve the semi-supervised polyp segmentation based on the mutually reinforced knowledge interaction among contrastive learning and consistency learning iterations. Extensive experiments on five benchmark datasets, including Kvasir-SEG, CVC-ClinicDB, CVC-300, CVC-ColonDB and ETIS, demonstrate the effectiveness and superiority of our method. 
Codes are available at https://github.com/xiewende/ACL-Net.", + "primary_area": "computer vision iii", + "author": "Huisi Wu; Wende Xie; Jingyin Lin; Xinrong Guo", + "authorids": "", + "aff": "College of Computer Science and Software Engineering, Shenzhen University; College of Computer Science and Software Engineering, Shenzhen University; College of Computer Science and Software Engineering, Shenzhen University; College of Computer Science and Software Engineering, Shenzhen University", + "bibtex": "@article{Wu_Xie_Lin_Guo_2023, title={ACL-Net: Semi-supervised Polyp Segmentation via Affinity Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25382}, DOI={10.1609/aaai.v37i3.25382}, abstractNote={Automatic polyp segmentation from colonoscopy images is an essential prerequisite for the development of computer-assisted therapy. However, the complex semantic information and the blurred edges of polyps make segmentation extremely difficult. In this paper, we propose a novel semi-supervised polyp segmentation framework using affinity contrastive learning (ACL-Net), which is implemented between student and teacher networks to consistently refine the pseudo-labels for semi-supervised polyp segmentation. By aligning the affinity maps between the two branches, a better polyp region activation can be obtained to fully exploit the appearance-level context encoded in the feature maps, thereby improving the capability of capturing not only global localization and shape context, but also the local textural and boundary details. By utilizing the rich inter-image affinity context and establishing a global affinity context based on the memory bank, a cross-image affinity aggregation (CAA) module is also implemented to further refine the affinity aggregation between the two branches. 
By continuously and adaptively refining pseudo-labels with optimized affinity, we can improve the semi-supervised polyp segmentation based on the mutually reinforced knowledge interaction among contrastive learning and consistency learning iterations. Extensive experiments on five benchmark datasets, including Kvasir-SEG, CVC-ClinicDB, CVC-300, CVC-ColonDB and ETIS, demonstrate the effectiveness and superiority of our method. Codes are available at https://github.com/xiewende/ACL-Net.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Huisi and Xie, Wende and Lin, Jingyin and Guo, Xinrong}, year={2023}, month={Jun.}, pages={2812-2820} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25382/25154", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25382", + "pdf_size": 3743912, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2343622556153354503&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "szu.edu.cn; ; ; ", + "email": "szu.edu.cn; ; ; ", + "github": "https://github.com/xiewende/ACL-Net", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shenzhen University", + "aff_unique_dep": "College of Computer Science and Software Engineering", + "aff_unique_url": "https://www.szu.edu.cn", + "aff_unique_abbr": "SZU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26279", + "title": "ADEPT: A DEbiasing PrompT Framework", + "track": "main", + "status": "Technical", + "abstract": "Several works have proven that finetuning is an applicable approach for debiasing contextualized word embeddings. Similarly, discrete prompts with semantic meanings have shown to be effective in debiasing tasks. 
With unfixed mathematical representation at the token level, continuous prompts usually surpass discrete ones at providing a pre-trained language model (PLM) with additional task-specific information. Despite this, relatively few efforts have been made to debias PLMs by prompt tuning with continuous prompts compared to its discrete counterpart. Furthermore, for most debiasing methods that alter a PLM's original parameters, a major problem is the need to not only decrease the bias in the PLM but also to ensure that the PLM does not lose its representation ability. Finetuning methods typically have a hard time maintaining this balance, as they tend to violently remove meanings of attribute words (like the words developing our concepts of \"male\" and \"female\" for gender), which also leads to an unstable and unpredictable training process. In this paper, we propose ADEPT, a method to debias PLMs using prompt tuning while maintaining the delicate balance between removing biases and ensuring representation ability. To achieve this, we propose a new training criterion inspired by manifold learning and equip it with an explicit debiasing term to optimize prompt tuning. In addition, we conduct several experiments with regard to the reliability, quality, and quantity of a previously proposed attribute training corpus in order to obtain a clearer prototype of a certain attribute, which indicates the attribute's position and relative distances to other words on the manifold. We evaluate ADEPT on several widely acknowledged debiasing benchmarks and downstream tasks, and find that it achieves competitive results while maintaining (and in some cases even improving) the PLM's representation ability. We further visualize words' correlation before and after debiasing a PLM, and give some possible explanations for the visible effects.", + "primary_area": "machine learning iv", + "author": "Ke Yang; Charles Yu; Yi R. 
Fung; Manling Li; Heng Ji", + "authorids": "", + "aff": "Tsinghua University; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign", + "bibtex": "@article{Yang_Yu_Fung_Li_Ji_2023, title={ADEPT: A DEbiasing PrompT Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26279}, DOI={10.1609/aaai.v37i9.26279}, abstractNote={Several works have proven that finetuning is an applicable approach for debiasing contextualized word embeddings. Similarly, discrete prompts with semantic meanings have shown to be effective in debiasing tasks. With unfixed mathematical representation at the token level, continuous prompts usually surpass discrete ones at providing a pre-trained language model (PLM) with additional task-specific information. Despite this, relatively few efforts have been made to debias PLMs by prompt tuning with continuous prompts compared to its discrete counterpart. Furthermore, for most debiasing methods that alter a PLM\u2019s original parameters, a major problem is the need to not only decrease the bias in the PLM but also to ensure that the PLM does not lose its representation ability. Finetuning methods typically have a hard time maintaining this balance, as they tend to violently remove meanings of attribute words (like the words developing our concepts of "male" and "female" for gender), which also leads to an unstable and unpredictable training process. In this paper, we propose ADEPT, a method to debias PLMs using prompt tuning while maintaining the delicate balance between removing biases and ensuring representation ability. To achieve this, we propose a new training criterion inspired by manifold learning and equip it with an explicit debiasing term to optimize prompt tuning. 
In addition, we conduct several experiments with regard to the reliability, quality, and quantity of a previously proposed attribute training corpus in order to obtain a clearer prototype of a certain attribute, which indicates the attribute\u2019s position and relative distances to other words on the manifold. We evaluate ADEPT on several widely acknowledged debiasing benchmarks and downstream tasks, and find that it achieves competitive results while maintaining (and in some cases even improving) the PLM\u2019s representation ability. We further visualize words\u2019 correlation before and after debiasing a PLM, and give some possible explanations for the visible effects.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Ke and Yu, Charles and Fung, Yi R. and Li, Manling and Ji, Heng}, year={2023}, month={Jun.}, pages={10780-10788} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26279/26051", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26279", + "pdf_size": 5911941, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15915947857299161522&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.tsinghua.edu.cn;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "email": "mails.tsinghua.edu.cn;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "github": "https://github.com/EmpathYang/ADEPT", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Tsinghua University;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://illinois.edu", + "aff_unique_abbr": "THU;UIUC", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25620", + "title": "ADMoE: Anomaly Detection with 
Mixture-of-Experts from Noisy Labels", + "track": "main", + "status": "Technical", + "abstract": "Existing works on anomaly detection (AD) rely on clean labels from human annotators that are expensive to acquire in practice. In this work, we propose a method to leverage weak/noisy labels (e.g., risk scores generated by machine rules for detecting malware) that are cheaper to obtain for anomaly detection. Specifically, we propose ADMoE, the first framework for anomaly detection algorithms to learn from noisy labels. In a nutshell, ADMoE leverages mixture-of-experts (MoE) architecture to encourage specialized and scalable learning from multiple noisy sources. It captures the similarities among noisy labels by sharing most model parameters, while encouraging specialization by building \"expert\" sub-networks. To further juice out the signals from noisy labels, ADMoE uses them as input features to facilitate expert learning. Extensive results on eight datasets (including a proprietary enterprise security dataset) demonstrate the effectiveness of ADMoE, where it brings up to 34% performance improvement over not using it. Also, it outperforms a total of 13 leading baselines with equivalent network parameters and FLOPS. 
Notably, ADMoE is model-agnostic to enable any neural network-based detection methods to handle noisy labels, where we showcase its results on both multiple-layer perceptron (MLP) and the leading AD method DeepSAD.", + "primary_area": "data mining and knowledge management", + "author": "Yue Zhao; Guoqing Zheng; Subhabrata Mukherjee; Robert McCann; Ahmed Awadallah", + "authorids": "", + "aff": "Carnegie Mellon University; Microsoft; Microsoft; Microsoft; Microsoft", + "bibtex": "@article{Zhao_Zheng_Mukherjee_McCann_Awadallah_2023, title={ADMoE: Anomaly Detection with Mixture-of-Experts from Noisy Labels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25620}, DOI={10.1609/aaai.v37i4.25620}, abstractNote={Existing works on anomaly detection (AD) rely on clean labels from human annotators that are expensive to acquire in practice. In this work, we propose a method to leverage weak/noisy labels (e.g., risk scores generated by machine rules for detecting malware) that are cheaper to obtain for anomaly detection. Specifically, we propose ADMoE, the first framework for anomaly detection algorithms to learn from noisy labels. In a nutshell, ADMoE leverages mixture-of-experts (MoE) architecture to encourage specialized and scalable learning from multiple noisy sources. It captures the similarities among noisy labels by sharing most model parameters, while encouraging specialization by building "expert" sub-networks. To further juice out the signals from noisy labels, ADMoE uses them as input features to facilitate expert learning. Extensive results on eight datasets (including a proprietary enterprise security dataset) demonstrate the effectiveness of ADMoE, where it brings up to 34% performance improvement over not using it. Also, it outperforms a total of 13 leading baselines with equivalent network parameters and FLOPS. 
Notably, ADMoE is model-agnostic to enable any neural network-based detection methods to handle noisy labels, where we showcase its results on both multiple-layer perceptron (MLP) and the leading AD method DeepSAD.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Yue and Zheng, Guoqing and Mukherjee, Subhabrata and McCann, Robert and Awadallah, Ahmed}, year={2023}, month={Jun.}, pages={4937-4945} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25620/25392", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25620", + "pdf_size": 403543, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14641795875719826345&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "cmu.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "cmu.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Carnegie Mellon University;Microsoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com", + "aff_unique_abbr": "CMU;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26208", + "title": "AEC-GAN: Adversarial Error Correction GANs for Auto-Regressive Long Time-Series Generation", + "track": "main", + "status": "Technical", + "abstract": "Large-scale high-quality data is critical for training modern deep neural networks. However, data acquisition can be costly or time-consuming for many time-series applications, thus researchers turn to generative models for generating synthetic time-series data. In particular, recent generative adversarial networks (GANs) have achieved remarkable success in time-series generation. 
Despite their success, existing GAN models typically generate the sequences in an auto-regressive manner, and we empirically observe that they suffer from severe distribution shifts and bias amplification, especially when generating long sequences. To resolve this problem, we propose Adversarial Error Correction GAN (AEC-GAN), which is capable of dynamically correcting the bias in the past generated data to alleviate the risk of distribution shifts and thus can generate high-quality long sequences. AEC-GAN contains two main innovations: (1) We develop an error correction module to mitigate the bias. In the training phase, we adversarially perturb the realistic time-series data and then optimize this module to reconstruct the original data. In the generation phase, this module can act as an efficient regulator to detect and mitigate the bias. (2) We propose an augmentation method to facilitate GAN's training by introducing adversarial examples. Thus, AEC-GAN can generate high-quality sequences of arbitrary lengths, and the synthetic data can be readily applied to downstream tasks to boost their performance. We conduct extensive experiments on six widely used datasets and three state-of-the-art time-series forecasting models to evaluate the quality of our synthetic time-series data in different lengths and downstream tasks. 
Both the qualitative and quantitative experimental results demonstrate the superior performance of AEC-GAN over other deep generative models for time-series generation.", + "primary_area": "machine learning iii", + "author": "Lei Wang; Liang Zeng; Jian Li", + "authorids": "", + "aff": "Institute for Interdisciplinary Information Sciences (IIIS), Tsinghua University; Institute for Interdisciplinary Information Sciences (IIIS), Tsinghua University; Institute for Interdisciplinary Information Sciences (IIIS), Tsinghua University", + "bibtex": "@article{Wang_Zeng_Li_2023, title={AEC-GAN: Adversarial Error Correction GANs for Auto-Regressive Long Time-Series Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26208}, DOI={10.1609/aaai.v37i8.26208}, abstractNote={Large-scale high-quality data is critical for training modern deep neural networks. However, data acquisition can be costly or time-consuming for many time-series applications, thus researchers turn to generative models for generating synthetic time-series data. In particular, recent generative adversarial networks (GANs) have achieved remarkable success in time-series generation. Despite their success, existing GAN models typically generate the sequences in an auto-regressive manner, and we empirically observe that they suffer from severe distribution shifts and bias amplification, especially when generating long sequences. To resolve this problem, we propose Adversarial Error Correction GAN (AEC-GAN), which is capable of dynamically correcting the bias in the past generated data to alleviate the risk of distribution shifts and thus can generate high-quality long sequences. AEC-GAN contains two main innovations: (1) We develop an error correction module to mitigate the bias. In the training phase, we adversarially perturb the realistic time-series data and then optimize this module to reconstruct the original data. 
In the generation phase, this module can act as an efficient regulator to detect and mitigate the bias. (2) We propose an augmentation method to facilitate GAN\u2019s training by introducing adversarial examples. Thus, AEC-GAN can generate high-quality sequences of arbitrary lengths, and the synthetic data can be readily applied to downstream tasks to boost their performance. We conduct extensive experiments on six widely used datasets and three state-of-the-art time-series forecasting models to evaluate the quality of our synthetic time-series data in different lengths and downstream tasks. Both the qualitative and quantitative experimental results demonstrate the superior performance of AEC-GAN over other deep generative models for time-series generation.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Lei and Zeng, Liang and Li, Jian}, year={2023}, month={Jun.}, pages={10140-10148} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26208/25980", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26208", + "pdf_size": 1548037, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8933739737855825937&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Institute for Interdisciplinary Information Sciences (IIIS)", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "Tsinghua", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26852", + "title": "AHPA: Adaptive Horizontal Pod Autoscaling Systems on Alibaba Cloud Container Service 
for Kubernetes", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The existing resource allocation policy for application instances in Kubernetes cannot dynamically adjust according to the requirement of business, which would cause an enormous waste of resources during fluctuations. Moreover, the emergence of new cloud services puts higher resource management requirements. This paper discusses horizontal POD resources management in Alibaba Cloud Container Services with a newly deployed AI algorithm framework named AHPA - the adaptive horizontal pod auto-scaling system. Based on a robust decomposition forecasting algorithm and performance training model, AHPA offers an optimal pod number adjustment plan that could reduce POD resources\nand maintain business stability. Since being deployed in April 2021, this system has expanded to multiple customer scenarios, including logistics, social networks, AI audio and video, e-commerce, etc. Compared with the previous algorithms, AHPA solves the elastic lag problem, increasing CPU usage by 10% and reducing resource cost by more than 20%. 
In addition, AHPA can automatically perform flexible planning according to the predicted business volume without manual intervention, significantly saving operation and maintenance costs.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Zhiqiang Zhou; Chaoli Zhang; Lingna Ma; Jing Gu; Huajie Qian; Qingsong Wen; Liang Sun; Peng Li; Zhimin Tang", + "authorids": "", + "aff": "DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; Alibaba Cloud, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Bellevue, WA, USA; DAMO Academy, Alibaba Group, Bellevue, WA, USA; DAMO Academy, Alibaba Group, Bellevue, WA, USA; Alibaba Cloud, Alibaba Group, Hangzhou, China; Alibaba Cloud, Alibaba Group, Hangzhou, China", + "bibtex": "@article{Zhou_Zhang_Ma_Gu_Qian_Wen_Sun_Li_Tang_2024, title={AHPA: Adaptive Horizontal Pod Autoscaling Systems on Alibaba Cloud Container Service for Kubernetes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26852}, DOI={10.1609/aaai.v37i13.26852}, abstractNote={The existing resource allocation policy for application instances in Kubernetes cannot dynamically adjust according to the requirement of business, which would cause an enormous waste of resources during fluctuations. Moreover, the emergence of new cloud services puts higher resource management requirements. This paper discusses horizontal POD resources management in Alibaba Cloud Container Services with a newly deployed AI algorithm framework named AHPA - the adaptive horizontal pod auto-scaling system. Based on a robust decomposition forecasting algorithm and performance training model, AHPA offers an optimal pod number adjustment plan that could reduce POD resources\nand maintain business stability. 
Since being deployed in April 2021, this system has expanded to multiple customer scenarios, including logistics, social networks, AI audio and video, e-commerce, etc. Compared with the previous algorithms, AHPA solves the elastic lag problem, increasing CPU usage by 10% and reducing resource cost by more than 20%. In addition, AHPA can automatically perform flexible planning according to the predicted business volume without manual intervention, significantly saving operation and maintenance costs.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Zhiqiang and Zhang, Chaoli and Ma, Lingna and Gu, Jing and Qian, Huajie and Wen, Qingsong and Sun, Liang and Li, Peng and Tang, Zhimin}, year={2024}, month={Jul.}, pages={15621-15629} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26852/26624", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26852", + "pdf_size": 3040880, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17720241771182959526&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "DAMO Academy", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "0;0;0;0;1;1;1;0;0", + "aff_campus_unique": "Hangzhou;Bellevue", + "aff_country_unique_index": "0;0;0;0;1;1;1;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26897", + "title": "AI Audit: A Card Game to Reflect on Everyday AI Systems", + "track": 
"eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "An essential element of K-12 AI literacy is educating learners about the ethical and societal implications of AI systems. Previous work in AI ethics literacy have developed curriculum and classroom activities that engage learners in reflecting on the ethical implications of AI systems and developing responsible AI. There is little work in using game-based learning methods in AI literacy. Games are known to be compelling media to teach children about complex STEM concepts. In this work, we developed a competitive card game for middle and high school students called \u201cAI Audit\u201d where they play as AI start-up founders building novel AI-powered technology. Players can challenge other players with potential harms of their technology or defend their own businesses by features that mitigate these harms. The game mechanics reward systems that are ethically developed or that take steps to mitigate potential harms. In this paper, we present the game design, teacher resources for classroom deployment and early playtesting results. We discuss our reflections about using games as teaching tools for AI literacy in K-12 classrooms.", + "primary_area": "", + "author": "Safinah Ali; Vishesh Kumar; Cynthia Breazeal", + "authorids": "", + "aff": "Massachusetts Institute of Technology; Northwestern University; Massachusetts Institute of Technology", + "bibtex": "@article{Ali_Kumar_Breazeal_2024, title={AI Audit: A Card Game to Reflect on Everyday AI Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26897}, DOI={10.1609/aaai.v37i13.26897}, abstractNote={An essential element of K-12 AI literacy is educating learners about the ethical and societal implications of AI systems. 
Previous work in AI ethics literacy have developed curriculum and classroom activities that engage learners in reflecting on the ethical implications of AI systems and developing responsible AI. There is little work in using game-based learning methods in AI literacy. Games are known to be compelling media to teach children about complex STEM concepts. In this work, we developed a competitive card game for middle and high school students called \u201cAI Audit\u201d where they play as AI start-up founders building novel AI-powered technology. Players can challenge other players with potential harms of their technology or defend their own businesses by features that mitigate these harms. The game mechanics reward systems that are ethically developed or that take steps to mitigate potential harms. In this paper, we present the game design, teacher resources for classroom deployment and early playtesting results. We discuss our reflections about using games as teaching tools for AI literacy in K-12 classrooms.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ali, Safinah and Kumar, Vishesh and Breazeal, Cynthia}, year={2024}, month={Jul.}, pages={15981-15989} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26897/26669", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26897", + "pdf_size": 2233811, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13005342048167552266&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "media.mit.edu;northwestern.edu;media.mit.edu", + "email": "media.mit.edu;northwestern.edu;media.mit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Northwestern University", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.northwestern.edu", + "aff_unique_abbr": "MIT;NU", + "aff_campus_unique_index": 
"", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26882", + "title": "AI Made by Youth: A Conversational AI Curriculum for Middle School Summer Camps", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "As artificial intelligence permeates our lives through various tools and services, there is an increasing need to consider how to teach young learners about AI in a relevant and engaging way. One way to do so is to leverage familiar and pervasive technologies such as conversational AIs. By learning about conversational AIs, learners are introduced to AI concepts such as computers\u2019 perception of natural language, the need for training datasets, and the design of AI-human interactions. In this experience report, we describe a summer camp curriculum designed for middle school learners composed of general AI lessons, unplugged activities, conversational AI lessons, and project activities in which the campers develop their own conversational agents. The results show that this summer camp experience fostered significant increases in learners\u2019 ability beliefs, willingness to share their learning experience, and intent to persist in AI learning. 
We conclude with a discussion of how conversational AI can be used as an entry point to K-12 AI education.", + "primary_area": "", + "author": "Yukyeong Song; Gloria Ashiya Katuka; Joanne Barrett; Xiaoyi Tian; Amit Kumar; Tom McKlin; Mehmet Celepkolu; Kristy Elizabeth Boyer; Maya Israel", + "authorids": "", + "aff": "University of Florida; University of Florida; University of Florida; University of Florida; University of Florida; The Findings Group; University of Florida; University of Florida; University of Florida", + "bibtex": "@article{Song_Ashiya Katuka_Barrett_Tian_Kumar_McKlin_Celepkolu_Boyer_Israel_2024, title={AI Made by Youth: A Conversational AI Curriculum for Middle School Summer Camps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26882}, DOI={10.1609/aaai.v37i13.26882}, abstractNote={As artificial intelligence permeates our lives through various tools and services, there is an increasing need to consider how to teach young learners about AI in a relevant and engaging way. One way to do so is to leverage familiar and pervasive technologies such as conversational AIs. By learning about conversational AIs, learners are introduced to AI concepts such as computers\u2019 perception of natural language, the need for training datasets, and the design of AI-human interactions. In this experience report, we describe a summer camp curriculum designed for middle school learners composed of general AI lessons, unplugged activities, conversational AI lessons, and project activities in which the campers develop their own conversational agents. The results show that this summer camp experience fostered significant increases in learners\u2019 ability beliefs, willingness to share their learning experience, and intent to persist in AI learning. 
We conclude with a discussion of how conversational AI can be used as an entry point to K-12 AI education.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Yukyeong and Ashiya Katuka, Gloria and Barrett, Joanne and Tian, Xiaoyi and Kumar, Amit and McKlin, Tom and Celepkolu, Mehmet and Boyer, Kristy Elizabeth and Israel, Maya}, year={2024}, month={Jul.}, pages={15851-15859} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26882/26654", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26882", + "pdf_size": 1405180, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6870375539098305217&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "ufl.edu;ufl.edu;ufl.edu;ufl.edu;ufl.edu;thefindingsgroup.org;ufl.edu;ufl.edu;ufl.edu", + "email": "ufl.edu;ufl.edu;ufl.edu;ufl.edu;ufl.edu;thefindingsgroup.org;ufl.edu;ufl.edu;ufl.edu", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;1;0;0;0", + "aff_unique_norm": "University of Florida;The Findings Group", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ufl.edu;", + "aff_unique_abbr": "UF;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States;" + }, + { + "id": "article-27081", + "title": "AI Model Factory: Scaling AI for Industry 4.0 Applications", + "track": "demonstrations", + "status": "Technical", + "abstract": "This demo paper discusses a scalable platform for emerging Data-Driven AI Applications targeted toward predictive maintenance solutions. We propose a common AI software architecture stack for building diverse AI Applications such as Anomaly Detection, Failure Pattern Analysis, Asset Health Forecasting, etc. for more than a 100K industrial assets of similar class. 
As a part of the AI system demonstration, we have identified the following three key topics for discussion: Scaling model training across multiple assets, Joint execution of multiple AI applications; and Bridge the gap between current open source software tools and the emerging need for AI Applications. To demonstrate the benefits, AI Model Factory has been tested to build the models for various industrial assets such as Wind turbines, Oil wells, etc. The system is deployed on API Hub for demonstration.", + "primary_area": "", + "author": "Dhaval Patel; Shuxin Lin; Dhruv Shah; Srideepika Jayaraman; Joern Ploennigs; Anuradha Bhamidipati; Jayant Kalagnanam", + "authorids": "", + "aff": "IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research", + "bibtex": "@article{Patel_Lin_Shah_Jayaraman_Ploennigs_Bhamidipati_Kalagnanam_2024, title={AI Model Factory: Scaling AI for Industry 4.0 Applications}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27081}, DOI={10.1609/aaai.v37i13.27081}, abstractNote={This demo paper discusses a scalable platform for emerging Data-Driven AI Applications targeted toward predictive maintenance solutions. We propose a common AI software architecture stack for building diverse AI Applications such as Anomaly Detection, Failure Pattern Analysis, Asset Health Forecasting, etc. for more than a 100K industrial assets of similar class. As a part of the AI system demonstration, we have identified the following three key topics for discussion: Scaling model training across multiple assets, Joint execution of multiple AI applications; and Bridge the gap between current open source software tools and the emerging need for AI Applications. To demonstrate the benefits, AI Model Factory has been tested to build the models for various industrial assets such as Wind turbines, Oil wells, etc. 
The system is deployed on API Hub for demonstration.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Patel, Dhaval and Lin, Shuxin and Shah, Dhruv and Jayaraman, Srideepika and Ploennigs, Joern and Bhamidipati, Anuradha and Kalagnanam, Jayant}, year={2024}, month={Jul.}, pages={16467-16469} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27081/26853", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27081", + "pdf_size": 180749, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15873550171417016206&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "us.ibm.com;ibm.com;ibm.com;ibm.com;ie.ibm.com;us.ibm.com;us.ibm.com", + "email": "us.ibm.com;ibm.com;ibm.com;ibm.com;ie.ibm.com;us.ibm.com;us.ibm.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26876", + "title": "AI and Parallelism in CS1: Experiences and Analysis", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "This work considers the use of AI and parallelism as a context for learning typical programming concepts in an introductory programming course (CS1). The course includes exercises in decision trees, a novel game called Find the Gnomes to introduce supervised learning, the construction and application of a vectorized neural network unit class, and obtaining speedup in training through parallelism. The exercises are designed to teach students typical introductory programming concepts while also providing a preview and motivating example of advanced CS topics. 
Students' understanding and motivation are considered through a detailed analysis of pre- and post-survey data gathered in several sections of the course each taught by one of four instructors across five semesters.", + "primary_area": "", + "author": "Steven Bogaerts", + "authorids": "", + "aff": "DePauw University", + "bibtex": "@article{Bogaerts_2024, title={AI and Parallelism in CS1: Experiences and Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26876}, DOI={10.1609/aaai.v37i13.26876}, abstractNote={This work considers the use of AI and parallelism as a context for learning typical programming concepts in an introductory programming course (CS1). The course includes exercises in decision trees, a novel game called Find the Gnomes to introduce supervised learning, the construction and application of a vectorized neural network unit class, and obtaining speedup in training through parallelism. The exercises are designed to teach students typical introductory programming concepts while also providing a preview and motivating example of advanced CS topics. 
Students\u2019 understanding and motivation are considered through a detailed analysis of pre- and post-survey data gathered in several sections of the course each taught by one of four instructors across five semesters.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bogaerts, Steven}, year={2024}, month={Jul.}, pages={15798-15806} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26876/26648", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26876", + "pdf_size": 145392, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:DZTUECNwI8UJ:scholar.google.com/&scioq=AI+and+Parallelism+in+CS1:+Experiences+and+Analysis&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "depauw.edu", + "email": "depauw.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "DePauw University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.depauw.edu", + "aff_unique_abbr": "DePauw", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26826", + "title": "AI for Equitable, Data-Driven Decisions in Public Health", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "As exemplified by the COVID-19 pandemic, our health and wellbeing depend on a difficult-to-measure web of societal factors and individual behaviors. This effort requires new algorithmic and data-driven paradigms which span the full process of gathering costly data, learning models to understand and predict such interactions, and optimizing the use of limited resources in interventions. In response to these needs, I present methodological developments at the intersection of machine learning, optimization, and social networks which are motivated by on-the-ground collaborations on HIV prevention, tuberculosis treatment, and the COVID-19 response. 
Here, I give an overview of two lines of work.", + "primary_area": "", + "author": "Bryan Wilder", + "authorids": "", + "aff": "Machine Learning Department, Carnegie Mellon University", + "bibtex": "@article{Wilder_2024, title={AI for Equitable, Data-Driven Decisions in Public Health}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26826}, DOI={10.1609/aaai.v37i13.26826}, abstractNote={As exemplified by the COVID-19 pandemic, our health and wellbeing depend on a difficult-to-measure web of societal factors and individual behaviors. This effort requires new algorithmic and data-driven paradigms which span the full process of gathering costly data, learning models to understand and predict such interactions, and optimizing the use of limited resources in interventions. In response to these needs, I present methodological developments at the intersection of machine learning, optimization, and social networks which are motivated by on-the-ground collaborations on HIV prevention, tuberculosis treatment, and the COVID-19 response. 
Here, I give an overview of two lines of work.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wilder, Bryan}, year={2024}, month={Jul.}, pages={15459-15459} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26826/26598", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26826", + "pdf_size": 46122, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:7FFkSrrBUvIJ:scholar.google.com/&scioq=AI+for+Equitable,+Data-Driven+Decisions+in+Public+Health&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "andrew.cmu.edu", + "email": "andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Machine Learning Department", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-27061", + "title": "AI-SNIPS: A Platform for Network Intelligence-Based Pharmaceutical Security", + "track": "demonstrations", + "status": "Technical", + "abstract": "This paper presents AI-SNIPS (AI Support for Network Intelligence-based Pharmaceutical Security), a production-ready platform that enables stakeholder decision-making, secure data sharing, and interdisciplinary research in the fight against Illicit, Substandard, and Falsified Medical Products (ISFMP). AI-SNIPS takes as input cases: a case consists of one or more URLs suspected of ISFMP activity. Cases can be supplemented with ground-truth structured data (labeled keywords) such as seller PII or case notes. First, AI-SNIPS scrapes and stores relevant images and text from the provided URLs without any user intervention. 
Salient features for predicting case similarity are extracted from the aggregated data using a combination of rule-based and machine-learning techniques and used to construct a seller network, with the nodes representing cases (sellers) and the edges representing the similarity between two sellers. Network analysis and community detection techniques are applied to extract seller clusters ranked by profitability and their potential to harm society. Lastly, AI-SNIPS provides interpretability by distilling common word/image similarities for each cluster into signature vectors. We validate the importance of AI-SNIPS's features for distinguishing large pharmaceutical affiliate networks from small ISFMP operations using an actual ISFMP lead sheet.", + "primary_area": "", + "author": "Timothy A. Burt; Nikos Passas; Ioannis A. Kakadiaris", + "authorids": "", + "aff": "Computational Biomedicine Lab (CBL), University of Houston, Houston, TX USA+Dept. of Physics, University of Houston, Houston, TX USA; School of Criminology and Criminal Justice, Northeastern University, Boston, MA USA; Computational Biomedicine Lab (CBL), University of Houston, Houston, TX USA+Dept. of Computer Science, University of Houston, Houston, TX USA", + "bibtex": "@article{Burt_Passas_Kakadiaris_2024, title={AI-SNIPS: A Platform for Network Intelligence-Based Pharmaceutical Security}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27061}, DOI={10.1609/aaai.v37i13.27061}, abstractNote={This paper presents AI-SNIPS (AI Support for Network Intelligence-based Pharmaceutical Security), a production-ready platform that enables stakeholder decision-making, secure data sharing, and interdisciplinary research in the fight against Illicit, Substandard, and Falsified Medical Products (ISFMP). AI-SNIPS takes as input cases: a case consists of one or more URLs suspected of ISFMP activity. 
Cases can be supplemented with ground-truth structured data (labeled keywords) such as seller PII or case notes. First, AI-SNIPS scrapes and stores relevant images and text from the provided URLs without any user intervention. Salient features for predicting case similarity are extracted from the aggregated data using a combination of rule-based and machine-learning techniques and used to construct a seller network, with the nodes representing cases (sellers) and the edges representing the similarity between two sellers. Network analysis and community detection techniques are applied to extract seller clusters ranked by profitability and their potential to harm society. Lastly, AI-SNIPS provides interpretability by distilling common word/image similarities for each cluster into signature vectors. We validate the importance of AI-SNIPS\u2019s features for distinguishing large pharmaceutical affiliate networks from small ISFMP operations using an actual ISFMP lead sheet.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Burt, Timothy A. 
and Passas, Nikos and Kakadiaris, Ioannis A.}, year={2024}, month={Jul.}, pages={16407-16409} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27061/26833", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27061", + "pdf_size": 11419492, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:s83Oj-KUKwYJ:scholar.google.com/&scioq=AI-SNIPS:+A+Platform+for+Network+Intelligence-Based+Pharmaceutical+Security&hl=en&as_sdt=0,44", + "gs_version_total": 4, + "aff_domain": "uh.edu; ;uh.edu", + "email": "uh.edu; ;uh.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;1;0+0", + "aff_unique_norm": "University of Houston;Northeastern University", + "aff_unique_dep": "Computational Biomedicine Lab (CBL);School of Criminology and Criminal Justice", + "aff_unique_url": "https://www.uh.edu;https://www.northeastern.edu", + "aff_unique_abbr": "UH;NU", + "aff_campus_unique_index": "0+0;1;0+0", + "aff_campus_unique": "Houston;Boston", + "aff_country_unique_index": "0+0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26101", + "title": "AIO-P: Expanding Neural Performance Predictors beyond Image Classification", + "track": "main", + "status": "Technical", + "abstract": "Evaluating neural network performance is critical to deep neural network design but a costly procedure. Neural predictors provide an efficient solution by treating architectures as samples and learning to estimate their performance on a given task. However, existing predictors are task-dependent, predominantly estimating neural network performance on image classification benchmarks. They are also search-space dependent; each predictor is designed to make predictions for a specific architecture search space with predefined topologies and set of operations. 
In this paper, we propose a novel All-in-One Predictor (AIO-P), which aims to pretrain neural predictors on architecture examples from multiple, separate computer vision (CV) task domains and multiple architecture spaces, and then transfer to unseen downstream CV tasks or neural architectures. We describe our proposed techniques for general graph representation, efficient predictor pretraining and knowledge infusion techniques, as well as methods to transfer to downstream tasks/spaces. Extensive experimental results show that AIO-P can achieve Mean Absolute Error (MAE) and Spearman\u2019s Rank Correlation (SRCC) below 1p% and above 0.5, respectively, on a breadth of target downstream CV tasks with or without fine-tuning, outperforming a number of baselines. Moreover, AIO-P can directly transfer to new architectures not seen during training, accurately rank them and serve as an effective performance estimator when paired with an algorithm designed to preserve performance while reducing FLOPs.", + "primary_area": "machine learning iii", + "author": "Keith G. Mills; Di Niu; Mohammad Salameh; Weichen Qiu; Fred X. 
Han; Puyuan Liu; Jialin Zhang; Wei Lu; Shangling Jui", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, University of Alberta + Huawei Technologies, Edmonton, Alberta, Canada; Department of Electrical and Computer Engineering, University of Alberta; Huawei Technologies, Edmonton, Alberta, Canada; Department of Electrical and Computer Engineering, University of Alberta; Huawei Technologies, Edmonton, Alberta, Canada; Huawei Technologies, Edmonton, Alberta, Canada; Huawei Kirin Solution, Shanghai, China; Huawei Technologies, Edmonton, Alberta, Canada; Huawei Kirin Solution, Shanghai, China", + "bibtex": "@article{Mills_Niu_Salameh_Qiu_Han_Liu_Zhang_Lu_Jui_2023, title={AIO-P: Expanding Neural Performance Predictors beyond Image Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26101}, DOI={10.1609/aaai.v37i8.26101}, abstractNote={Evaluating neural network performance is critical to deep neural network design but a costly procedure. Neural predictors provide an efficient solution by treating architectures as samples and learning to estimate their performance on a given task. However, existing predictors are task-dependent, predominantly estimating neural network performance on image classification benchmarks. They are also search-space dependent; each predictor is designed to make predictions for a specific architecture search space with predefined topologies and set of operations. In this paper, we propose a novel All-in-One Predictor (AIO-P), which aims to pretrain neural predictors on architecture examples from multiple, separate computer vision (CV) task domains and multiple architecture spaces, and then transfer to unseen downstream CV tasks or neural architectures. We describe our proposed techniques for general graph representation, efficient predictor pretraining and knowledge infusion techniques, as well as methods to transfer to downstream tasks/spaces. 
Extensive experimental results show that AIO-P can achieve Mean Absolute Error (MAE) and Spearman\u2019s Rank Correlation (SRCC) below 1p% and above 0.5, respectively, on a breadth of target downstream CV tasks with or without fine-tuning, outperforming a number of baselines. Moreover, AIO-P can directly transfer to new architectures not seen during training, accurately rank them and serve as an effective performance estimator when paired with an algorithm designed to preserve performance while reducing FLOPs.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mills, Keith G. and Niu, Di and Salameh, Mohammad and Qiu, Weichen and Han, Fred X. and Liu, Puyuan and Zhang, Jialin and Lu, Wei and Jui, Shangling}, year={2023}, month={Jun.}, pages={9180-9189} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26101/25873", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26101", + "pdf_size": 384686, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15115435541755632498&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "ualberta.ca;ualberta.ca;huawei.com;ualberta.ca;huawei.com;huawei.com;hisilicon.com;hisilicon.com;huawei.com", + "email": "ualberta.ca;ualberta.ca;huawei.com;ualberta.ca;huawei.com;huawei.com;hisilicon.com;hisilicon.com;huawei.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0;1;0;1;1;2;1;2", + "aff_unique_norm": "University of Alberta;Huawei Technologies;Huawei", + "aff_unique_dep": "Department of Electrical and Computer Engineering;;Kirin Solution", + "aff_unique_url": "https://www.ualberta.ca;https://www.huawei.com;https://www.huawei.com", + "aff_unique_abbr": "UAlberta;Huawei;Huawei", + "aff_campus_unique_index": "1;1;1;1;2;1;2", + "aff_campus_unique": ";Edmonton;Shanghai", + "aff_country_unique_index": "0+0;0;0;0;0;0;1;0;1", + "aff_country_unique": "Canada;China" + }, + { + "id": 
"article-26615", + "title": "AMOM: Adaptive Masking over Masking for Conditional Masked Language Model", + "track": "main", + "status": "Technical", + "abstract": "Transformer-based autoregressive (AR) methods have achieved appealing performance for varied sequence-to-sequence generation tasks, e.g., neural machine translation, summarization, and code generation, but suffer from low inference efficiency. To speed up the inference stage, many non-autoregressive (NAR) strategies have been proposed in the past few years. Among them, the conditional masked language model (CMLM) is one of the most versatile frameworks, as it can support many different sequence generation scenarios and achieve very competitive performance on these tasks. In this paper, we further introduce a simple yet effective adaptive masking over masking strategy to enhance the refinement capability of the decoder and make the encoder optimization easier. Experiments on 3 different tasks (neural machine translation, summarization, and code generation) with 15 datasets in total confirm that our proposed simple method achieves significant performance improvement over the strong CMLM model. Surprisingly, our proposed model yields state-of-the-art performance on neural machine translation (34.62 BLEU on WMT16 EN to RO, 34.82 BLEU on WMT16 RO to EN, and 34.84 BLEU on IWSLT De to En) and even better performance than the AR Transformer on 7 benchmark datasets with at least 2.2x speedup. 
Our code is available at GitHub.", + "primary_area": "speech natural language processing", + "author": "Yisheng Xiao; Ruiyang Xu; Lijun Wu; Juntao Li; Tao Qin; Tie-Yan Liu; Min Zhang", + "authorids": "", + "aff": "Institute of Computer Science and Technology, Soochow University; Institute of Computer Science and Technology, Soochow University; Microsoft Research Asia; Institute of Computer Science and Technology, Soochow University; Microsoft Research Asia; Microsoft Research Asia; Institute of Computer Science and Technology, Soochow University", + "bibtex": "@article{Xiao_Xu_Wu_Li_Qin_Liu_Zhang_2023, title={AMOM: Adaptive Masking over Masking for Conditional Masked Language Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26615}, DOI={10.1609/aaai.v37i11.26615}, abstractNote={Transformer-based autoregressive (AR) methods have achieved appealing performance for varied sequence-to-sequence generation tasks, e.g., neural machine translation, summarization, and code generation, but suffer from low inference efficiency. To speed up the inference stage, many non-autoregressive (NAR) strategies have been proposed in the past few years. Among them, the conditional masked language model (CMLM) is one of the most versatile frameworks, as it can support many different sequence generation scenarios and achieve very competitive performance on these tasks. In this paper, we further introduce a simple yet effective adaptive masking over masking strategy to enhance the refinement capability of the decoder and make the encoder optimization easier. Experiments on 3 different tasks (neural machine translation, summarization, and code generation) with 15 datasets in total confirm that our proposed simple method achieves significant performance improvement over the strong CMLM model. 
Surprisingly, our proposed model yields state-of-the-art performance on neural machine translation (34.62 BLEU on WMT16 EN to RO, 34.82 BLEU on WMT16 RO to EN, and 34.84 BLEU on IWSLT De to En) and even better performance than the AR Transformer on 7 benchmark datasets with at least 2.2x speedup. Our code is available at GitHub.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiao, Yisheng and Xu, Ruiyang and Wu, Lijun and Li, Juntao and Qin, Tao and Liu, Tie-Yan and Zhang, Min}, year={2023}, month={Jun.}, pages={13789-13797} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26615/26387", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26615", + "pdf_size": 164777, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3985260990428713713&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.suda.edu.cn;stu.suda.edu.cn;microsoft.com;suda.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "email": "stu.suda.edu.cn;stu.suda.edu.cn;microsoft.com;suda.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/amom-nar/AMOM", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;1;1;0", + "aff_unique_norm": "Soochow University;Microsoft Research", + "aff_unique_dep": "Institute of Computer Science and Technology;Research", + "aff_unique_url": "https://eng.suda.edu.cn/;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "Soochow U;MSR Asia", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26571", + "title": "AUC Maximization for Low-Resource Named Entity Recognition", + "track": "main", + "status": "Technical", + "abstract": "Current work in named entity recognition (NER) uses either cross entropy (CE) or conditional random fields (CRF) as the objective/loss 
functions to optimize the underlying NER model. Both of these traditional objective functions for the NER problem generally produce adequate performance when the data distribution is balanced and there are sufficient annotated training examples. But since NER is inherently an imbalanced tagging problem, the model performance under the low-resource settings could suffer using these standard objective functions. Based on recent advances in area under the ROC curve (AUC) maximization, we propose to optimize the NER model by maximizing the AUC score. We give evidence that by simply combining two binary-classifiers that maximize the AUC score, significant performance improvement over traditional loss functions is achieved under low-resource NER settings. We also conduct extensive experiments to demonstrate the advantages of our method under the low-resource and highly-imbalanced data distribution settings. To the best of our knowledge, this is the first work that brings AUC maximization to the NER setting. Furthermore, we show that our method is agnostic to different types of NER embeddings, models and domains. 
The code of this work is available at https://github.com/dngu0061/NER-AUC-2T.", + "primary_area": "speech natural language processing", + "author": "Ngoc Dang Nguyen; Wei Tan; Lan Du; Wray Buntine; RIchard Beare; Changyou Chen", + "authorids": "", + "aff": "Department of Data Science and Artificial Intelligence, Monash University; Department of Data Science and Artificial Intelligence, Monash University; Department of Data Science and Artificial Intelligence, Monash University; College of Engineering and Computer Science, VinUniversity; Department of Data Science and Artificial Intelligence, Monash University; Department of Computer Science and Engineering, University at Buffalo", + "bibtex": "@article{Nguyen_Tan_Du_Buntine_Beare_Chen_2023, title={AUC Maximization for Low-Resource Named Entity Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26571}, DOI={10.1609/aaai.v37i11.26571}, abstractNote={Current work in named entity recognition (NER) uses either cross entropy (CE) or conditional random fields (CRF) as the objective/loss functions to optimize the underlying NER model. Both of these traditional objective functions for the NER problem generally produce adequate performance when the data distribution is balanced and there are sufficient annotated training examples. But since NER is inherently an imbalanced tagging problem, the model performance under the low-resource settings could suffer using these standard objective functions. Based on recent advances in area under the ROC curve (AUC) maximization, we propose to optimize the NER model by maximizing the AUC score. We give evidence that by simply combining two binary-classifiers that maximize the AUC score, significant performance improvement over traditional loss functions is achieved under low-resource NER settings. We also conduct extensive experiments to demonstrate the advantages of our method under the low-resource and highly-imbalanced data distribution settings. 
To the best of our knowledge, this is the first work that brings AUC maximization to the NER setting. Furthermore, we show that our method is agnostic to different types of NER embeddings, models and domains. The code of this work is available at https://github.com/dngu0061/NER-AUC-2T.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Ngoc Dang and Tan, Wei and Du, Lan and Buntine, Wray and Beare, RIchard and Chen, Changyou}, year={2023}, month={Jun.}, pages={13389-13399} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26571/26343", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26571", + "pdf_size": 1195681, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=774480959363276156&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/dngu0061/NER-AUC-2T", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;2", + "aff_unique_norm": "Monash University;VinUniversity;University at Buffalo", + "aff_unique_dep": "Department of Data Science and Artificial Intelligence;College of Engineering and Computer Science;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.monash.edu;https://vinuni.edu.vn;https://www.buffalo.edu", + "aff_unique_abbr": "Monash;VinUni;UB", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Buffalo", + "aff_country_unique_index": "0;0;0;1;0;2", + "aff_country_unique": "Australia;Vietnam;United States" + }, + { + "id": "article-25078", + "title": "AVCAffe: A Large Scale Audio-Visual Dataset of Cognitive Load and Affect for Remote Work", + "track": "main", + "status": "Technical", + "abstract": "We introduce AVCAffe, the first Audio-Visual dataset consisting of Cognitive load and Affect attributes. 
We record AVCAffe by simulating remote work scenarios over a video-conferencing platform, where subjects collaborate to complete a number of cognitively engaging tasks. AVCAffe is the largest originally collected (not collected from the Internet) affective dataset in English language. We recruit 106 participants from 18 different countries of origin, spanning an age range of 18 to 57 years old, with a balanced male-female ratio. AVCAffe comprises a total of 108 hours of video, equivalent to more than 58,000 clips along with task-based self-reported ground truth labels for arousal, valence, and cognitive load attributes such as mental demand, temporal demand, effort, and a few others. We believe AVCAffe would be a challenging benchmark for the deep learning research community given the inherent difficulty of classifying affect and cognitive load in particular. Moreover, our dataset fills an existing timely gap by facilitating the creation of learning systems for better self-management of remote work meetings, and further study of hypotheses regarding the impact of remote work on cognitive load and affective states.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Pritam Sarkar; Aaron Posen; Ali Etemad", + "authorids": "", + "aff": "Queen\u2019s University, Canada + Vector Institute; Queen\u2019s University, Canada; Queen\u2019s University, Canada", + "bibtex": "@article{Sarkar_Posen_Etemad_2023, title={AVCAffe: A Large Scale Audio-Visual Dataset of Cognitive Load and Affect for Remote Work}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25078}, DOI={10.1609/aaai.v37i1.25078}, abstractNote={We introduce AVCAffe, the first Audio-Visual dataset consisting of Cognitive load and Affect attributes. We record AVCAffe by simulating remote work scenarios over a video-conferencing platform, where subjects collaborate to complete a number of cognitively engaging tasks. 
AVCAffe is the largest originally collected (not collected from the Internet) affective dataset in English language. We recruit 106 participants from 18 different countries of origin, spanning an age range of 18 to 57 years old, with a balanced male-female ratio. AVCAffe comprises a total of 108 hours of video, equivalent to more than 58,000 clips along with task-based self-reported ground truth labels for arousal, valence, and cognitive load attributes such as mental demand, temporal demand, effort, and a few others. We believe AVCAffe would be a challenging benchmark for the deep learning research community given the inherent difficulty of classifying affect and cognitive load in particular. Moreover, our dataset fills an existing timely gap by facilitating the creation of learning systems for better self-management of remote work meetings, and further study of hypotheses regarding the impact of remote work on cognitive load and affective states.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sarkar, Pritam and Posen, Aaron and Etemad, Ali}, year={2023}, month={Jun.}, pages={76-85} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25078/24850", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25078", + "pdf_size": 8879013, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3179024612677624335&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "queensu.ca;queensu.ca;queensu.ca", + "email": "queensu.ca;queensu.ca;queensu.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "Queen's University;Vector Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.queensu.ca;https://vectorinstitute.ai/", + "aff_unique_abbr": "Queen's U;Vector Institute", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": 
"Canada" + }, + { + "id": "article-25766", + "title": "Abstract Argumentation Framework with Conditional Preferences", + "track": "main", + "status": "Technical", + "abstract": "Dung's abstract Argumentation Framework (AF) has emerged as a central formalism in the area of knowledge representation and reasoning.\nPreferences in AF allow to represent the comparative strength of arguments in a simple yet expressive way. \nPreference-based AF (PAF) has been proposed to extend AF with preferences of the form a > b, whose intuitive meaning is that argument a is better than b. \nIn this paper we generalize PAF by introducing conditional preferences of the form a > b \\leftarrow body that informally state that a is better than b whenever the condition expressed by body is true.\nThe resulting framework, namely Conditional Preference-based AF (CPAF), extends the PAF semantics under three well-known preference criteria, i.e. democratic, elitist, and KTV. \nAfter introducing CPAF, we study the complexity of the verification problem (deciding whether a set of arguments is a ``best'' extension) as well as of the credulous and skeptical acceptance problems (deciding whether a given argument belongs to any or all ``best'' extensions, respectively) under multiple-status semantics (that is, complete, preferred, stable, and semi-stable semantics) for the above-mentioned preference criteria.", + "primary_area": "knowledge representation and reasoning", + "author": "Gianvincenzo Alfano; Sergio Greco; Francesco Parisi; Irina Trubitsyna", + "authorids": "", + "aff": "Department of Informatics, Modeling, Electronics and System Engineering, University of Calabria, Italy; Department of Informatics, Modeling, Electronics and System Engineering, University of Calabria, Italy; Department of Informatics, Modeling, Electronics and System Engineering, University of Calabria, Italy; Department of Informatics, Modeling, Electronics and System Engineering, University of Calabria, Italy", + 
"bibtex": "@article{Alfano_Greco_Parisi_Trubitsyna_2023, title={Abstract Argumentation Framework with Conditional Preferences}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25766}, DOI={10.1609/aaai.v37i5.25766}, abstractNote={Dung\u2019s abstract Argumentation Framework (AF) has emerged as a central formalism in the area of knowledge representation and reasoning.\nPreferences in AF allow to represent the comparative strength of arguments in a simple yet expressive way. Preference-based AF (PAF) has been proposed to extend AF with preferences of the form a > b, whose intuitive meaning is that argument a is better than b. In this paper we generalize PAF by introducing conditional preferences of the form a > b \\leftarrow body that informally state that a is better than b whenever the condition expressed by body is true.\nThe resulting framework, namely Conditional Preference-based AF (CPAF), extends the PAF semantics under three well-known preference criteria, i.e. democratic, elitist, and KTV. 
After introducing CPAF, we study the complexity of the verification problem (deciding whether a set of arguments is a ``best\u2019\u2019 extension) as well as of the credulous and skeptical acceptance problems (deciding whether a given argument belongs to any or all ``best\u2019\u2019 extensions, respectively) under multiple-status semantics (that is, complete, preferred, stable, and semi-stable semantics) for the above-mentioned preference criteria.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Alfano, Gianvincenzo and Greco, Sergio and Parisi, Francesco and Trubitsyna, Irina}, year={2023}, month={Jun.}, pages={6218-6227} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25766/25538", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25766", + "pdf_size": 186882, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4991971397364699699&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "dimes.unical.it;dimes.unical.it;dimes.unical.it;dimes.unical.it", + "email": "dimes.unical.it;dimes.unical.it;dimes.unical.it;dimes.unical.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Calabria", + "aff_unique_dep": "Department of Informatics, Modeling, Electronics and System Engineering", + "aff_unique_url": "https://www.unical.it", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26719", + "title": "Accelerating Inverse Learning via Intelligent Localization with Exploratory Sampling", + "track": "aaai special track", + "status": "Technical", + "abstract": "In the scope of \"AI for Science\", solving inverse problems is a longstanding challenge in materials and drug discovery, where the goal is to determine the hidden structures given a set of 
desirable properties. Deep generative models are recently proposed to solve inverse problems, but these are currently struggling in expensive forward operators, precisely localizing the exact solutions and fully exploring the parameter spaces without missing solutions. In this work, we propose a novel approach (called iPage) to accelerate the inverse learning process by leveraging probabilistic inference from deep invertible models and deterministic optimization via fast gradient descent. Given a target property, the learned invertible model provides a posterior over the parameter space; we identify these posterior samples as an intelligent prior initialization which enables us to narrow down the search space. We then perform gradient descent to calibrate the inverse solutions within a local region. Meanwhile, a space-filling sampling is imposed on the latent space to better explore and capture all possible solutions. We evaluate our approach on three benchmark tasks and create two datasets of real-world applications from quantum chemistry and additive manufacturing and find our method achieves superior performance compared to several state-of-the-art baseline methods. The iPage code is available at https://github.com/jxzhangjhu/MatDesINNe.", + "primary_area": "safe and robust ai", + "author": "Sirui Bi; Victor Fung; Jiaxin Zhang", + "authorids": "", + "aff": "Walmart Global Tech; Georgia Institute of Technology; Intuit AI Research", + "bibtex": "@article{Bi_Fung_Zhang_2023, title={Accelerating Inverse Learning via Intelligent Localization with Exploratory Sampling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26719}, DOI={10.1609/aaai.v37i12.26719}, abstractNote={In the scope of "AI for Science", solving inverse problems is a longstanding challenge in materials and drug discovery, where the goal is to determine the hidden structures given a set of desirable properties. 
Deep generative models are recently proposed to solve inverse problems, but these are currently struggling in expensive forward operators, precisely localizing the exact solutions and fully exploring the parameter spaces without missing solutions. In this work, we propose a novel approach (called iPage) to accelerate the inverse learning process by leveraging probabilistic inference from deep invertible models and deterministic optimization via fast gradient descent. Given a target property, the learned invertible model provides a posterior over the parameter space; we identify these posterior samples as an intelligent prior initialization which enables us to narrow down the search space. We then perform gradient descent to calibrate the inverse solutions within a local region. Meanwhile, a space-filling sampling is imposed on the latent space to better explore and capture all possible solutions. We evaluate our approach on three benchmark tasks and create two datasets of real-world applications from quantum chemistry and additive manufacturing and find our method achieves superior performance compared to several state-of-the-art baseline methods. 
The iPage code is available at https://github.com/jxzhangjhu/MatDesINNe.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bi, Sirui and Fung, Victor and Zhang, Jiaxin}, year={2023}, month={Jun.}, pages={14711-14719} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26719/26491", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26719", + "pdf_size": 2758981, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10586152775667448683&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gatech.edu;gmail.com", + "email": "gmail.com;gatech.edu;gmail.com", + "github": "https://github.com/jxzhangjhu/MatDesINNe", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Walmart Global Tech;Georgia Institute of Technology;Intuit", + "aff_unique_dep": ";;Intuit AI Research", + "aff_unique_url": "https://www.walmart.com/careers/globaltech;https://www.gatech.edu;https://intuit.com/", + "aff_unique_abbr": "Walmart GT;Georgia Tech;Intuit", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25246", + "title": "Accelerating the Training of Video Super-resolution Models", + "track": "main", + "status": "Technical", + "abstract": "Despite that convolution neural networks (CNN) have recently demonstrated high-quality reconstruction for video super-resolution (VSR), efficiently training competitive VSR models remains a challenging problem. It usually takes an order of magnitude more time than training their counterpart image models, leading to long research cycles. Existing VSR methods typically train models with fixed spatial and temporal sizes from beginning to end. The fixed sizes are usually set to large values for good performance, resulting to slow training. 
However, is such a rigid training strategy necessary for VSR? In this work, we show that it is possible to gradually train video models from small to large spatial/temporal sizes, \\ie, in an easy-to-hard manner. In particular, the whole training is divided into several stages and the earlier stage has smaller training spatial shape. Inside each stage, the temporal size also varies from short to long while the spatial size remains unchanged. Training is accelerated by such a multigrid training strategy, as most of computation is performed on smaller spatial and shorter temporal shapes. For further acceleration with GPU parallelization, we also investigate the large minibatch training without the loss in accuracy. Extensive experiments demonstrate that our method is capable of largely speeding up training (up to $6.2\\times$ speedup in wall-clock training time) without performance drop for various VSR models.", + "primary_area": "computer vision ii", + "author": "Lijian Lin; Xintao Wang; Zhongang Qi; Ying Shan", + "authorids": "", + "aff": "ARC Lab, Tencent PCG; ARC Lab, Tencent PCG; ARC Lab, Tencent PCG; ARC Lab, Tencent PCG", + "bibtex": "@article{Lin_Wang_Qi_Shan_2023, title={Accelerating the Training of Video Super-resolution Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25246}, DOI={10.1609/aaai.v37i2.25246}, abstractNote={Despite that convolution neural networks (CNN) have recently demonstrated high-quality reconstruction for video super-resolution (VSR), efficiently training competitive VSR models remains a challenging problem. It usually takes an order of magnitude more time than training their counterpart image models, leading to long research cycles. Existing VSR methods typically train models with fixed spatial and temporal sizes from beginning to end. The fixed sizes are usually set to large values for good performance, resulting to slow training. However, is such a rigid training strategy necessary for VSR? 
In this work, we show that it is possible to gradually train video models from small to large spatial/temporal sizes, \\ie, in an easy-to-hard manner. In particular, the whole training is divided into several stages and the earlier stage has smaller training spatial shape. Inside each stage, the temporal size also varies from short to long while the spatial size remains unchanged. Training is accelerated by such a multigrid training strategy, as most of computation is performed on smaller spatial and shorter temporal shapes. For further acceleration with GPU parallelization, we also investigate the large minibatch training without the loss in accuracy. Extensive experiments demonstrate that our method is capable of largely speeding up training (up to $6.2\\times$ speedup in wall-clock training time) without performance drop for various VSR models.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Lijian and Wang, Xintao and Qi, Zhongang and Shan, Ying}, year={2023}, month={Jun.}, pages={1595-1603} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25246/25018", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25246", + "pdf_size": 1695961, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13445757956799406848&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.xmu.edu.cn;gmail.com;tencent.com;tencent.com", + "email": "stu.xmu.edu.cn;gmail.com;tencent.com;tencent.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "ARC Lab", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26321", + "title": "Acceleration of Large Transformer Model Training by Sensitivity-Based 
Layer Dropping", + "track": "main", + "status": "Technical", + "abstract": "Transformer models are widely used in AI applications such as Natural Language Processing (NLP), Computer Vision (CV), etc. However, enormous computation workload be-comes an obstacle to train large transformer models efficiently. Recently, some methods focus on reducing the computation workload during the training by skipping some layers. How-ever, these methods use simple probability distribution and coarse-grained probability calculation, which significantly affect the model accuracy. To address the issue, in this paper we propose a novel method to accelerate training\u2014Sensitivity-Based Layer Dropping (SBLD). SBLD uses lay-er-wise sensitivity data to switch on/off transformer layers in proper order to keep high accuracy. Besides, we adjust the probability of skipping transformer layers with a scheduler to accelerate training speed and get faster convergence. Our results show that SBLD solves the accuracy drop issue com-pared with prior layer dropping methods. Our SBLD method can decrease end-to-end training time by 19.67% during training of GPT-3 Medium model, the same time increasing the accuracy by 1.65% w.r.t. baseline. Furthermore, for SwinV2-L model the obtained Top-1 and Top-5 accuracies are also higher vs. the baseline. 
Thus, the proposed method is efficient and practical to improve the large transformer model training.", + "primary_area": "machine learning iv", + "author": "Yujie Zeng; Wenlong He; Ihor Vasyltsov; Jiali Pang; Lin Chen", + "authorids": "", + "aff": "Samsung R&D Institute China Xian; Samsung Advanced Institute of Technology; Samsung R&D Institute China Xian; Samsung R&D Institute China Xian; Samsung R&D Institute China Xian", + "bibtex": "@article{Zeng_He_Vasyltsov_Pang_Chen_2023, title={Acceleration of Large Transformer Model Training by Sensitivity-Based Layer Dropping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26321}, DOI={10.1609/aaai.v37i9.26321}, abstractNote={Transformer models are widely used in AI applications such as Natural Language Processing (NLP), Computer Vision (CV), etc. However, enormous computation workload be-comes an obstacle to train large transformer models efficiently. Recently, some methods focus on reducing the computation workload during the training by skipping some layers. How-ever, these methods use simple probability distribution and coarse-grained probability calculation, which significantly affect the model accuracy. To address the issue, in this paper we propose a novel method to accelerate training\u2014Sensitivity-Based Layer Dropping (SBLD). SBLD uses lay-er-wise sensitivity data to switch on/off transformer layers in proper order to keep high accuracy. Besides, we adjust the probability of skipping transformer layers with a scheduler to accelerate training speed and get faster convergence. Our results show that SBLD solves the accuracy drop issue com-pared with prior layer dropping methods. Our SBLD method can decrease end-to-end training time by 19.67% during training of GPT-3 Medium model, the same time increasing the accuracy by 1.65% w.r.t. baseline. Furthermore, for SwinV2-L model the obtained Top-1 and Top-5 accuracies are also higher vs. the baseline. 
Thus, the proposed method is efficient and practical to improve the large transformer model training.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Yujie and He, Wenlong and Vasyltsov, Ihor and Pang, Jiali and Chen, Lin}, year={2023}, month={Jun.}, pages={11156-11163} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26321/26093", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26321", + "pdf_size": 688275, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3228501166104080396&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "samsung.com;samsung.com;samsung.com;samsung.com;samsung.com", + "email": "samsung.com;samsung.com;samsung.com;samsung.com;samsung.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Samsung R&D Institute;Samsung Advanced Institute of Technology", + "aff_unique_dep": "R&D;", + "aff_unique_url": "https://www.samsung.com/cn;https://www.sait.samsung.com", + "aff_unique_abbr": "SRI-Xian;SAIT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Xian;", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "China;South Korea" + }, + { + "id": "article-26153", + "title": "Accommodating Audio Modality in CLIP for Multimodal Processing", + "track": "main", + "status": "Technical", + "abstract": "Multimodal processing has attracted much attention lately especially with the success of pre-training. However, the exploration has mainly focused on vision-language pre-training, as introducing more modalities can greatly complicate model design and optimization. In this paper, we extend the state-of-the-art Vision-Language model CLIP to accommodate the audio modality for Vision-Language-Audio multimodal processing. 
Specifically, we apply inter-modal and intra-modal contrastive learning to explore the correlation between audio and other modalities in addition to the inner characteristics of the audio modality. Moreover, we further design an audio type token to dynamically learn different audio information type for different scenarios, as both verbal and nonverbal heterogeneous information is conveyed in general audios. Our proposed CLIP4VLA model is validated in different downstream tasks including video retrieval and video captioning, and achieves the state-of-the-art performance on the benchmark datasets of MSR-VTT, VATEX, and Audiocaps.The corresponding code and checkpoints will be released at https://github.com/ludanruan/CLIP4VLA.", + "primary_area": "machine learning iii", + "author": "Ludan Ruan; Anwen Hu; Yuqing Song; Liang Zhang; Sipeng Zheng; Qin Jin", + "authorids": "", + "aff": "School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China", + "bibtex": "@article{Ruan_Hu_Song_Zhang_Zheng_Jin_2023, title={Accommodating Audio Modality in CLIP for Multimodal Processing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26153}, DOI={10.1609/aaai.v37i8.26153}, abstractNote={Multimodal processing has attracted much attention lately especially with the success of pre-training. However, the exploration has mainly focused on vision-language pre-training, as introducing more modalities can greatly complicate model design and optimization. In this paper, we extend the state-of-the-art Vision-Language model CLIP to accommodate the audio modality for Vision-Language-Audio multimodal processing. 
Specifically, we apply inter-modal and intra-modal contrastive learning to explore the correlation between audio and other modalities in addition to the inner characteristics of the audio modality. Moreover, we further design an audio type token to dynamically learn different audio information type for different scenarios, as both verbal and nonverbal heterogeneous information is conveyed in general audios. Our proposed CLIP4VLA model is validated in different downstream tasks including video retrieval and video captioning, and achieves the state-of-the-art performance on the benchmark datasets of MSR-VTT, VATEX, and Audiocaps.The corresponding code and checkpoints will be released at https://github.com/ludanruan/CLIP4VLA.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ruan, Ludan and Hu, Anwen and Song, Yuqing and Zhang, Liang and Zheng, Sipeng and Jin, Qin}, year={2023}, month={Jun.}, pages={9641-9649} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26153/25925", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26153", + "pdf_size": 1712437, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9317148094068407682&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "https://github.com/ludanruan/CLIP4VLA", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Renmin University of China", + "aff_unique_dep": "School of Information", + "aff_unique_url": "http://www.ruc.edu.cn", + "aff_unique_abbr": "RUC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26806", + "title": "Accountability Layers: Explaining Complex System Failures by 
Parts", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "With the rise of AI used for critical decision-making, many important predictions are made by complex and opaque AI algorithms. The aim of eXplainable Artificial Intelligence (XAI) is to make these opaque decision-making algorithms more transparent and trustworthy. This is often done by constructing an ``explainable model'' for a single modality or subsystem. However, this approach fails for complex systems that are made out of multiple parts. In this paper, I discuss how to explain complex system failures. I represent a complex machine as a hierarchical model of introspective sub-systems working together towards a common goal. The subsystems communicate in a common symbolic language. This work creates a set of explanatory accountability layers for trustworthy AI.", + "primary_area": "", + "author": "Leilani H. Gilpin", + "authorids": "", + "aff": "UC Santa Cruz", + "bibtex": "@article{Gilpin_2024, title={Accountability Layers: Explaining Complex System Failures by Parts}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26806}, DOI={10.1609/aaai.v37i13.26806}, abstractNote={With the rise of AI used for critical decision-making, many important predictions are made by complex and opaque AI algorithms. The aim of eXplainable Artificial Intelligence (XAI) is to make these opaque decision-making algorithms more transparent and trustworthy. This is often done by constructing an ``explainable model\u2019\u2019 for a single modality or subsystem. However, this approach fails for complex systems that are made out of multiple parts. In this paper, I discuss how to explain complex system failures. I represent a complex machine as a hierarchical model of introspective sub-systems working together towards a common goal. The subsystems communicate in a common symbolic language. 
This work creates a set of explanatory accountability layers for trustworthy AI.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gilpin, Leilani H.}, year={2024}, month={Jul.}, pages={15439-15439} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26806/26578", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26806", + "pdf_size": 44869, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:gY_G04vedXUJ:scholar.google.com/&scioq=Accountability+Layers:+Explaining+Complex+System+Failures+by+Parts&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "ucsc.edu", + "email": "ucsc.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of California, Santa Cruz", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsc.edu", + "aff_unique_abbr": "UCSC", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Santa Cruz", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26834", + "title": "Accurate Detection of Weld Seams for Laser Welding in Real-World Manufacturing", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Welding is a fabrication process used to join or fuse two mechanical parts. Modern welding machines have automated lasers that follow a pre-defined weld seam path between the two parts to create a bond. Previous efforts have used simple computer vision edge detectors to automatically detect the weld seam edge on an image at the junction of two metals to be welded. However, these systems lack reliability and accuracy resulting in manual human verification of the detected edges. This paper presents a neural network architecture that automatically detects the weld seam edge between two metals with high accuracy. 
We augment this system with a pre-classifier that filters out anomalous workpieces (e.g., incorrect placement). Finally, we justify our design choices by evaluating against several existing deep network pipelines as well as proof through real-world use. We also describe in detail the process of deploying this system in a real-world shop floor including evaluation and monitoring. We make public a large, well-labeled laser seam dataset to perform deep learning-based edge detection in industrial settings.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Rabia Ali; Muhammad Sarmad; Jawad Tayyub; Alexander Vogel", + "authorids": "", + "aff": "Norwegian University Of Science and Technology, Trondheim, Norway; Norwegian University Of Science and Technology, Trondheim, Norway; Endress + Hauser, Germany; Endress + Hauser, Germany", + "bibtex": "@article{Ali_Sarmad_Tayyub_Vogel_2024, title={Accurate Detection of Weld Seams for Laser Welding in Real-World Manufacturing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26834}, DOI={10.1609/aaai.v37i13.26834}, abstractNote={Welding is a fabrication process used to join or fuse two mechanical parts. Modern welding machines have automated lasers that follow a pre-defined weld seam path between the two parts to create a bond. Previous efforts have used simple computer vision edge detectors to automatically detect the weld seam edge on an image at the junction of two metals to be welded. However, these systems lack reliability and accuracy resulting in manual human verification of the detected edges. This paper presents a neural network architecture that automatically detects the weld seam edge between two metals with high accuracy. We augment this system with a pre-classifier that filters out anomalous workpieces (e.g., incorrect placement). 
Finally, we justify our design choices by evaluating against several existing deep network pipelines as well as proof through real-world use. We also describe in detail the process of deploying this system in a real-world shop floor including evaluation and monitoring. We make public a large, well-labeled laser seam dataset to perform deep learning-based edge detection in industrial settings.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ali, Rabia and Sarmad, Muhammad and Tayyub, Jawad and Vogel, Alexander}, year={2024}, month={Jul.}, pages={15468-15475} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26834/26606", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26834", + "pdf_size": 2456747, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12235928974443822394&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;ntnu.no;endress.com;endress.com", + "email": "gmail.com;ntnu.no;endress.com;endress.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+2;1+2", + "aff_unique_norm": "Norwegian University of Science and Technology;Endress;Hauser", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ntnu.no;;", + "aff_unique_abbr": "NTNU;;", + "aff_campus_unique_index": "0;0;;", + "aff_campus_unique": "Trondheim;", + "aff_country_unique_index": "0;0;2;2", + "aff_country_unique": "Norway;;Germany" + }, + { + "id": "article-26674", + "title": "Accurate Fairness: Improving Individual Fairness without Trading Accuracy", + "track": "aaai special track", + "status": "Technical", + "abstract": "Accuracy and individual fairness are both crucial for trustworthy machine learning, but these two aspects are often incompatible with each other so that enhancing one aspect may sacrifice the other inevitably with side effects of true bias or false fairness. 
We propose in this paper a new fairness criterion, accurate fairness, to align individual fairness with accuracy. Informally, it requires the treatments of an individual and the individual's similar counterparts to conform to a uniform target, i.e., the ground truth of the individual. We prove that accurate fairness also implies typical group fairness criteria over a union of similar sub-populations. We then present a Siamese fairness in-processing approach to minimize the accuracy and fairness losses of a machine learning model under the accurate fairness constraints. To the best of our knowledge, this is the first time that a Siamese approach is adapted for bias mitigation. We also propose fairness confusion matrix-based metrics, fair-precision, fair-recall, and fair-F1 score, to quantify a trade-off between accuracy and individual fairness. Comparative case studies with popular fairness datasets show that our Siamese fairness approach can achieve on average 1.02%-8.78% higher individual fairness (in terms of fairness through awareness) and 8.38%-13.69% higher accuracy, as well as 10.09%-20.57% higher true fair rate, and 5.43%-10.01% higher fair-F1 score, than the state-of-the-art bias mitigation techniques. This demonstrates that our Siamese fairness approach can indeed improve individual fairness without trading accuracy. 
Finally, the accurate fairness criterion and Siamese fairness approach are applied to mitigate the possible service discrimination with a real Ctrip dataset, by on average fairly serving 112.33% more customers (specifically, 81.29% more customers in an accurately fair way) than baseline models.", + "primary_area": "ai for social impact", + "author": "Xuran Li; Peng Wu; Jing Su", + "authorids": "", + "aff": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China + University of Chinese Academy of Sciences, Beijing, China; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China + University of Chinese Academy of Sciences, Beijing, China; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China + University of Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Li_Wu_Su_2023, title={Accurate Fairness: Improving Individual Fairness without Trading Accuracy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26674}, DOI={10.1609/aaai.v37i12.26674}, abstractNote={Accuracy and individual fairness are both crucial for trustworthy machine learning, but these two aspects are often incompatible with each other so that enhancing one aspect may sacrifice the other inevitably with side effects of true bias or false fairness. We propose in this paper a new fairness criterion, accurate fairness, to align individual fairness with accuracy. Informally, it requires the treatments of an individual and the individual\u2019s similar counterparts to conform to a uniform target, i.e., the ground truth of the individual. We prove that accurate fairness also implies typical group fairness criteria over a union of similar sub-populations. 
We then present a Siamese fairness in-processing approach to minimize the accuracy and fairness losses of a machine learning model under the accurate fairness constraints. To the best of our knowledge, this is the first time that a Siamese approach is adapted for bias mitigation. We also propose fairness confusion matrix-based metrics, fair-precision, fair-recall, and fair-F1 score, to quantify a trade-off between accuracy and individual fairness. Comparative case studies with popular fairness datasets show that our Siamese fairness approach can achieve on average 1.02%-8.78% higher individual fairness (in terms of fairness through awareness) and 8.38%-13.69% higher accuracy, as well as 10.09%-20.57% higher true fair rate, and 5.43%-10.01% higher fair-F1 score, than the state-of-the-art bias mitigation techniques. This demonstrates that our Siamese fairness approach can indeed improve individual fairness without trading accuracy. Finally, the accurate fairness criterion and Siamese fairness approach are applied to mitigate the possible service discrimination with a real Ctrip dataset, by on average fairly serving 112.33% more customers (specifically, 81.29% more customers in an accurately fair way) than baseline models.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xuran and Wu, Peng and Su, Jing}, year={2023}, month={Jun.}, pages={14312-14320} }",
    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26674/26446",
    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26674",
    "pdf_size": 257369,
    "gs_citation": 24,
    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=548851104867158675&as_sdt=2005&sciodt=0,5&hl=en",
    "gs_version_total": 5,
    "aff_domain": "ios.ac.cn;ios.ac.cn;ios.ac.cn",
    "email": "ios.ac.cn;ios.ac.cn;ios.ac.cn",
    "github": "",
    "project": "",
    "author_num": 3,
    "aff_unique_index": "0+1;0+1;0+1",
    "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences",
We demonstrate the merits of proposed algorithm via experimental evaluations.", + "primary_area": "machine learning i", + "author": "Qinbo Bai; Amrit Singh Bedi; Vaneet Aggarwal", + "authorids": "", + "aff": "Purdue University; University of Maryland; Purdue University", + "bibtex": "@article{Bai_Singh Bedi_Aggarwal_2023, title={Achieving Zero Constraint Violation for Constrained Reinforcement Learning via Conservative Natural Policy Gradient Primal-Dual Algorithm}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25826}, DOI={10.1609/aaai.v37i6.25826}, abstractNote={We consider the problem of constrained Markov decision process (CMDP) in continuous state actions spaces where the goal is to maximize the expected cumulative reward subject to some constraints. We propose a novel Conservative Natural Policy Gradient Primal Dual Algorithm (CNPGPD) to achieve zero constraint violation while achieving state of the art convergence results for the objective value function. For general policy parametrization, we prove convergence of value function to global optimal upto an approximation error due to restricted policy class. We improve the sample complexity of existing constrained NPGPD algorithm. To the best of our knowledge, this is the first work to establish zero constraint violation with Natural policy gradient style algorithms for infinite horizon discounted CMDPs. 
We demonstrate the merits of proposed algorithm via experimental evaluations.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bai, Qinbo and Singh Bedi, Amrit and Aggarwal, Vaneet}, year={2023}, month={Jun.}, pages={6737-6744} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25826/25598", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25826", + "pdf_size": 199267, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9012852044066200005&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "purdue.edu; ;purdue.edu", + "email": "purdue.edu; ;purdue.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Purdue University;University of Maryland", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.purdue.edu;https://www/umd.edu", + "aff_unique_abbr": "Purdue;UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25308", + "title": "Action-Conditioned Generation of Bimanual Object Manipulation Sequences", + "track": "main", + "status": "Technical", + "abstract": "The generation of bimanual object manipulation sequences given a semantic action label has broad applications in collaborative robots or augmented reality. This relatively new problem differs from existing works that generate whole-body motions without any object interaction as it now requires the model to additionally learn the spatio-temporal relationship that exists between the human joints and object motion given said label. To tackle this task, we leverage the varying degree each muscle or joint is involved during object manipulation. For instance, the wrists act as the prime movers for the objects while the finger joints are angled to provide a firm grip. 
The remaining body joints are the least involved in that they are positioned as naturally and comfortably as possible. We thus design an architecture that comprises 3 main components: (i) a graph recurrent network that generates the wrist and object motion, (ii) an attention-based recurrent network that estimates the required finger joint angles given the graph configuration, and (iii) a recurrent network that reconstructs the body pose given the locations of the wrist. We evaluate our approach on the KIT Motion Capture and KIT RGBD Bimanual Manipulation datasets and show improvements over a simplified approach that treats the entire body as a single entity, and existing whole-body-only methods.", + "primary_area": "computer vision ii", + "author": "Haziq Razali; Yiannis Demiris", + "authorids": "", + "aff": "Personal Robotics Lab, Dept. of Electrical and Electronic Engineering, Imperial College London; Personal Robotics Lab, Dept. of Electrical and Electronic Engineering, Imperial College London", + "bibtex": "@article{Razali_Demiris_2023, title={Action-Conditioned Generation of Bimanual Object Manipulation Sequences}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25308}, DOI={10.1609/aaai.v37i2.25308}, abstractNote={The generation of bimanual object manipulation sequences given a semantic action label has broad applications in collaborative robots or augmented reality. This relatively new problem differs from existing works that generate whole-body motions without any object interaction as it now requires the model to additionally learn the spatio-temporal relationship that exists between the human joints and object motion given said label. To tackle this task, we leverage the varying degree each muscle or joint is involved during object manipulation. For instance, the wrists act as the prime movers for the objects while the finger joints are angled to provide a firm grip. 
The remaining body joints are the least involved in that they are positioned as naturally and comfortably as possible. We thus design an architecture that comprises 3 main components: (i) a graph recurrent network that generates the wrist and object motion, (ii) an attention-based recurrent network that estimates the required finger joint angles given the graph configuration, and (iii) a recurrent network that reconstructs the body pose given the locations of the wrist. We evaluate our approach on the KIT Motion Capture and KIT RGBD Bimanual Manipulation datasets and show improvements over a simplified approach that treats the entire body as a single entity, and existing whole-body-only methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Razali, Haziq and Demiris, Yiannis}, year={2023}, month={Jun.}, pages={2146-2154} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25308/25080", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25308", + "pdf_size": 8946575, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14099094782920486473&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "imperial.ac.uk;imperial.ac.uk", + "email": "imperial.ac.uk;imperial.ac.uk", + "github": "", + "project": "www.imperial.ac.uk/personal-robotics/software", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Imperial College London", + "aff_unique_dep": "Dept. 
of Electrical and Electronic Engineering", + "aff_unique_url": "https://www.imperial.ac.uk", + "aff_unique_abbr": "ICL", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25243", + "title": "Actional Atomic-Concept Learning for Demystifying Vision-Language Navigation", + "track": "main", + "status": "Technical", + "abstract": "Vision-Language Navigation (VLN) is a challenging task which requires an agent to align complex visual observations to language instructions to reach the goal position. Most existing VLN agents directly learn to align the raw directional features and visual features trained using one-hot labels to linguistic instruction features. However, the big semantic gap among these multi-modal inputs makes the alignment difficult and therefore limits the navigation performance. In this paper, we propose Actional Atomic-Concept Learning (AACL), which maps visual observations to actional atomic concepts for facilitating the alignment. Specifically, an actional atomic concept is a natural language phrase containing an atomic action and an object, e.g., ``go up stairs''. These actional atomic concepts, which serve as the bridge between observations and instructions, can effectively mitigate the semantic gap and simplify the alignment. AACL contains three core components: 1) a concept mapping module to map the observations to the actional atomic concept representations through the VLN environment and the recently proposed Contrastive Language-Image Pretraining (CLIP) model, 2) a concept refining adapter to encourage more instruction-oriented object concept extraction by re-ranking the predicted object concepts by CLIP, and 3) an observation co-embedding module which utilizes concept representations to regularize the observation representations. 
Our AACL establishes new state-of-the-art results on both fine-grained (R2R) and high-level (REVERIE and R2R-Last) VLN benchmarks. Moreover, the visualization shows that AACL significantly improves the interpretability in action decision. Code will be available at https://gitee.com/mindspore/models/tree/master/research/cv/VLN-AACL.", + "primary_area": "computer vision ii", + "author": "Bingqian Lin; Yi Zhu; Xiaodan Liang; Liang Lin; Jianzhuang Liu", + "authorids": "", + "aff": "Shenzhen Campus of Sun Yat-sen University; Huawei Noah\u2019s Ark Lab; PengCheng Laboratory + Shenzhen Campus of Sun Yat-sen University; Sun Yat-sen University; Huawei Noah\u2019s Ark Lab", + "bibtex": "@article{Lin_Zhu_Liang_Lin_Liu_2023, title={Actional Atomic-Concept Learning for Demystifying Vision-Language Navigation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25243}, DOI={10.1609/aaai.v37i2.25243}, abstractNote={Vision-Language Navigation (VLN) is a challenging task which requires an agent to align complex visual observations to language instructions to reach the goal position. Most existing VLN agents directly learn to align the raw directional features and visual features trained using one-hot labels to linguistic instruction features. However, the big semantic gap among these multi-modal inputs makes the alignment difficult and therefore limits the navigation performance. In this paper, we propose Actional Atomic-Concept Learning (AACL), which maps visual observations to actional atomic concepts for facilitating the alignment. Specifically, an actional atomic concept is a natural language phrase containing an atomic action and an object, e.g., ``go up stairs\u2019\u2019. These actional atomic concepts, which serve as the bridge between observations and instructions, can effectively mitigate the semantic gap and simplify the alignment. 
AACL contains three core components: 1) a concept mapping module to map the observations to the actional atomic concept representations through the VLN environment and the recently proposed Contrastive Language-Image Pretraining (CLIP) model, 2) a concept refining adapter to encourage more instruction-oriented object concept extraction by re-ranking the predicted object concepts by CLIP, and 3) an observation co-embedding module which utilizes concept representations to regularize the observation representations. Our AACL establishes new state-of-the-art results on both fine-grained (R2R) and high-level (REVERIE and R2R-Last) VLN benchmarks. Moreover, the visualization shows that AACL significantly improves the interpretability in action decision. Code will be available at https://gitee.com/mindspore/models/tree/master/research/cv/VLN-AACL.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Bingqian and Zhu, Yi and Liang, Xiaodan and Lin, Liang and Liu, Jianzhuang}, year={2023}, month={Jun.}, pages={1568-1576} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25243/25015", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25243", + "pdf_size": 849214, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11358184427216483736&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail2.sysu.edu.cn;huawei.com;mail.sysu.edu.cn;ieee.org;huawei.com", + "email": "mail2.sysu.edu.cn;huawei.com;mail.sysu.edu.cn;ieee.org;huawei.com", + "github": "", + "project": "https://gitee.com/mindspore/models/tree/master/research/cv/VLN-AACL", + "author_num": 5, + "aff_unique_index": "0;1;2+0;0;1", + "aff_unique_norm": "Sun Yat-sen University;Huawei;PengCheng Laboratory", + "aff_unique_dep": ";Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.sysu.edu.cn/;https://www.huawei.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "SYSU;Huawei;", + 
"aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25237", + "title": "Actionness Inconsistency-Guided Contrastive Learning for Weakly-Supervised Temporal Action Localization", + "track": "main", + "status": "Technical", + "abstract": "Weakly-supervised temporal action localization (WTAL) aims to detect action instances given only video-level labels. To address the challenge, recent methods commonly employ a two-branch framework, consisting of a class-aware branch and a class-agnostic branch. In principle, the two branches are supposed to produce the same actionness activation. However, we observe that there are actually many inconsistent activation regions. These inconsistent regions usually contain some challenging segments whose semantic information (action or background) is ambiguous. In this work, we propose a novel Actionness Inconsistency-guided Contrastive Learning (AICL) method which utilizes the consistent segments to boost the representation learning of the inconsistent segments. Specifically, we first define the consistent and inconsistent segments by comparing the predictions of two branches and then construct positive and negative pairs between consistent segments and inconsistent segments for contrastive learning. In addition, to avoid the trivial case where there is no consistent sample, we introduce an action consistency constraint to control the difference between the two branches. We conduct extensive experiments on THUMOS14, ActivityNet v1.2, and ActivityNet v1.3 datasets, and the results show the effectiveness of AICL with state-of-the-art performance. 
Our code is available at https://github.com/lizhilin-ustc/AAAI2023-AICL.", + "primary_area": "computer vision ii", + "author": "Zhilin Li; Zilei Wang; Qinying Liu", + "authorids": "", + "aff": "University of Science and Technology of China, Hefei, China; University of Science and Technology of China, Hefei, China; University of Science and Technology of China, Hefei, China", + "bibtex": "@article{Li_Wang_Liu_2023, title={Actionness Inconsistency-Guided Contrastive Learning for Weakly-Supervised Temporal Action Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25237}, DOI={10.1609/aaai.v37i2.25237}, abstractNote={Weakly-supervised temporal action localization (WTAL) aims to detect action instances given only video-level labels. To address the challenge, recent methods commonly employ a two-branch framework, consisting of a class-aware branch and a class-agnostic branch. In principle, the two branches are supposed to produce the same actionness activation. However, we observe that there are actually many inconsistent activation regions. These inconsistent regions usually contain some challenging segments whose semantic information (action or background) is ambiguous. In this work, we propose a novel Actionness Inconsistency-guided Contrastive Learning (AICL) method which utilizes the consistent segments to boost the representation learning of the inconsistent segments. Specifically, we first define the consistent and inconsistent segments by comparing the predictions of two branches and then construct positive and negative pairs between consistent segments and inconsistent segments for contrastive learning. In addition, to avoid the trivial case where there is no consistent sample, we introduce an action consistency constraint to control the difference between the two branches. 
We conduct extensive experiments on THUMOS14, ActivityNet v1.2, and ActivityNet v1.3 datasets, and the results show the effectiveness of AICL with state-of-the-art performance. Our code is available at https://github.com/lizhilin-ustc/AAAI2023-AICL.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zhilin and Wang, Zilei and Liu, Qinying}, year={2023}, month={Jun.}, pages={1513-1521} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25237/25009", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25237", + "pdf_size": 652969, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17980625581922023766&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn", + "email": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn", + "github": "https://github.com/lizhilin-ustc/AAAI2023-AICL", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hefei", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25376", + "title": "Active Token Mixer", + "track": "main", + "status": "Technical", + "abstract": "The three existing dominant network families, i.e., CNNs, Transformers and MLPs, differ from each other mainly in the ways of fusing spatial contextual information, leaving designing more effective token-mixing mechanisms at the core of backbone architecture development. In this work, we propose an innovative token-mixer, dubbed Active Token Mixer (ATM), to actively incorporate contextual information from other tokens in the global scope into the given query token. 
This fundamental operator actively predicts where to capture useful contexts and learns how to fuse the captured contexts with the query token at channel level. In this way, the spatial range of token-mixing can be expanded to a global scope with limited computational complexity, where the way of token-mixing is reformed. We take ATMs as the primary operators and assemble them into a cascade architecture, dubbed ATMNet. Extensive experiments demonstrate that ATMNet is generally applicable and comprehensively surpasses different families of SOTA vision backbones by a clear margin on a broad range of vision tasks, including visual recognition and dense prediction tasks. Code is available at https://github.com/microsoft/ActiveMLP.", + "primary_area": "computer vision iii", + "author": "Guoqiang Wei; Zhizheng Zhang; Cuiling Lan; Yan Lu; Zhibo Chen", + "authorids": "", + "aff": "University of Science and Technology of China; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; University of Science and Technology of China", + "bibtex": "@article{Wei_Zhang_Lan_Lu_Chen_2023, title={Active Token Mixer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25376}, DOI={10.1609/aaai.v37i3.25376}, abstractNote={The three existing dominant network families, i.e., CNNs, Transformers and MLPs, differ from each other mainly in the ways of fusing spatial contextual information, leaving designing more effective token-mixing mechanisms at the core of backbone architecture development. In this work, we propose an innovative token-mixer, dubbed Active Token Mixer (ATM), to actively incorporate contextual information from other tokens in the global scope into the given query token. This fundamental operator actively predicts where to capture useful contexts and learns how to fuse the captured contexts with the query token at channel level. 
In this way, the spatial range of token-mixing can be expanded to a global scope with limited computational complexity, where the way of token-mixing is reformed. We take ATMs as the primary operators and assemble them into a cascade architecture, dubbed ATMNet. Extensive experiments demonstrate that ATMNet is generally applicable and comprehensively surpasses different families of SOTA vision backbones by a clear margin on a broad range of vision tasks, including visual recognition and dense prediction tasks. Code is available at https://github.com/microsoft/ActiveMLP.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wei, Guoqiang and Zhang, Zhizheng and Lan, Cuiling and Lu, Yan and Chen, Zhibo}, year={2023}, month={Jun.}, pages={2759-2767} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25376/25148", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25376", + "pdf_size": 1441567, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5327205768073110545&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn", + "email": "mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn", + "github": "https://github.com/microsoft/ActiveMLP", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research", + "aff_unique_dep": ";Research", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "USTC;MSR Asia", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26033", + "title": "AdaBoost.C2: Boosting Classifiers Chains for Multi-Label Classification", + "track": "main", + "status": "Technical", + 
"abstract": "During the last decades, multi-label classification (MLC) has attracted the attention of more and more researchers due to its wide real-world applications. Many boosting methods for MLC have been proposed and achieved great successes. However, these methods only extend existing boosting frameworks to MLC and take loss functions in multi-label version to guide the iteration. These loss functions generally give a comprehensive evaluation on the label set entirety, and thus the characteristics of different labels are ignored. In this paper, we propose a multi-path AdaBoost framework specific to MLC, where each boosting path is established for distinct label and the combination of them is able to provide a maximum optimization to Hamming Loss. In each iteration, classifiers chain is taken as the base classifier to strengthen the connection between multiple AdaBoost paths and exploit the label correlation. Extensive experiments demonstrate the effectiveness of the proposed method.", + "primary_area": "machine learning ii", + "author": "Jiaxuan Li; Xiaoyan Zhu; Jiayin Wang", + "authorids": "", + "aff": "School of Computer Science and Technology, Xi\u2019an Jiaotong University, Xi\u2019an, China; School of Computer Science and Technology, Xi\u2019an Jiaotong University, Xi\u2019an, China; School of Computer Science and Technology, Xi\u2019an Jiaotong University, Xi\u2019an, China", + "bibtex": "@article{Li_Zhu_Wang_2023, title={AdaBoost.C2: Boosting Classifiers Chains for Multi-Label Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26033}, DOI={10.1609/aaai.v37i7.26033}, abstractNote={During the last decades, multi-label classification (MLC) has attracted the attention of more and more researchers due to its wide real-world applications. Many boosting methods for MLC have been proposed and achieved great successes. 
However, these methods only extend existing boosting frameworks to MLC and take loss functions in multi-label version to guide the iteration. These loss functions generally give a comprehensive evaluation on the label set entirety, and thus the characteristics of different labels are ignored. In this paper, we propose a multi-path AdaBoost framework specific to MLC, where each boosting path is established for distinct label and the combination of them is able to provide a maximum optimization to Hamming Loss. In each iteration, classifiers chain is taken as the base classifier to strengthen the connection between multiple AdaBoost paths and exploit the label correlation. Extensive experiments demonstrate the effectiveness of the proposed method.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jiaxuan and Zhu, Xiaoyan and Wang, Jiayin}, year={2023}, month={Jun.}, pages={8580-8587} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26033/25805", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26033", + "pdf_size": 944054, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17248660995614107796&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xjtu.edu.cn;xjtu.edu.cn;mail.xjtu.edu.cn", + "email": "stu.xjtu.edu.cn;xjtu.edu.cn;mail.xjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Xi'an Jiaotong University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "https://www.xjtu.edu.cn", + "aff_unique_abbr": "XJTU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Xi'an", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25248", + "title": "AdaCM: Adaptive ColorMLP for Real-Time Universal Photo-Realistic Style Transfer", + "track": "main", + "status": "Technical", + "abstract": 
"Photo-realistic style transfer aims at migrating the artistic style from an exemplar style image to a content image, producing a result image without spatial distortions or unrealistic artifacts. Impressive results have been achieved by recent deep models. However, deep neural network based methods are too expensive to run in real-time. Meanwhile, bilateral grid based methods are much faster but still contain artifacts like overexposure. In this work, we propose the Adaptive ColorMLP (AdaCM), an effective and efficient framework for universal photo-realistic style transfer. First, we find the complex non-linear color mapping between input and target domain can be efficiently modeled by a small multi-layer perceptron (ColorMLP) model. Then, in AdaCM, we adopt a CNN encoder to adaptively predict all parameters for the ColorMLP conditioned on each input content and style image pair. Experimental results demonstrate that AdaCM can generate vivid and high-quality stylization results. Meanwhile, our AdaCM is ultrafast and can process a 4K resolution image in 6ms on one V100 GPU.", + "primary_area": "computer vision ii", + "author": "Tianwei Lin; Honglin Lin; Fu Li; Dongliang He; Wenhao Wu; Meiling Wang; Xin Li; Yong Liu", + "authorids": "", + "aff": "Baidu Inc.+Zhejiang University; Zhejiang University+The University of Sydney; Baidu Inc.; Baidu Inc.; The University of Sydney+Baidu Inc.; Baidu Inc.; Baidu Inc.; Zhejiang University", + "bibtex": "@article{Lin_Lin_Li_He_Wu_Wang_Li_Liu_2023, title={AdaCM: Adaptive ColorMLP for Real-Time Universal Photo-Realistic Style Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25248}, DOI={10.1609/aaai.v37i2.25248}, abstractNote={Photo-realistic style transfer aims at migrating the artistic style from an exemplar style image to a content image, producing a result image without spatial distortions or unrealistic artifacts. Impressive results have been achieved by recent deep models. 
However, deep neural network based methods are too expensive to run in real-time. Meanwhile, bilateral grid based methods are much faster but still contain artifacts like overexposure. In this work, we propose the Adaptive ColorMLP (AdaCM), an effective and efficient framework for universal photo-realistic style transfer. First, we find the complex non-linear color mapping between input and target domain can be efficiently modeled by a small multi-layer perceptron (ColorMLP) model. Then, in AdaCM, we adopt a CNN encoder to adaptively predict all parameters for the ColorMLP conditioned on each input content and style image pair. Experimental results demonstrate that AdaCM can generate vivid and high-quality stylization results. Meanwhile, our AdaCM is ultrafast and can process a 4K resolution image in 6ms on one V100 GPU.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Tianwei and Lin, Honglin and Li, Fu and He, Dongliang and Wu, Wenhao and Wang, Meiling and Li, Xin and Liu, Yong}, year={2023}, month={Jun.}, pages={1613-1621} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25248/25020", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25248", + "pdf_size": 9886814, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6106807978728567571&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;zju.edu.cn; ; ; ; ; ; ", + "email": "gmail.com;zju.edu.cn; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1+2;0;0;2+0;0;0;1", + "aff_unique_norm": "Baidu Inc.;Zhejiang University;University of Sydney", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.baidu.com;https://www.zju.edu.cn;https://www.sydney.edu.au", + "aff_unique_abbr": "Baidu;ZJU;USYD", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+1;0;0;1+0;0;0;0", + "aff_country_unique": 
"China;Australia" + }, + { + "id": "article-26275", + "title": "AdaTask: A Task-Aware Adaptive Learning Rate Approach to Multi-Task Learning", + "track": "main", + "status": "Technical", + "abstract": "Multi-task learning (MTL) models have demonstrated impressive results in computer vision, natural language processing, and recommender systems. Even though many approaches have been proposed, how well these approaches balance different tasks on each parameter still remains unclear. In this paper, we propose to measure the task dominance degree of a parameter by the total updates of each task on this parameter. Specifically, we compute the total updates by the exponentially decaying Average of the squared Updates (AU) on a parameter from the corresponding task. Based on this novel metric, we observe that many parameters in existing MTL methods, especially those in the higher shared layers, are still dominated by one or several tasks. The dominance of AU is mainly due to the dominance of accumulative gradients from one or several tasks. Motivated by this, we propose a Task-wise Adaptive learning rate approach, AdaTask in short, to separate the accumulative gradients and hence the learning rate of each task for each parameter in adaptive learning rate approaches (e.g., AdaGrad, RMSProp, and Adam). Comprehensive experiments on computer vision and recommender system MTL datasets demonstrate that AdaTask significantly improves the performance of dominated tasks, resulting SOTA average task-wise performance. 
Analysis on both synthetic and real-world datasets shows AdaTask balance parameters in every shared layer well.", + "primary_area": "machine learning iv", + "author": "Enneng Yang; Junwei Pan; Ximei Wang; Haibin Yu; Li Shen; Xihua Chen; Lei Xiao; Jie Jiang; Guibing Guo", + "authorids": "", + "aff": "Northeastern University, China; Tencent Inc, China; Tencent Inc, China; Tencent Inc, China; JD Explore Academy, China; Tencent Inc, China; Tencent Inc, China; Tencent Inc, China; Northeastern University, China", + "bibtex": "@article{Yang_Pan_Wang_Yu_Shen_Chen_Xiao_Jiang_Guo_2023, title={AdaTask: A Task-Aware Adaptive Learning Rate Approach to Multi-Task Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26275}, DOI={10.1609/aaai.v37i9.26275}, abstractNote={Multi-task learning (MTL) models have demonstrated impressive results in computer vision, natural language processing, and recommender systems. Even though many approaches have been proposed, how well these approaches balance different tasks on each parameter still remains unclear. In this paper, we propose to measure the task dominance degree of a parameter by the total updates of each task on this parameter. Specifically, we compute the total updates by the exponentially decaying Average of the squared Updates (AU) on a parameter from the corresponding task. Based on this novel metric, we observe that many parameters in existing MTL methods, especially those in the higher shared layers, are still dominated by one or several tasks. The dominance of AU is mainly due to the dominance of accumulative gradients from one or several tasks. Motivated by this, we propose a Task-wise Adaptive learning rate approach, AdaTask in short, to separate the accumulative gradients and hence the learning rate of each task for each parameter in adaptive learning rate approaches (e.g., AdaGrad, RMSProp, and Adam). 
Comprehensive experiments on computer vision and recommender system MTL datasets demonstrate that AdaTask significantly improves the performance of dominated tasks, resulting SOTA average task-wise performance. Analysis on both synthetic and real-world datasets shows AdaTask balance parameters in every shared layer well.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Enneng and Pan, Junwei and Wang, Ximei and Yu, Haibin and Shen, Li and Chen, Xihua and Xiao, Lei and Jiang, Jie and Guo, Guibing}, year={2023}, month={Jun.}, pages={10745-10753} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26275/26047", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26275", + "pdf_size": 410156, + "gs_citation": 62, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15514362514069786454&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "stumail.neu.edu.cn;tencent.com;tencent.com;tencent.com;gmail.com;tencent.com;tencent.com;tencent.com;swc.neu.edu.cn", + "email": "stumail.neu.edu.cn;tencent.com;tencent.com;tencent.com;gmail.com;tencent.com;tencent.com;tencent.com;swc.neu.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;2;1;1;1;0", + "aff_unique_norm": "Northeastern University;Tencent;JD Explore Academy", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.tencent.com;", + "aff_unique_abbr": "NEU;Tencent;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25660", + "title": "AdapSafe: Adaptive and Safe-Certified Deep Reinforcement Learning-Based Frequency Control for Carbon-Neutral Power Systems", + "track": "main", + "status": "Technical", + "abstract": "With the increasing penetration of inverter-based renewable energy resources, deep reinforcement learning (DRL) has been 
proposed as one of the most promising solutions to realize real-time and autonomous control for future carbon-neutral power systems. In particular, DRL-based frequency control approaches have been extensively investigated to overcome the limitations of model-based approaches, such as the computational cost and scalability for large-scale systems. Nevertheless, the real-world implementation of DRLbased frequency control methods is facing the following fundamental challenges: 1) safety guarantee during the learning and decision-making processes; 2) adaptability against the dynamic system operating conditions. To this end, this is the first work that proposes an Adaptive and Safe-Certified DRL (AdapSafe) algorithm for frequency control to simultaneously address the aforementioned challenges. In particular, a novel self-tuning control barrier function is designed to actively compensate the unsafe frequency control strategies under variational safety constraints and thus achieve guaranteed safety. Furthermore, the concept of meta-reinforcement learning is integrated to significantly enhance its adaptiveness in non-stationary power system environments without sacrificing the safety cost. 
Experiments are conducted based on GB 2030 power system, and the results demonstrate that the proposed AdapSafe exhibits superior performance in terms of its guaranteed safety in both training and test phases, as well as its considerable adaptability against the dynamics changes of system parameters.", + "primary_area": "domain s of application", + "author": "Xu Wan; Mingyang Sun; Boli Chen; Zhongda Chu; Fei Teng", + "authorids": "", + "aff": "Zhejiang University + Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; Zhejiang University + Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; University College London; Imperial College London; Imperial College London", + "bibtex": "@article{Wan_Sun_Chen_Chu_Teng_2023, title={AdapSafe: Adaptive and Safe-Certified Deep Reinforcement Learning-Based Frequency Control for Carbon-Neutral Power Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25660}, DOI={10.1609/aaai.v37i4.25660}, abstractNote={With the increasing penetration of inverter-based renewable energy resources, deep reinforcement learning (DRL) has been proposed as one of the most promising solutions to realize real-time and autonomous control for future carbon-neutral power systems. In particular, DRL-based frequency control approaches have been extensively investigated to overcome the limitations of model-based approaches, such as the computational cost and scalability for large-scale systems. Nevertheless, the real-world implementation of DRLbased frequency control methods is facing the following fundamental challenges: 1) safety guarantee during the learning and decision-making processes; 2) adaptability against the dynamic system operating conditions. To this end, this is the first work that proposes an Adaptive and Safe-Certified DRL (AdapSafe) algorithm for frequency control to simultaneously address the aforementioned challenges. 
In particular, a novel self-tuning control barrier function is designed to actively compensate the unsafe frequency control strategies under variational safety constraints and thus achieve guaranteed safety. Furthermore, the concept of meta-reinforcement learning is integrated to significantly enhance its adaptiveness in non-stationary power system environments without sacrificing the safety cost. Experiments are conducted based on GB 2030 power system, and the results demonstrate that the proposed AdapSafe exhibits superior performance in terms of its guaranteed safety in both training and test phases, as well as its considerable adaptability against the dynamics changes of system parameters.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wan, Xu and Sun, Mingyang and Chen, Boli and Chu, Zhongda and Teng, Fei}, year={2023}, month={Jun.}, pages={5294-5302} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25660/25432", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25660", + "pdf_size": 4158081, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1312377449083004227&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn;zju.edu.cn;ucl.ac.uk;ic.ac.uk;imperial.ac.uk", + "email": "zju.edu.cn;zju.edu.cn;ucl.ac.uk;ic.ac.uk;imperial.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;1;2;2", + "aff_unique_norm": "Zhejiang University;University College London;Imperial College London", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.ucl.ac.uk;https://www.imperial.ac.uk", + "aff_unique_abbr": "ZJU;UCL;ICL", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;1;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25288", + "title": "Adapting Object Size Variance and Class Imbalance for 
Semi-supervised Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Semi-supervised object detection (SSOD) attracts extensive research interest due to its great significance in reducing the data annotation effort. Collecting high-quality and category-balanced pseudo labels for unlabeled images is critical to addressing the SSOD problem. However, most of the existing pseudo-labeling-based methods depend on a large and fixed threshold to select high-quality pseudo labels from the predictions of a teacher model. Considering different object classes usually have different detection difficulty levels due to scale variance and data distribution imbalance, conventional pseudo-labeling-based methods are arduous to explore the value of unlabeled data sufficiently. To address these issues, we propose an adaptive pseudo labeling strategy, which can assign thresholds to classes with respect to their \u201chardness\u201d. This is beneficial for ensuring the high quality of easier classes and increasing the quantity of harder classes simultaneously. Besides, label refinement modules are set up based on box jittering for guaranteeing the localization quality of pseudo labels. To further improve the algorithm\u2019s robustness against scale variance and make the most of pseudo labels, we devise a joint feature-level and prediction-level consistency learning pipeline for transferring the information of the teacher model to the student model. Extensive experiments on COCO and VOC datasets indicate that our method achieves state-of-the-art performance. 
Especially, it brings mean average precision gains of 2.08 and 1.28 on MS-COCO dataset with 5% and 10% labeled images, respectively.", + "primary_area": "computer vision ii", + "author": "Yuxiang Nie; Chaowei Fang; Lechao Cheng; Liang Lin; Guanbin Li", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China; School of Artificial Intelligence, Xidian University, Xi\u2019an, China; Zhejiang Lab, Hangzhou, China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Research Institute, Sun Yat-sen University, Shenzhen, China", + "bibtex": "@article{Nie_Fang_Cheng_Lin_Li_2023, title={Adapting Object Size Variance and Class Imbalance for Semi-supervised Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25288}, DOI={10.1609/aaai.v37i2.25288}, abstractNote={Semi-supervised object detection (SSOD) attracts extensive research interest due to its great significance in reducing the data annotation effort. Collecting high-quality and category-balanced pseudo labels for unlabeled images is critical to addressing the SSOD problem. However, most of the existing pseudo-labeling-based methods depend on a large and fixed threshold to select high-quality pseudo labels from the predictions of a teacher model. Considering different object classes usually have different detection difficulty levels due to scale variance and data distribution imbalance, conventional pseudo-labeling-based methods are arduous to explore the value of unlabeled data sufficiently. To address these issues, we propose an adaptive pseudo labeling strategy, which can assign thresholds to classes with respect to their \u201chardness\u201d. This is beneficial for ensuring the high quality of easier classes and increasing the quantity of harder classes simultaneously. 
Besides, label refinement modules are set up based on box jittering for guaranteeing the localization quality of pseudo labels. To further improve the algorithm\u2019s robustness against scale variance and make the most of pseudo labels, we devise a joint feature-level and prediction-level consistency learning pipeline for transferring the information of the teacher model to the student model. Extensive experiments on COCO and VOC datasets indicate that our method achieves state-of-the-art performance. Especially, it brings mean average precision gains of 2.08 and 1.28 on MS-COCO dataset with 5% and 10% labeled images, respectively.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nie, Yuxiang and Fang, Chaowei and Cheng, Lechao and Lin, Liang and Li, Guanbin}, year={2023}, month={Jun.}, pages={1966-1974} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25288/25060", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25288", + "pdf_size": 2306690, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18307374372328647466&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "mail2.sysu.edu.cn;outlook.com;zhejianglab.com;ieee.org;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;outlook.com;zhejianglab.com;ieee.org;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;0+0", + "aff_unique_norm": "Sun Yat-sen University;Xidian University;Zhejiang Lab", + "aff_unique_dep": "School of Computer Science and Engineering;School of Artificial Intelligence;", + "aff_unique_url": "http://www.sysu.edu.cn;http://www.xidian.edu.cn;http://www.zhejianglab.com", + "aff_unique_abbr": "SYSU;Xidian;", + "aff_campus_unique_index": "0;1;2;0;0+3", + "aff_campus_unique": "Guangzhou;Xi'an;Hangzhou;Shenzhen", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27048", + 
"title": "Adaptive Constraint Partition Based Optimization Framework for Large-Scale Integer Linear Programming (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Integer programming problems (IPs) are challenging to be solved efficiently due to the NP-hardness, especially for large-scale IPs. To solve this type of IPs, Large neighborhood search (LNS) uses an initial feasible solution and iteratively improves it by searching a large neighborhood around the current solution. However, LNS easily steps into local optima and ignores the correlation between variables to be optimized, leading to compromised performance. This paper presents a general adaptive constraint partition-based optimization framework (ACP) for large-scale IPs that can efficiently use any existing optimization solver as a subroutine. Specifically, ACP first randomly partitions the constraints into blocks, where the number of blocks is adaptively adjusted to avoid local optima. Then, ACP uses a subroutine solver to optimize the decision variables in a randomly selected block of constraints to enhance the variable correlation. ACP is compared with LNS framework with different subroutine solvers on four IPs and a real-world IP. 
The experimental results demonstrate that in specified wall-clock time ACP shows better performance than SCIP and Gurobi.", + "primary_area": "", + "author": "Huigen Ye; Hongyan Wang; Hua Xu; Chengming Wang; Yu Jiang", + "authorids": "", + "aff": "State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China + School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China; Meituan Inc., Block F&G, Wangjing International R&D Park, No.6 Wang Jing East Rd, Chaoyang District, Beijing, 100102, China; Meituan Inc., Block F&G, Wangjing International R&D Park, No.6 Wang Jing East Rd, Chaoyang District, Beijing, 100102, China", + "bibtex": "@article{Ye_Wang_Xu_Wang_Jiang_2024, title={Adaptive Constraint Partition Based Optimization Framework for Large-Scale Integer Linear Programming (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27048}, DOI={10.1609/aaai.v37i13.27048}, abstractNote={Integer programming problems (IPs) are challenging to be solved efficiently due to the NP-hardness, especially for large-scale IPs. To solve this type of IPs, Large neighborhood search (LNS) uses an initial feasible solution and iteratively improves it by searching a large neighborhood around the current solution. However, LNS easily steps into local optima and ignores the correlation between variables to be optimized, leading to compromised performance. This paper presents a general adaptive constraint partition-based optimization framework (ACP) for large-scale IPs that can efficiently use any existing optimization solver as a subroutine. 
Specifically, ACP first randomly partitions the constraints into blocks, where the number of blocks is adaptively adjusted to avoid local optima. Then, ACP uses a subroutine solver to optimize the decision variables in a randomly selected block of constraints to enhance the variable correlation. ACP is compared with LNS framework with different subroutine solvers on four IPs and a real-world IP. The experimental results demonstrate that in specified wall-clock time ACP shows better performance than SCIP and Gurobi.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Huigen and Wang, Hongyan and Xu, Hua and Wang, Chengming and Jiang, Yu}, year={2024}, month={Jul.}, pages={16376-16377} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27048/26820", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27048", + "pdf_size": 243610, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12203605463787156231&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mail2.sysu.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn;meituan.com;meituan.com", + "email": "mail2.sysu.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn;meituan.com;meituan.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;2;2", + "aff_unique_norm": "Tsinghua University;Sun Yat-sen University;Meituan Inc.", + "aff_unique_dep": "Department of Computer Science and Technology;School of Computer Science and Engineering;", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.sysu.edu.cn;https://www.meituan.com", + "aff_unique_abbr": "Tsinghua;SYSU;Meituan", + "aff_campus_unique_index": "0+1;0;0", + "aff_campus_unique": "Beijing;Guangzhou;", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26061", + "title": "Adaptive Discrete Communication Bottlenecks with Dynamic Vector Quantization for Heterogeneous 
Representational Coarseness", + "track": "main", + "status": "Technical", + "abstract": "Vector Quantization (VQ) is a method for discretizing latent representations and has become a major part of the deep learning toolkit. It has been theoretically and empirically shown that discretization of representations leads to improved generalization, including in reinforcement learning where discretization can be used to bottleneck multi-agent communication to promote agent specialization and robustness. The discretization tightness of most VQ-based methods is defined by the number of discrete codes in the representation vector and the codebook size, which are fixed as hyperparameters. In this work, we propose learning to dynamically select discretization tightness conditioned on inputs, based on the hypothesis that data naturally contains variations in complexity that call for different levels of representational coarseness which is observed in many heterogeneous data sets. We show that dynamically varying tightness in communication bottlenecks can improve model performance on visual reasoning and reinforcement learning tasks with heterogeneity in representations.", + "primary_area": "machine learning ii", + "author": "Dianbo Liu; Alex Lamb; Xu Ji; Pascal Junior Tikeng Notsawo; Michael Mozer; Yoshua Bengio; Kenji Kawaguchi", + "authorids": "", + "aff": "Mila-Quebec AI Institute; Mila-Quebec AI Institute; Mila-Quebec AI Institute; Mila-Quebec AI Institute; Google Research, Brain Team; Mila-Quebec AI Institute + CIFAR AI Chair; National University of Singapore", + "bibtex": "@article{Liu_Lamb_Ji_Tikeng Notsawo_Mozer_Bengio_Kawaguchi_2023, title={Adaptive Discrete Communication Bottlenecks with Dynamic Vector Quantization for Heterogeneous Representational Coarseness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26061}, DOI={10.1609/aaai.v37i7.26061}, abstractNote={Vector Quantization (VQ) is a method for discretizing latent representations and has 
become a major part of the deep learning toolkit. It has been theoretically and empirically shown that discretization of representations leads to improved generalization, including in reinforcement learning where discretization can be used to bottleneck multi-agent communication to promote agent specialization and robustness. The discretization tightness of most VQ-based methods is defined by the number of discrete codes in the representation vector and the codebook size, which are fixed as hyperparameters. In this work, we propose learning to dynamically select discretization tightness conditioned on inputs, based on the hypothesis that data naturally contains variations in complexity that call for different levels of representational coarseness which is observed in many heterogeneous data sets. We show that dynamically varying tightness in communication bottlenecks can improve model performance on visual reasoning and reinforcement learning tasks with heterogeneity in representations.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Dianbo and Lamb, Alex and Ji, Xu and Tikeng Notsawo, Pascal Junior and Mozer, Michael and Bengio, Yoshua and Kawaguchi, Kenji}, year={2023}, month={Jun.}, pages={8825-8833} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26061/25833", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26061", + "pdf_size": 406836, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18233168341265676238&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com; ; ; ; ;", + "email": "gmail.com;gmail.com; ; ; ; ;", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;0+2;3", + "aff_unique_norm": "Mila-Quebec AI Institute;Google;CIFAR;National University of Singapore", + "aff_unique_dep": "AI Institute;Google Research;AI Chair;", + "aff_unique_url": 
"https://mila.quebec;https://research.google;https://www.cifar.ca;https://www.nus.edu.sg", + "aff_unique_abbr": "Mila;Google;CIFAR;NUS", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;1;0+0;2", + "aff_country_unique": "Canada;United States;Singapore" + }, + { + "id": "article-25317", + "title": "Adaptive Dynamic Filtering Network for Image Denoising", + "track": "main", + "status": "Technical", + "abstract": "In image denoising networks, feature scaling is widely used to enlarge the receptive field size and reduce computational costs. This practice, however, also leads to the loss of high-frequency information and fails to consider within-scale characteristics. Recently, dynamic convolution has exhibited powerful capabilities in processing high-frequency information (e.g., edges, corners, textures), but previous works lack sufficient spatial contextual information in filter generation. To alleviate these issues, we propose to employ dynamic convolution to improve the learning of high-frequency and multi-scale features. Specifically, we design a spatially enhanced kernel generation (SEKG) module to improve dynamic convolution, enabling the learning of spatial context information with a very low computational complexity. Based on the SEKG module, we propose a dynamic convolution block (DCB) and a multi-scale dynamic convolution block (MDCB). The former enhances the high-frequency information via dynamic convolution and preserves low-frequency information via skip connections. The latter utilizes shared adaptive dynamic kernels and the idea of dilated convolution to achieve efficient multi-scale feature extraction. The proposed multi-dimension feature integration (MFI) mechanism further fuses the multi-scale features, providing precise and contextually enriched feature representations. Finally, we build an efficient denoising network with the proposed DCB and MDCB, named ADFNet. 
It achieves better performance with low computational complexity on real-world and synthetic Gaussian noisy datasets. The source code is available at https://github.com/it-hao/ADFNet.", + "primary_area": "computer vision ii", + "author": "Hao Shen; Zhong-Qiu Zhao; Wandi Zhang", + "authorids": "", + "aff": "School of Computer Science and Information Engineering, Hefei University of Technology (HFUT) + Intelligent Interconnected Systems Laboratory of Anhui Province (HFUT) + Intelligent Manufacturing Institute of HFUT; School of Computer Science and Information Engineering, Hefei University of Technology (HFUT) + Intelligent Interconnected Systems Laboratory of Anhui Province (HFUT) + Guangxi Academy of Sciences + Intelligent Manufacturing Institute of HFUT; School of Computer Science and Information Engineering, Hefei University of Technology (HFUT) + Intelligent Interconnected Systems Laboratory of Anhui Province (HFUT) + Intelligent Manufacturing Institute of HFUT", + "bibtex": "@article{Shen_Zhao_Zhang_2023, title={Adaptive Dynamic Filtering Network for Image Denoising}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25317}, DOI={10.1609/aaai.v37i2.25317}, abstractNote={In image denoising networks, feature scaling is widely used to enlarge the receptive field size and reduce computational costs. This practice, however, also leads to the loss of high-frequency information and fails to consider within-scale characteristics. Recently, dynamic convolution has exhibited powerful capabilities in processing high-frequency information (e.g., edges, corners, textures), but previous works lack sufficient spatial contextual information in filter generation. To alleviate these issues, we propose to employ dynamic convolution to improve the learning of high-frequency and multi-scale features. 
Specifically, we design a spatially enhanced kernel generation (SEKG) module to improve dynamic convolution, enabling the learning of spatial context information with a very low computational complexity. Based on the SEKG module, we propose a dynamic convolution block (DCB) and a multi-scale dynamic convolution block (MDCB). The former enhances the high-frequency information via dynamic convolution and preserves low-frequency information via skip connections. The latter utilizes shared adaptive dynamic kernels and the idea of dilated convolution to achieve efficient multi-scale feature extraction. The proposed multi-dimension feature integration (MFI) mechanism further fuses the multi-scale features, providing precise and contextually enriched feature representations. Finally, we build an efficient denoising network with the proposed DCB and MDCB, named ADFNet. It achieves better performance with low computational complexity on real-world and synthetic Gaussian noisy datasets. The source code is available at https://github.com/it-hao/ADFNet.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shen, Hao and Zhao, Zhong-Qiu and Zhang, Wandi}, year={2023}, month={Jun.}, pages={2227-2235} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25317/25089", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25317", + "pdf_size": 4497867, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9508557146884292846&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;hfut.edu.cn;mail.hfut.edu.cn", + "email": "gmail.com;hfut.edu.cn;mail.hfut.edu.cn", + "github": "https://github.com/it-hao/ADFNet", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1+0;0+1+2+0;0+1+0", + "aff_unique_norm": "Hefei University of Technology;Anhui University of Technology;Guangxi Academy of Sciences", + "aff_unique_dep": "School of Computer Science and Information 
Engineering;Intelligent Interconnected Systems Laboratory;", + "aff_unique_url": "http://www.hfut.edu.cn;http://www.hfut.edu.cn/;http://www.gxas.org.cn", + "aff_unique_abbr": "HFUT;HFUT;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hefei;", + "aff_country_unique_index": "0+0+0;0+0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25937", + "title": "Adaptive Hierarchy-Branch Fusion for Online Knowledge Distillation", + "track": "main", + "status": "Technical", + "abstract": "Online Knowledge Distillation (OKD) is designed to alleviate the dilemma that the high-capacity pre-trained teacher model is not available. However, the existing methods mostly focus on improving the ensemble prediction accuracy from multiple students (a.k.a. branches), which often overlook the homogenization problem that makes student model saturate quickly and hurts the performance. We assume that the intrinsic bottleneck of the homogenization problem comes from the identical branch architecture and coarse ensemble strategy. We propose a novel Adaptive Hierarchy-Branch Fusion framework for Online Knowledge Distillation, termed AHBF-OKD, which designs hierarchical branches and adaptive hierarchy-branch fusion module to boost the model diversity and aggregate complementary knowledge. Specifically, we first introduce hierarchical branch architectures to construct diverse peers by increasing the depth of branches monotonously on the basis of target branch. To effectively transfer knowledge from the most complex branch to the simplest target branch, we propose an adaptive hierarchy-branch fusion module to create hierarchical teacher assistants recursively, which regards the target branch as the smallest teacher assistant. During the training, the teacher assistant from the previous hierarchy is explicitly distilled by the teacher assistant and the branch from the current hierarchy. 
Thus, the important scores to different branches are effectively and adaptively allocated to reduce the branch homogenization. Extensive experiments demonstrate the effectiveness of AHBF-OKD on different datasets, including CIFAR-10/100 and ImageNet 2012. For example, on ImageNet 2012, the distilled ResNet-18 achieves Top-1 error of 29.28\\%, which significantly outperforms the state-of-the-art methods. The source code is available at https://github.com/linruigong965/AHBF.", + "primary_area": "machine learning i", + "author": "Linrui Gong; Shaohui Lin; Baochang Zhang; Yunhang Shen; Ke Li; Ruizhi Qiao; Bo Ren; Muqing Li; Zhou Yu; Lizhuang Ma", + "authorids": "", + "aff": "East China Normal University; East China Normal University; Beihang University; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; East China Normal University+Key Laboratory of Advanced Theory and Application in Statistics and Data Science - MOE; East China Normal University", + "bibtex": "@article{Gong_Lin_Zhang_Shen_Li_Qiao_Ren_Li_Yu_Ma_2023, title={Adaptive Hierarchy-Branch Fusion for Online Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25937}, DOI={10.1609/aaai.v37i6.25937}, abstractNote={Online Knowledge Distillation (OKD) is designed to alleviate the dilemma that the high-capacity pre-trained teacher model is not available. However, the existing methods mostly focus on improving the ensemble prediction accuracy from multiple students (a.k.a. branches), which often overlook the homogenization problem that makes student model saturate quickly and hurts the performance. We assume that the intrinsic bottleneck of the homogenization problem comes from the identical branch architecture and coarse ensemble strategy. 
We propose a novel Adaptive Hierarchy-Branch Fusion framework for Online Knowledge Distillation, termed AHBF-OKD, which designs hierarchical branches and adaptive hierarchy-branch fusion module to boost the model diversity and aggregate complementary knowledge. Specifically, we first introduce hierarchical branch architectures to construct diverse peers by increasing the depth of branches monotonously on the basis of target branch. To effectively transfer knowledge from the most complex branch to the simplest target branch, we propose an adaptive hierarchy-branch fusion module to create hierarchical teacher assistants recursively, which regards the target branch as the smallest teacher assistant. During the training, the teacher assistant from the previous hierarchy is explicitly distilled by the teacher assistant and the branch from the current hierarchy. Thus, the important scores to different branches are effectively and adaptively allocated to reduce the branch homogenization. Extensive experiments demonstrate the effectiveness of AHBF-OKD on different datasets, including CIFAR-10/100 and ImageNet 2012. For example, on ImageNet 2012, the distilled ResNet-18 achieves Top-1 error of 29.28\\%, which significantly outperforms the state-of-the-art methods. 
The source code is available at https://github.com/linruigong965/AHBF.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gong, Linrui and Lin, Shaohui and Zhang, Baochang and Shen, Yunhang and Li, Ke and Qiao, Ruizhi and Ren, Bo and Li, Muqing and Yu, Zhou and Ma, Lizhuang}, year={2023}, month={Jun.}, pages={7731-7739} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25937/25709", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25937", + "pdf_size": 1253526, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4046568472573957684&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;cs.ecnu.edu.cn;buaa.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;stat.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "gmail.com;cs.ecnu.edu.cn;buaa.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;stat.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "https://github.com/linruigong965/AHBF", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;1;2;2;2;2;2;0+3;0", + "aff_unique_norm": "East China Normal University;Beihang University;Tencent;MOE Key Laboratory of Advanced Theory and Application in Statistics and Data Science", + "aff_unique_dep": ";;Youtu Lab;Department of Statistics and Data Science", + "aff_unique_url": "http://www.ecnu.edu.cn;http://www.buaa.edu.cn/;https://www.tencent.com;", + "aff_unique_abbr": "ECNU;BUAA;Tencent;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25564", + "title": "Adaptive Low-Precision Training for Embeddings in Click-Through Rate Prediction", + "track": "main", + "status": "Technical", + "abstract": "Embedding tables are usually huge in click-through rate (CTR) prediction models. 
To train and deploy the CTR models efficiently and economically, it is necessary to compress their embedding tables. To this end, we formulate a novel quantization training paradigm to compress the embeddings from the training stage, termed low-precision training (LPT). Also, we provide theoretical analysis on its convergence. The results show that stochastic weight quantization has a faster convergence rate and a smaller convergence error than deterministic weight quantization in LPT. Further, to reduce accuracy degradation, we propose adaptive low-precision training (ALPT) which learns the step size (i.e., the quantization resolution). Experiments on two real-world datasets confirm our analysis and show that ALPT can significantly improve the prediction accuracy, especially at extremely low bit width. For the first time in CTR models, we successfully train 8-bit embeddings without sacrificing prediction accuracy.", + "primary_area": "data mining and knowledge management", + "author": "Shiwei Li; Huifeng Guo; Lu Hou; Wei Zhang; Xing Tang; Ruiming Tang; Rui Zhang; Ruixuan Li", + "authorids": "", + "aff": "Huazhong University of Science and Technology; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Tsinghua University; Huazhong University of Science and Technology", + "bibtex": "@article{Li_Guo_Hou_Zhang_Tang_Tang_Zhang_Li_2023, title={Adaptive Low-Precision Training for Embeddings in Click-Through Rate Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25564}, DOI={10.1609/aaai.v37i4.25564}, abstractNote={Embedding tables are usually huge in click-through rate (CTR) prediction models. To train and deploy the CTR models efficiently and economically, it is necessary to compress their embedding tables. 
To this end, we formulate a novel quantization training paradigm to compress the embeddings from the training stage, termed low-precision training (LPT). Also, we provide theoretical analysis on its convergence. The results show that stochastic weight quantization has a faster convergence rate and a smaller convergence error than deterministic weight quantization in LPT. Further, to reduce accuracy degradation, we propose adaptive low-precision training (ALPT) which learns the step size (i.e., the quantization resolution). Experiments on two real-world datasets confirm our analysis and show that ALPT can significantly improve the prediction accuracy, especially at extremely low bit width. For the first time in CTR models, we successfully train 8-bit embeddings without sacrificing prediction accuracy.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shiwei and Guo, Huifeng and Hou, Lu and Zhang, Wei and Tang, Xing and Tang, Ruiming and Zhang, Rui and Li, Ruixuan}, year={2023}, month={Jun.}, pages={4435-4443} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25564/25336", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25564", + "pdf_size": 579027, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10559462599525132413&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 6, + "aff_domain": "mail2.sysu.edu.cn;huawei.com;huawei.com; ;huawei.com;huawei.com;tsinghua.edu.cn;mail.hust.edu.cn", + "email": "mail2.sysu.edu.cn;huawei.com;huawei.com; ;huawei.com;huawei.com;tsinghua.edu.cn;mail.hust.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;1;1;1;2;0", + "aff_unique_norm": "Huazhong University of Science and Technology;Huawei;Tsinghua University", + "aff_unique_dep": ";Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.hust.edu.cn;https://www.huawei.com;https://www.tsinghua.edu.cn", + "aff_unique_abbr": 
"HUST;Huawei;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26176", + "title": "Adaptive Mixing of Auxiliary Losses in Supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "In many supervised learning scenarios, auxiliary losses are used in order to introduce additional information or constraints into the supervised learning objective. For instance, knowledge distillation aims to mimic outputs of a powerful teacher model; similarly, in rule-based approaches, weak labeling information is provided by labeling functions which may be noisy rule-based approximations to true labels. We tackle the problem of learning to combine these losses in a principled manner. Our proposal, AMAL, uses a bi-level optimization criterion on validation data to learn optimal mixing weights, at an instance-level, over the training data. We describe a meta-learning approach towards solving this bi-level objective, and show how it can be applied to different scenarios in supervised learning. Experiments in a number of knowledge distillation and rule denoising domains show that AMAL provides noticeable gains over competitive baselines in those domains. We empirically analyze our method and share insights into the mechanisms through which it provides performance gains. 
The code for AMAL is at: https://github.com/durgas16/AMAL.git.", + "primary_area": "machine learning iii", + "author": "Durga Sivasubramanian; Ayush Maheshwari; Prathosh AP; Pradeep Shenoy; Ganesh Ramakrishnan", + "authorids": "", + "aff": "Indian Institute of Technology Bombay + Google Research, India; Indian Institute of Technology Bombay; Indian Institute of Science, Bengaluru; Google Research, India; Indian Institute of Technology Bombay", + "bibtex": "@article{Sivasubramanian_Maheshwari_AP_Shenoy_Ramakrishnan_2023, title={Adaptive Mixing of Auxiliary Losses in Supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26176}, DOI={10.1609/aaai.v37i8.26176}, abstractNote={In many supervised learning scenarios, auxiliary losses are used in order to introduce additional information or constraints into the supervised learning objective. For instance, knowledge distillation aims to mimic outputs of a powerful teacher model; similarly, in rule-based approaches, weak labeling information is provided by labeling functions which may be noisy rule-based approximations to true labels. We tackle the problem of learning to combine these losses in a principled manner. Our proposal, AMAL, uses a bi-level optimization criterion on validation data to learn optimal mixing weights, at an instance-level, over the training data. We describe a meta-learning approach towards solving this bi-level objective, and show how it can be applied to different scenarios in supervised learning. Experiments in a number of knowledge distillation and rule denoising domains show that AMAL provides noticeable gains over competitive baselines in those domains. We empirically analyze our method and share insights into the mechanisms through which it provides performance gains. 
The code for AMAL is at: https://github.com/durgas16/AMAL.git.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sivasubramanian, Durga and Maheshwari, Ayush and AP, Prathosh and Shenoy, Pradeep and Ramakrishnan, Ganesh}, year={2023}, month={Jun.}, pages={9855-9863} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26176/25948", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26176", + "pdf_size": 321374, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7456757242537980995&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "cse.iitb.ac.in;cse.iitb.ac.in;iisc.ac.in;google.com;cse.iitb.ac.in", + "email": "cse.iitb.ac.in;cse.iitb.ac.in;iisc.ac.in;google.com;cse.iitb.ac.in", + "github": "https://github.com/durgas16/AMAL.git", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;2;1;0", + "aff_unique_norm": "Indian Institute of Technology Bombay;Google;Indian Institute of Science", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://www.iitb.ac.in;https://research.google;https://www.iisc.ac.in", + "aff_unique_abbr": "IIT Bombay;Google Research;IISc", + "aff_campus_unique_index": "0+1;0;2;1;0", + "aff_campus_unique": "Bombay;India;Bengaluru", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26103", + "title": "Adaptive Perturbation-Based Gradient Estimation for Discrete Latent Variable Models", + "track": "main", + "status": "Technical", + "abstract": "The integration of discrete algorithmic components in deep learning architectures has numerous applications. Recently, Implicit Maximum Likelihood Estimation, a class of gradient estimators for discrete exponential family distributions, was proposed by combining implicit differentiation through perturbation with the path-wise gradient estimator. 
However, due to the finite difference approximation of the gradients, it is especially sensitive to the choice of the finite difference step size, which needs to be specified by the user. In this work, we present Adaptive IMLE (AIMLE), the first adaptive gradient estimator for complex discrete distributions: it adaptively identifies the target distribution for IMLE by trading off the density of gradient information with the degree of bias in the gradient estimates. We empirically evaluate our estimator on synthetic examples, as well as on Learning to Explain, Discrete Variational Auto-Encoders, and Neural Relational Inference tasks. In our experiments, we show that our adaptive gradient estimator can produce faithful estimates while requiring orders of magnitude fewer samples than other gradient estimators.", + "primary_area": "machine learning iii", + "author": "Pasquale Minervini; Luca Franceschi; Mathias Niepert", + "authorids": "", + "aff": "School of Informatics, University of Edinburgh, Edinburgh, United Kingdom + UCL Centre for Artificial Intelligence, London, United Kingdom; UCL Centre for Artificial Intelligence, London, United Kingdom; University of Stuttgart, Stuttgart, Germany", + "bibtex": "@article{Minervini_Franceschi_Niepert_2023, title={Adaptive Perturbation-Based Gradient Estimation for Discrete Latent Variable Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26103}, DOI={10.1609/aaai.v37i8.26103}, abstractNote={The integration of discrete algorithmic components in deep learning architectures has numerous applications. Recently, Implicit Maximum Likelihood Estimation, a class of gradient estimators for discrete exponential family distributions, was proposed by combining implicit differentiation through perturbation with the path-wise gradient estimator. 
However, due to the finite difference approximation of the gradients, it is especially sensitive to the choice of the finite difference step size, which needs to be specified by the user. In this work, we present Adaptive IMLE (AIMLE), the first adaptive gradient estimator for complex discrete distributions: it adaptively identifies the target distribution for IMLE by trading off the density of gradient information with the degree of bias in the gradient estimates. We empirically evaluate our estimator on synthetic examples, as well as on Learning to Explain, Discrete Variational Auto-Encoders, and Neural Relational Inference tasks. In our experiments, we show that our adaptive gradient estimator can produce faithful estimates while requiring orders of magnitude fewer samples than other gradient estimators.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Minervini, Pasquale and Franceschi, Luca and Niepert, Mathias}, year={2023}, month={Jun.}, pages={9200-9208} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26103/25875", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26103", + "pdf_size": 428715, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3047073418551183368&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ed.ac.uk; ; ", + "email": "ed.ac.uk; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;2", + "aff_unique_norm": "University of Edinburgh;University College London;University of Stuttgart", + "aff_unique_dep": "School of Informatics;Centre for Artificial Intelligence;", + "aff_unique_url": "https://www.ed.ac.uk;https://www.ucl.ac.uk;https://www.uni-stuttgart.de", + "aff_unique_abbr": "Edinburgh;UCL;Uni Stuttgart", + "aff_campus_unique_index": "0+1;1;2", + "aff_campus_unique": "Edinburgh;London;Stuttgart", + "aff_country_unique_index": "0+0;0;1", + "aff_country_unique": "United 
Kingdom;Germany" + }, + { + "id": "article-26345", + "title": "Adaptive Policy Learning for Offline-to-Online Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Conventional reinforcement learning (RL) needs an environment to collect fresh data, which is impractical when online interactions are costly. Offline RL provides an alternative solution by directly learning from the previously collected dataset. However, it will yield unsatisfactory performance if the quality of the offline datasets is poor. In this paper, we consider an offline-to-online setting where the agent is first learned from the offline dataset and then trained online, and propose a framework called Adaptive Policy Learning for effectively taking advantage of offline and online data. Specifically, we explicitly consider the difference between the online and offline data and apply an adaptive update scheme accordingly, that is, a pessimistic update strategy for the offline dataset and an optimistic/greedy update scheme for the online dataset. Such a simple and effective method provides a way to mix the offline and online RL and achieve the best of both worlds. We further provide two detailed algorithms for implementing the framework through embedding value or policy-based RL algorithms into it. 
Finally, we conduct extensive experiments on popular continuous control tasks, and results show that our algorithm can learn the expert policy with high sample efficiency even when the quality of offline dataset is poor, e.g., random dataset.", + "primary_area": "machine learning iv", + "author": "Han Zheng; Xufang Luo; Pengfei Wei; Xuan Song; Dongsheng Li; Jing Jiang", + "authorids": "", + "aff": "University of Technology Sydney; Microsoft Research Asia; National University of Singapore; Southern University of Science and Technology; Microsoft Research Asia + University of Technology Sydney; University of Technology Sydney", + "bibtex": "@article{Zheng_Luo_Wei_Song_Li_Jiang_2023, title={Adaptive Policy Learning for Offline-to-Online Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26345}, DOI={10.1609/aaai.v37i9.26345}, abstractNote={Conventional reinforcement learning (RL) needs an environment to collect fresh data, which is impractical when online interactions are costly. Offline RL provides an alternative solution by directly learning from the previously collected dataset. However, it will yield unsatisfactory performance if the quality of the offline datasets is poor. In this paper, we consider an offline-to-online setting where the agent is first learned from the offline dataset and then trained online, and propose a framework called Adaptive Policy Learning for effectively taking advantage of offline and online data. Specifically, we explicitly consider the difference between the online and offline data and apply an adaptive update scheme accordingly, that is, a pessimistic update strategy for the offline dataset and an optimistic/greedy update scheme for the online dataset. Such a simple and effective method provides a way to mix the offline and online RL and achieve the best of both worlds. 
We further provide two detailed algorithms for implementing the framework through embedding value or policy-based RL algorithms into it. Finally, we conduct extensive experiments on popular continuous control tasks, and results show that our algorithm can learn the expert policy with high sample efficiency even when the quality of offline dataset is poor, e.g., random dataset.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Han and Luo, Xufang and Wei, Pengfei and Song, Xuan and Li, Dongsheng and Jiang, Jing}, year={2023}, month={Jun.}, pages={11372-11380} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26345/26117", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26345", + "pdf_size": 1346371, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17342842757287538297&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;microsoft.com; ; ; ; ", + "email": "gmail.com;microsoft.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;1+0;0", + "aff_unique_norm": "University of Technology Sydney;Microsoft Research;National University of Singapore;Southern University of Science and Technology", + "aff_unique_dep": ";Research;;", + "aff_unique_url": "https://www.uts.edu.au;https://www.microsoft.com/en-us/research/group/asia;https://www.nus.edu.sg;https://www.sustech.edu.cn", + "aff_unique_abbr": "UTS;MSR Asia;NUS;SUSTech", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;1;2;1;1+0;0", + "aff_country_unique": "Australia;China;Singapore" + }, + { + "id": "article-26874", + "title": "Adaptive Temporal Planning for Multi-Robot Systems in Operations and Maintenance of Offshore Wind Farms", + "track": "iaai technical track", + "status": "Technical", + "abstract": "With the fast development of offshore wind farms as renewable energy sources, 
maintaining them efficiently and safely becomes necessary. The high costs of operation and maintenance (O&M) are due to the length of turbine downtime and the logistics for human technician transfer. To reduce such costs, we propose a comprehensive multi-robot system that includes unmanned aerial vehicles (UAV), autonomous surface vessels (ASV), and inspection-and-repair robots (IRR). Our system, which is capable of co-managing the farms with human operators located onshore, brings down costs and significantly reduces the Health and Safety (H&S) risks of O&M by assisting human operators in performing dangerous tasks. In this paper, we focus on using AI temporal planning to coordinate the actions of the different autonomous robots that form the multi-robot system. We devise a new, adaptive planning approach that reduces failures and replanning by performing data-driven goal and domain refinement. Our experiments in both simulated and real-world scenarios prove the effectiveness and robustness of our technique. The success of our system marks the first-step towards a large-scale, multirobot solution for wind farm O&M.", + "primary_area": "nnovative inter disciplinary ai integration", + "author": "Ferdian Jovan; Sara Bernardini", + "authorids": "", + "aff": "University of Bristol, Bristol, BS1 5DL, UK; Royal Holloway University of London, Egham, TW20 0EX, UK", + "bibtex": "@article{Jovan_Bernardini_2024, title={Adaptive Temporal Planning for Multi-Robot Systems in Operations and Maintenance of Offshore Wind Farms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26874}, DOI={10.1609/aaai.v37i13.26874}, abstractNote={With the fast development of offshore wind farms as renewable energy sources, maintaining them efficiently and safely becomes necessary. The high costs of operation and maintenance (O&M) are due to the length of turbine downtime and the logistics for human technician transfer. 
To reduce such costs, we propose a comprehensive multi-robot system that includes unmanned aerial vehicles (UAV), autonomous surface vessels (ASV), and inspection-and-repair robots (IRR). Our system, which is capable of co-managing the farms with human operators located onshore, brings down costs and significantly reduces the Health and Safety (H&S) risks of O&M by assisting human operators in performing dangerous tasks. In this paper, we focus on using AI temporal planning to coordinate the actions of the different autonomous robots that form the multi-robot system. We devise a new, adaptive planning approach that reduces failures and replanning by performing data-driven goal and domain refinement. Our experiments in both simulated and real-world scenarios prove the effectiveness and robustness of our technique. The success of our system marks the first-step towards a large-scale, multirobot solution for wind farm O&M.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jovan, Ferdian and Bernardini, Sara}, year={2024}, month={Jul.}, pages={15782-15788} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26874/26646", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26874", + "pdf_size": 839273, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5688690788118606030&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "bristol.ac.uk;rhul.ac.uk", + "email": "bristol.ac.uk;rhul.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Bristol;Royal Holloway University of London", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bristol.ac.uk;https://www.royalholloway.ac.uk", + "aff_unique_abbr": "UoB;RHUL", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Bristol;Egham", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": 
"article-25229", + "title": "Adaptive Texture Filtering for Single-Domain Generalized Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Domain generalization in semantic segmentation aims to alleviate the performance degradation on unseen domains through learning domain-invariant features. Existing methods diversify images in the source domain by adding complex or even abnormal textures to reduce the sensitivity to domain-specific features. However, these approaches depends heavily on the richness of the texture bank and training them can be time-consuming. In contrast to importing textures arbitrarily or augmenting styles randomly, we focus on the single source domain itself to achieve the generalization. In this paper, we present a novel adaptive texture filtering mechanism to suppress the influence of texture without using augmentation, thus eliminating the interference of domain-specific features. Further, we design a hierarchical guidance generalization network equipped with structure-guided enhancement modules, which purpose to learn the domain-invariant generalized knowledge. Extensive experiments together with ablation studies on widely-used datasets are conducted to verify the effectiveness of the proposed model, and reveal its superiority over other state-of-the-art alternatives.", + "primary_area": "computer vision ii", + "author": "Xinhui Li; Mingjia Li; Yaxing Wang; Chuan-Xian Ren; Xiaojie Guo", + "authorids": "", + "aff": "Tianjin University; Tianjin University; Nankai University; Sun Yat-sen University; Tianjin University", + "bibtex": "@article{Li_Li_Wang_Ren_Guo_2023, title={Adaptive Texture Filtering for Single-Domain Generalized Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25229}, DOI={10.1609/aaai.v37i2.25229}, abstractNote={Domain generalization in semantic segmentation aims to alleviate the performance degradation on unseen domains through learning domain-invariant features. 
Existing methods diversify images in the source domain by adding complex or even abnormal textures to reduce the sensitivity to domain-specific features. However, these approaches depends heavily on the richness of the texture bank and training them can be time-consuming. In contrast to importing textures arbitrarily or augmenting styles randomly, we focus on the single source domain itself to achieve the generalization. In this paper, we present a novel adaptive texture filtering mechanism to suppress the influence of texture without using augmentation, thus eliminating the interference of domain-specific features. Further, we design a hierarchical guidance generalization network equipped with structure-guided enhancement modules, which purpose to learn the domain-invariant generalized knowledge. Extensive experiments together with ablation studies on widely-used datasets are conducted to verify the effectiveness of the proposed model, and reveal its superiority over other state-of-the-art alternatives.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xinhui and Li, Mingjia and Wang, Yaxing and Ren, Chuan-Xian and Guo, Xiaojie}, year={2023}, month={Jun.}, pages={1442-1450} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25229/25001", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25229", + "pdf_size": 15719308, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15000809091187622675&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "tju.edu.cn;tju.edu.cn;nankai.edu.cn;mail.sysu.edu.cn;gmail.com", + "email": "tju.edu.cn;tju.edu.cn;nankai.edu.cn;mail.sysu.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Tianjin University;Nankai University;Sun Yat-sen University", + "aff_unique_dep": ";;", + "aff_unique_url": 
"http://www.tju.edu.cn;http://www.nankai.edu.cn;http://www.sysu.edu.cn/", + "aff_unique_abbr": "TJU;NKU;SYSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26559", + "title": "Adjective Scale Probe: Can Language Models Encode Formal Semantics Information?", + "track": "main", + "status": "Technical", + "abstract": "It is an open question what semantic representations transformer-based language models can encode and whether they have access to more abstract aspects of semantic meaning. Here, we propose a diagnostic dataset to investigate how well language models understand the degree semantics of adjectives. In the dataset, referred as the Adjective Scale Probe (ASP), we semi-automatically generate 8 tests of Natural Language Inference (NLI) questions to test 8 key capabilities of adjective interpretation. We apply the ASP dataset to evaluate the performance of 3 language models, i.e., BERT, DeBERTa, and T0. It is found that language models perform below the majority baseline for most tests of the ASP, even when the models have been fine-tuned to achieve high performance on the large-scale MNLI dataset. But after we fine-tune the pre-trained models on a subset of the ASP, DeBERTa can achieve high performance on the untrained adjectives and untrained tests, suggesting that DeBERTa may have captured degree semantic information of adjectives through pre-training but it needs specific training data to learn how to apply such information to the current tasks. 
In sum, the ASP provides an easy-to-use method to test fine-grained formal semantic properties of adjectives, and reveals language models' abilities to access formal semantic information.", + "primary_area": "speech natural language processing", + "author": "Wei Liu; Ming Xiang; Nai Ding", + "authorids": "", + "aff": "College of Biomedical Engineering and Instrument Sciences, Zhejiang University; Department of Linguistics, The University of Chicago; College of Biomedical Engineering and Instrument Sciences, Zhejiang University", + "bibtex": "@article{Liu_Xiang_Ding_2023, title={Adjective Scale Probe: Can Language Models Encode Formal Semantics Information?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26559}, DOI={10.1609/aaai.v37i11.26559}, abstractNote={It is an open question what semantic representations transformer-based language models can encode and whether they have access to more abstract aspects of semantic meaning. Here, we propose a diagnostic dataset to investigate how well language models understand the degree semantics of adjectives. In the dataset, referred as the Adjective Scale Probe (ASP), we semi-automatically generate 8 tests of Natural Language Inference (NLI) questions to test 8 key capabilities of adjective interpretation. We apply the ASP dataset to evaluate the performance of 3 language models, i.e., BERT, DeBERTa, and T0. It is found that language models perform below the majority baseline for most tests of the ASP, even when the models have been fine-tuned to achieve high performance on the large-scale MNLI dataset. But after we fine-tune the pre-trained models on a subset of the ASP, DeBERTa can achieve high performance on the untrained adjectives and untrained tests, suggesting that DeBERTa may have captured degree semantic information of adjectives through pre-training but it needs specific training data to learn how to apply such information to the current tasks. 
In sum, the ASP provides an easy-to-use method to test fine-grained formal semantic properties of adjectives, and reveals language models\u2019 abilities to access formal semantic information.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Wei and Xiang, Ming and Ding, Nai}, year={2023}, month={Jun.}, pages={13282-13290} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26559/26331", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26559", + "pdf_size": 1009777, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14839215960238468024&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;uchicago.edu;zju.edu.cn", + "email": "zju.edu.cn;uchicago.edu;zju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Zhejiang University;The University of Chicago", + "aff_unique_dep": "College of Biomedical Engineering and Instrument Sciences;Department of Linguistics", + "aff_unique_url": "http://www.zju.edu.cn;https://www.chicago.edu", + "aff_unique_abbr": "ZJU;UChicago", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26811", + "title": "Advances in AI for Safety, Equity, and Well-Being on Web and Social Media: Detection, Robustness, Attribution, and Mitigation", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "In the talk, I shall describe my lab\u2019s recent advances in AI, applied machine learning, and data mining to combat malicious actors (sockpuppets, ban evaders, etc.) and dangerous content (misinformation, hate, etc.) on web and social media platforms. My vision is to create a trustworthy online ecosystem for everyone and create the next generation of socially-aware methods that promote health, equity, and safety. 
Broadly, in my research, I have created novel graph, content (NLP, multimodality), and adversarial machine learning methods leveraging terabytes of data to detect, predict, and mitigate online threats. I shall describe the advancements made in my group across four key thrusts: (1) Detection of harmful content and malicious actors across platforms, languages, and modalities, (2) Robustifying detection models against adversarial actors by predicting future malicious activities, (3) Attributing the impact of harmful content and the role of recommender systems, and (4) Developing mitigation techniques to counter misinformation by professionals and the crowd.", + "primary_area": "", + "author": "Srijan Kumar", + "authorids": "", + "aff": "Georgia Institute of Technology", + "bibtex": "@article{Kumar_2024, title={Advances in AI for Safety, Equity, and Well-Being on Web and Social Media: Detection, Robustness, Attribution, and Mitigation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26811}, DOI={10.1609/aaai.v37i13.26811}, abstractNote={In the talk, I shall describe my lab\u2019s recent advances in AI, applied machine learning, and data mining to combat malicious actors (sockpuppets, ban evaders, etc.) and dangerous content (misinformation, hate, etc.) on web and social media platforms. My vision is to create a trustworthy online ecosystem for everyone and create the next generation of socially-aware methods that promote health, equity, and safety. Broadly, in my research, I have created novel graph, content (NLP, multimodality), and adversarial machine learning methods leveraging terabytes of data to detect, predict, and mitigate online threats. 
I shall describe the advancements made in my group across four key thrusts: (1) Detection of harmful content and malicious actors across platforms, languages, and modalities, (2) Robustifying detection models against adversarial actors by predicting future malicious activities, (3) Attributing the impact of harmful content and the role of recommender systems, and (4) Developing mitigation techniques to counter misinformation by professionals and the crowd.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kumar, Srijan}, year={2024}, month={Jul.}, pages={15444-15444} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26811/26583", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26811", + "pdf_size": 47798, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4507282746158092831&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gatech.edu", + "email": "gatech.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25119", + "title": "Adversarial Alignment for Source Free Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Source-free object detection (SFOD) aims to transfer a detector pre-trained on a label-rich source domain to an unlabeled target domain without seeing source data. While most existing SFOD methods generate pseudo labels via a source-pretrained model to guide training, these pseudo labels usually contain high noises due to heavy domain discrepancy. 
In order to obtain better pseudo supervisions, we divide the target domain into source-similar and source-dissimilar parts and align them in the feature space by adversarial learning.Specifically, we design a detection variance-based criterion to divide the target domain. This criterion is motivated by a finding that larger detection variances denote higher recall and larger similarity to the source domain. Then we incorporate an adversarial module into a mean teacher framework to drive the feature spaces of these two subsets indistinguishable. Extensive experiments on multiple cross-domain object detection datasets demonstrate that our proposed method consistently outperforms the compared SFOD methods. Our implementation is available at https://github.com/ChuQiaosong.", + "primary_area": "computer vision i", + "author": "Qiaosong Chu; Shuyan Li; Guangyi Chen; Kai Li; Xiu Li", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Shenzhen, China+Tsinghua University, Beijing, China; Tsinghua Shenzhen International Graduate School, Shenzhen, China+Tsinghua University, Beijing, China; Carnegie Mellon University, Pittsburgh PA, USA+Mohamed bin Zayed University of Artificial Intelligence, Abu Dhabi, UAE; NEC LABORATORIES AMERICA, INC; Tsinghua Shenzhen International Graduate School, Shenzhen, China+Tsinghua University, Beijing, China", + "bibtex": "@article{Chu_Li_Chen_Li_Li_2023, title={Adversarial Alignment for Source Free Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25119}, DOI={10.1609/aaai.v37i1.25119}, abstractNote={Source-free object detection (SFOD) aims to transfer a detector pre-trained on a label-rich source domain to an unlabeled target domain without seeing source data. While most existing SFOD methods generate pseudo labels via a source-pretrained model to guide training, these pseudo labels usually contain high noises due to heavy domain discrepancy. 
In order to obtain better pseudo supervisions, we divide the target domain into source-similar and source-dissimilar parts and align them in the feature space by adversarial learning.Specifically, we design a detection variance-based criterion to divide the target domain. This criterion is motivated by a finding that larger detection variances denote higher recall and larger similarity to the source domain. Then we incorporate an adversarial module into a mean teacher framework to drive the feature spaces of these two subsets indistinguishable. Extensive experiments on multiple cross-domain object detection datasets demonstrate that our proposed method consistently outperforms the compared SFOD methods. Our implementation is available at https://github.com/ChuQiaosong.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chu, Qiaosong and Li, Shuyan and Chen, Guangyi and Li, Kai and Li, Xiu}, year={2023}, month={Jun.}, pages={452-460} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25119/24891", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25119", + "pdf_size": 3312654, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3777004265512378062&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 6, + "aff_domain": "mails.tsinghua.edu.cn;cam.ac.uk;gmail.com;gmail.com;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;cam.ac.uk;gmail.com;gmail.com;sz.tsinghua.edu.cn", + "github": "https://github.com/ChuQiaosong", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;1+2;3;0+0", + "aff_unique_norm": "Tsinghua University;Carnegie Mellon University;Mohamed bin Zayed University of Artificial Intelligence;NEC Laboratories America, Inc.", + "aff_unique_dep": "International Graduate School;;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.cmu.edu;https://www.mbzuai.ac.ae;https://www.nec-labs.com", + "aff_unique_abbr": "THU;CMU;MBZUAI;NEC 
Labs America", + "aff_campus_unique_index": "0+1;0+1;2+3;0+1", + "aff_campus_unique": "Shenzhen;Beijing;Pittsburgh;Abu Dhabi;", + "aff_country_unique_index": "0+0;0+0;1+2;1;0+0", + "aff_country_unique": "China;United States;United Arab Emirates" + }, + { + "id": "article-26009", + "title": "Adversarial Robust Deep Reinforcement Learning Requires Redefining Robustness", + "track": "main", + "status": "Technical", + "abstract": "Learning from raw high dimensional data via interaction with a given environment has been effectively achieved through the utilization of deep neural networks. Yet the observed degradation in policy performance caused by imperceptible worst-case policy dependent translations along high sensitivity directions (i.e. adversarial perturbations) raises concerns on the robustness of deep reinforcement learning policies. In our paper, we show that these high sensitivity directions do not lie only along particular worst-case directions, but rather are more abundant in the deep neural policy landscape and can be found via more natural means in a black-box setting. Furthermore, we show that vanilla training techniques intriguingly result in learning more robust policies compared to the policies learnt via the state-of-the-art adversarial training techniques. We believe our work lays out intriguing properties of the deep reinforcement learning policy manifold and our results can help to build robust and generalizable deep reinforcement learning policies.", + "primary_area": "machine learning ii", + "author": "Ezgi Korkmaz", + "authorids": "", + "aff": "", + "bibtex": "@article{Korkmaz_2023, title={Adversarial Robust Deep Reinforcement Learning Requires Redefining Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26009}, DOI={10.1609/aaai.v37i7.26009}, abstractNote={Learning from raw high dimensional data via interaction with a given environment has been effectively achieved through the utilization of deep neural networks. 
Yet the observed degradation in policy performance caused by imperceptible worst-case policy dependent translations along high sensitivity directions (i.e. adversarial perturbations) raises concerns on the robustness of deep reinforcement learning policies. In our paper, we show that these high sensitivity directions do not lie only along particular worst-case directions, but rather are more abundant in the deep neural policy landscape and can be found via more natural means in a black-box setting. Furthermore, we show that vanilla training techniques intriguingly result in learning more robust policies compared to the policies learnt via the state-of-the-art adversarial training techniques. We believe our work lays out intriguing properties of the deep reinforcement learning policy manifold and our results can help to build robust and generalizable deep reinforcement learning policies.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Korkmaz, Ezgi}, year={2023}, month={Jun.}, pages={8369-8377} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26009/25781", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26009", + "pdf_size": 10624944, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2029852540851972984&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;cam.ac.uk;gmail.com;gmail.com;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;cam.ac.uk;gmail.com;gmail.com;sz.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 1 + }, + { + "id": "article-26608", + "title": "Adversarial Self-Attention for Language Understanding", + "track": "main", + "status": "Technical", + "abstract": "Deep neural models (e.g. Transformer) naturally learn spurious features, which create a ``shortcut'' between the labels and inputs, thus impairing the generalization and robustness. 
This paper advances self-attention mechanism to its robust variant for Transformer-based pre-trained language models (e.g. BERT). We propose Adversarial Self-Attention mechanism (ASA), which adversarially biases the attentions to effectively suppress the model reliance on features (e.g. specific keywords) and encourage its exploration of broader semantics. We conduct comprehensive evaluation across a wide range of tasks for both pre-training and fine-tuning stages. For pre-training, ASA unfolds remarkable performance gain compared to naive training for longer steps. For fine-tuning, ASA-empowered models outweigh naive models by a large margin considering both generalization and robustness.", + "primary_area": "speech natural language processing", + "author": "Hongqiu Wu; Ruixue Ding; Hai Zhao; Pengjun Xie; Fei Huang; Min Zhang", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Damo Academy, Alibaba Group; Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Damo Academy, Alibaba Group; Damo Academy, Alibaba Group; School of Computer Science and Technology, Soochow University", + "bibtex": "@article{Wu_Ding_Zhao_Xie_Huang_Zhang_2023, title={Adversarial Self-Attention for Language Understanding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26608}, DOI={10.1609/aaai.v37i11.26608}, abstractNote={Deep neural models (e.g. Transformer) naturally learn spurious features, which create a ``shortcut\u2019\u2019 between the labels and inputs, thus impairing the generalization and robustness. 
This paper advances self-attention mechanism to its robust variant for Transformer-based pre-trained language models (e.g. BERT). We propose Adversarial Self-Attention mechanism (ASA), which adversarially biases the attentions to effectively suppress the model reliance on features (e.g. specific keywords) and encourage its exploration of broader semantics. We conduct comprehensive evaluation across a wide range of tasks for both pre-training and fine-tuning stages. For pre-training, ASA unfolds remarkable performance gain compared to naive training for longer steps. For fine-tuning, ASA-empowered models outweigh naive models by a large margin considering both generalization and robustness.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Hongqiu and Ding, Ruixue and Zhao, Hai and Xie, Pengjun and Huang, Fei and Zhang, Min}, year={2023}, month={Jun.}, pages={13727-13735} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26608/26380", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26608", + "pdf_size": 1745067, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11043738160002446562&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;alibaba-inc.com;cs.sjtu.edu.cn;alibaba-inc.com;alibaba-inc.com;suda.edu.cn", + "email": "sjtu.edu.cn;alibaba-inc.com;cs.sjtu.edu.cn;alibaba-inc.com;alibaba-inc.com;suda.edu.cn", + "github": "https://github.com/gingasan/adversarialSA", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;1;0+0;1;1;2", + "aff_unique_norm": "Shanghai Jiao Tong University;Alibaba Group;Soochow University", + "aff_unique_dep": "Department of Computer Science and Engineering;Damo Academy;School of Computer Science and Technology", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.alibaba-group.com;https://eng.suda.edu.cn/", + "aff_unique_abbr": "SJTU;Alibaba;Soochow U", + "aff_campus_unique_index": 
"1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26239", + "title": "Adversarial Weight Perturbation Improves Generalization in Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "A lot of theoretical and empirical evidence shows that the flatter local minima tend to improve generalization. Adversarial Weight Perturbation (AWP) is an emerging technique to efficiently and effectively find such minima. In AMP we minimize the loss w.r.t. a bounded worst-case perturbation of the model parameters thereby favoring local minima with a small loss in a neighborhood around them.\nThe benefits of AWP, and more generally the connections between flatness and generalization, have been extensively studied for i.i.d. data such as images. In this paper, we extensively study this phenomenon for graph data. Along the way, we first derive a generalization bound for non-i.i.d. node classification tasks. Then we identify a vanishing-gradient issue with all existing formulations of AWP and we propose a new Weighted Truncated AWP (WT-AWP) to alleviate this issue. 
We show that regularizing graph neural networks with WT-AWP consistently improves both natural and robust generalization across many different graph learning tasks and models.", + "primary_area": "machine learning iv", + "author": "Yihan Wu; Aleksandar Bojchevski; Heng Huang", + "authorids": "", + "aff": "Electrical and Computer Engineering, University of Pittsburgh, PA, USA; CISPA Helmholtz Center for Information Security; Electrical and Computer Engineering, University of Pittsburgh, PA, USA", + "bibtex": "@article{Wu_Bojchevski_Huang_2023, title={Adversarial Weight Perturbation Improves Generalization in Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26239}, DOI={10.1609/aaai.v37i9.26239}, abstractNote={A lot of theoretical and empirical evidence shows that the flatter local minima tend to improve generalization. Adversarial Weight Perturbation (AWP) is an emerging technique to efficiently and effectively find such minima. In AMP we minimize the loss w.r.t. a bounded worst-case perturbation of the model parameters thereby favoring local minima with a small loss in a neighborhood around them.\nThe benefits of AWP, and more generally the connections between flatness and generalization, have been extensively studied for i.i.d. data such as images. In this paper, we extensively study this phenomenon for graph data. Along the way, we first derive a generalization bound for non-i.i.d. node classification tasks. Then we identify a vanishing-gradient issue with all existing formulations of AWP and we propose a new Weighted Truncated AWP (WT-AWP) to alleviate this issue. 
We show that regularizing graph neural networks with WT-AWP consistently improves both natural and robust generalization across many different graph learning tasks and models.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yihan and Bojchevski, Aleksandar and Huang, Heng}, year={2023}, month={Jun.}, pages={10417-10425} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26239/26011", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26239", + "pdf_size": 285958, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10319622101226258401&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "pitt.edu;cispa.de;gmail.com", + "email": "pitt.edu;cispa.de;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Pittsburgh;CISPA Helmholtz Center for Information Security", + "aff_unique_dep": "Electrical and Computer Engineering;", + "aff_unique_url": "https://www.pitt.edu;https://www.cispa.de/", + "aff_unique_abbr": "Pitt;CISPA", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pittsburgh;", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "article-26486", + "title": "Adversarial Word Dilution as Text Data Augmentation in Low-Resource Regime", + "track": "main", + "status": "Technical", + "abstract": "Data augmentation is widely used in text classification, especially in the low-resource regime where a few examples for each class are available during training. Despite the success, generating data augmentations as hard positive examples that may increase their effectiveness is under-explored. This paper proposes an Adversarial Word Dilution (AWD) method that can generate hard positive examples as text data augmentations to train the low-resource text classification model efficiently. 
Our idea of augmenting the text data is to dilute the embedding of strong positive words by weighted mixing with unknown-word embedding, making the augmented inputs hard to be recognized as positive by the classification model. We adversarially learn the dilution weights through a constrained min-max optimization process with the guidance of the labels. Empirical studies on three benchmark datasets show that AWD can generate more effective data augmentations and outperform the state-of-the-art text data augmentation methods. The additional analysis demonstrates that the data augmentations generated by AWD are interpretable and can flexibly extend to new examples without further training.", + "primary_area": "speech natural language processing", + "author": "Junfan Chen; Richong Zhang; Zheyan Luo; Chunming Hu; Yongyi Mao", + "authorids": "", + "aff": "SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; School of Electrical Engineering and Computer Science, University of Ottawa, Ottawa, Canada", + "bibtex": "@article{Chen_Zhang_Luo_Hu_Mao_2023, title={Adversarial Word Dilution as Text Data Augmentation in Low-Resource Regime}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26486}, DOI={10.1609/aaai.v37i11.26486}, abstractNote={Data augmentation is widely used in text classification, especially in the low-resource regime where a few examples for each class are available during training. Despite the success, generating data augmentations as hard positive examples that may increase their effectiveness is under-explored. 
This paper proposes an Adversarial Word Dilution (AWD) method that can generate hard positive examples as text data augmentations to train the low-resource text classification model efficiently. Our idea of augmenting the text data is to dilute the embedding of strong positive words by weighted mixing with unknown-word embedding, making the augmented inputs hard to be recognized as positive by the classification model. We adversarially learn the dilution weights through a constrained min-max optimization process with the guidance of the labels. Empirical studies on three benchmark datasets show that AWD can generate more effective data augmentations and outperform the state-of-the-art text data augmentation methods. The additional analysis demonstrates that the data augmentations generated by AWD are interpretable and can flexibly extend to new examples without further training.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Junfan and Zhang, Richong and Luo, Zheyan and Hu, Chunming and Mao, Yongyi}, year={2023}, month={Jun.}, pages={12626-12634} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26486/26258", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26486", + "pdf_size": 1201703, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9082243377797669438&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "email": "act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0+1;2", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory;University of Ottawa", + "aff_unique_dep": "School of Computer Science and Engineering;;School of Electrical Engineering and Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.uottawa.ca", + 
"aff_unique_abbr": "BUAA;;U Ottawa", + "aff_campus_unique_index": "0;0;0;0;2", + "aff_campus_unique": "Beijing;;Ottawa", + "aff_country_unique_index": "0;0+0;0;0+0;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-25485", + "title": "Aesthetically Relevant Image Captioning", + "track": "main", + "status": "Technical", + "abstract": "Image aesthetic quality assessment (AQA) aims to assign numerical aesthetic ratings to images whilst image aesthetic captioning (IAC) aims to generate textual descriptions of the aesthetic aspects of images. In this paper, we study image AQA and IAC together and present a new IAC method termed Aesthetically Relevant Image Captioning (ARIC). Based on the observation that most textual comments of an image are about objects and their interactions rather than aspects of aesthetics, we first introduce the concept of Aesthetic Relevance Score (ARS) of a sentence and have developed a model to automatically label a sentence with its ARS. We then use the ARS to design the ARIC model which includes an ARS weighted IAC loss function and an ARS based diverse aesthetic caption selector (DACS). We present extensive experimental results to show the soundness of the ARS concept and the effectiveness of the ARIC model by demonstrating that texts with higher ARS\u2019s can predict the aesthetic ratings more accurately and that the new ARIC model can generate more accurate, aesthetically more relevant and more diverse image captions. 
Furthermore, a large new research database containing 510K images with over 5 million comments and 350K aesthetic scores, and code for implementing ARIC, are available at https://github.com/PengZai/ARIC", + "primary_area": "computer vision iii", + "author": "Zhipeng Zhong; Fei Zhou; Guoping Qiu", + "authorids": "", + "aff": "College of Electronics and Information Engineering, Shenzhen University, China+Peng Cheng National Laboratory, Shenzhen, China+Guangdong Key Laboratory of Intelligent Information Processing, Shenzhen, China+Shenzhen Institute for Artificial Intelligence and Robotics for Society, China+Guangdong-Hong Kong Joint Laboratory for Big Data Imaging and Communication, Shenzhen, China; College of Electronics and Information Engineering, Shenzhen University, China+Peng Cheng National Laboratory, Shenzhen, China+Guangdong Key Laboratory of Intelligent Information Processing, Shenzhen, China+Shenzhen Institute for Artificial Intelligence and Robotics for Society, China+Guangdong-Hong Kong Joint Laboratory for Big Data Imaging and Communication, Shenzhen, China; School of Computer Science, The University of Nottingham, UK", + "bibtex": "@article{Zhong_Zhou_Qiu_2023, title={Aesthetically Relevant Image Captioning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25485}, DOI={10.1609/aaai.v37i3.25485}, abstractNote={Image aesthetic quality assessment (AQA) aims to assign numerical aesthetic ratings to images whilst image aesthetic captioning (IAC) aims to generate textual descriptions of the aesthetic aspects of images. In this paper, we study image AQA and IAC together and present a new IAC method termed Aesthetically Relevant Image Captioning (ARIC). 
Based on the observation that most textual comments of an image are about objects and their interactions rather than aspects of aesthetics, we first introduce the concept of Aesthetic Relevance Score (ARS) of a sentence and have developed a model to automatically label a sentence with its ARS. We then use the ARS to design the ARIC model which includes an ARS weighted IAC loss function and an ARS based diverse aesthetic caption selector (DACS). We present extensive experimental results to show the soundness of the ARS concept and the effectiveness of the ARIC model by demonstrating that texts with higher ARS\u2019s can predict the aesthetic ratings more accurately and that the new ARIC model can generate more accurate, aesthetically more relevant and more diverse image captions. Furthermore, a large new research database containing 510K images with over 5 million comments and 350K aesthetic scores, and code for implementing ARIC, are available at https://github.com/PengZai/ARIC}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhong, Zhipeng and Zhou, Fei and Qiu, Guoping}, year={2023}, month={Jun.}, pages={3733-3741} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25485/25257", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25485", + "pdf_size": 2469274, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3425345625554687422&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "; ;nottingham.ac.uk", + "email": "; ;nottingham.ac.uk", + "github": "https://github.com/PengZai/ARIC", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1+2+3+4;0+1+2+3+4;5", + "aff_unique_norm": "Shenzhen University;Peng Cheng National Laboratory;Guangdong Key Laboratory of Intelligent Information Processing;Shenzhen Institute for Artificial Intelligence and Robotics for Society;Guangdong-Hong Kong Joint Laboratory for Big Data Imaging and 
Communication;The University of Nottingham", + "aff_unique_dep": "College of Electronics and Information Engineering;;;;;School of Computer Science", + "aff_unique_url": "http://www.szu.edu.cn/;;;;;https://www.nottingham.ac.uk", + "aff_unique_abbr": "SZU;;;;;Nottingham", + "aff_campus_unique_index": "0+0+0+0;0+0+0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0+0+0+0;0+0+0+0+0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26676", + "title": "AirFormer: Predicting Nationwide Air Quality in China with Transformers", + "track": "aaai special track", + "status": "Technical", + "abstract": "Air pollution is a crucial issue affecting human health and livelihoods, as well as one of the barriers to economic growth. Forecasting air quality has become an increasingly important endeavor with significant social impacts, especially in emerging countries. In this paper, we present a novel Transformer termed AirFormer to predict nationwide air quality in China, with an unprecedented fine spatial granularity covering thousands of locations. AirFormer decouples the learning process into two stages: 1) a bottom-up deterministic stage that contains two new types of self-attention mechanisms to efficiently learn spatio-temporal representations; 2) a top-down stochastic stage with latent variables to capture the intrinsic uncertainty of air quality data. We evaluate AirFormer with 4-year data from 1,085 stations in Chinese Mainland. Compared to prior models, AirFormer reduces prediction errors by 5%\u223c8% on 72-hour future predictions. 
Our source code is available at https://github.com/yoshall/airformer.", + "primary_area": "ai for social impact", + "author": "Yuxuan Liang; Yutong Xia; Songyu Ke; Yiwei Wang; Qingsong Wen; Junbo Zhang; Yu Zheng; Roger Zimmermann", + "authorids": "", + "aff": "National University of Singapore, Singapore; National University of Singapore, Singapore; Shanghai Jiao Tong University, Shanghai, China+JD Intelligent Cities Research & JD iCity, JD Technology, Beijing, China; National University of Singapore, Singapore; DAMO Academy, Alibaba Group, Hangzhou, China; JD Intelligent Cities Research & JD iCity, JD Technology, Beijing, China; JD Intelligent Cities Research & JD iCity, JD Technology, Beijing, China; National University of Singapore, Singapore", + "bibtex": "@article{Liang_Xia_Ke_Wang_Wen_Zhang_Zheng_Zimmermann_2023, title={AirFormer: Predicting Nationwide Air Quality in China with Transformers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26676}, DOI={10.1609/aaai.v37i12.26676}, abstractNote={Air pollution is a crucial issue affecting human health and livelihoods, as well as one of the barriers to economic growth. Forecasting air quality has become an increasingly important endeavor with significant social impacts, especially in emerging countries. In this paper, we present a novel Transformer termed AirFormer to predict nationwide air quality in China, with an unprecedented fine spatial granularity covering thousands of locations. AirFormer decouples the learning process into two stages: 1) a bottom-up deterministic stage that contains two new types of self-attention mechanisms to efficiently learn spatio-temporal representations; 2) a top-down stochastic stage with latent variables to capture the intrinsic uncertainty of air quality data. We evaluate AirFormer with 4-year data from 1,085 stations in Chinese Mainland. Compared to prior models, AirFormer reduces prediction errors by 5%\u223c8% on 72-hour future predictions. 
Our source code is available at https://github.com/yoshall/airformer.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Yuxuan and Xia, Yutong and Ke, Songyu and Wang, Yiwei and Wen, Qingsong and Zhang, Junbo and Zheng, Yu and Zimmermann, Roger}, year={2023}, month={Jun.}, pages={14329-14337} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26676/26448", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26676", + "pdf_size": 6087737, + "gs_citation": 128, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16806127658073955769&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "outlook.com;gmail.com;outlook.com;comp.nus.edu.sg;gmail.com;outlook.com;outlook.com;comp.nus.edu.sg", + "email": "outlook.com;gmail.com;outlook.com;comp.nus.edu.sg;gmail.com;outlook.com;outlook.com;comp.nus.edu.sg", + "github": "https://github.com/yoshall/airformer", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1+2;0;3;2;2;0", + "aff_unique_norm": "National University of Singapore;Shanghai Jiao Tong University;JD Technology;Alibaba Group", + "aff_unique_dep": ";;JD Intelligent Cities Research & JD iCity;DAMO Academy", + "aff_unique_url": "https://www.nus.edu.sg;https://www.sjtu.edu.cn;https://www.jd.com;https://www.alibaba.com", + "aff_unique_abbr": "NUS;SJTU;JD Tech;Alibaba", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Shanghai;Hangzhou", + "aff_country_unique_index": "0;0;1+1;0;1;1;1;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25357", + "title": "Alignment-Enriched Tuning for Patch-Level Pre-trained Document Image Models", + "track": "main", + "status": "Technical", + "abstract": "Alignment between image and text has shown promising improvements on patch-level pre-trained document image models. 
However, investigating more effective or finer-grained alignment techniques during pre-training requires a large amount of computation cost and time. Thus, a question naturally arises: Could we fine-tune the pre-trained models adaptive to downstream tasks with alignment objectives and achieve comparable or better performance? In this paper, we propose a new model architecture with alignment-enriched tuning (dubbed AETNet) upon pre-trained document image models, to adapt downstream tasks with the joint task-specific supervised and alignment-aware contrastive objective. Specifically, we introduce an extra visual transformer as the alignment-ware image encoder and an extra text transformer as the alignment-ware text encoder before multimodal fusion. We consider alignment in the following three aspects: 1) document-level alignment by leveraging the cross-modal and intra-modal contrastive loss; 2) global-local alignment for modeling localized and structural information in document images; and 3) local-level alignment for more accurate patch-level information. Experiments on various downstream tasks show that AETNet can achieve state-of-the-art performance on various downstream tasks. Notably, AETNet consistently outperforms state-of-the-art pre-trained models, such as LayoutLMv3 with fine-tuning techniques, on three different downstream tasks. 
Code is available at https://github.com/MAEHCM/AET.", + "primary_area": "computer vision ii", + "author": "Lei Wang; Jiabang He; Xing Xu; Ning Liu; Hui Liu", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of Electronic Science and Technology of China, China+Singapore Management University, Singapore; School of Computer Science and Engineering, University of Electronic Science and Technology of China, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, China; Beijing Forestry University, China; Beijing Rongda Technology Co., Ltd., China", + "bibtex": "@article{Wang_He_Xu_Liu_Liu_2023, title={Alignment-Enriched Tuning for Patch-Level Pre-trained Document Image Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25357}, DOI={10.1609/aaai.v37i2.25357}, abstractNote={Alignment between image and text has shown promising improvements on patch-level pre-trained document image models. However, investigating more effective or finer-grained alignment techniques during pre-training requires a large amount of computation cost and time. Thus, a question naturally arises: Could we fine-tune the pre-trained models adaptive to downstream tasks with alignment objectives and achieve comparable or better performance? In this paper, we propose a new model architecture with alignment-enriched tuning (dubbed AETNet) upon pre-trained document image models, to adapt downstream tasks with the joint task-specific supervised and alignment-aware contrastive objective. Specifically, we introduce an extra visual transformer as the alignment-ware image encoder and an extra text transformer as the alignment-ware text encoder before multimodal fusion. 
We consider alignment in the following three aspects: 1) document-level alignment by leveraging the cross-modal and intra-modal contrastive loss; 2) global-local alignment for modeling localized and structural information in document images; and 3) local-level alignment for more accurate patch-level information. Experiments on various downstream tasks show that AETNet can achieve state-of-the-art performance on various downstream tasks. Notably, AETNet consistently outperforms state-of-the-art pre-trained models, such as LayoutLMv3 with fine-tuning techniques, on three different downstream tasks. Code is available at https://github.com/MAEHCM/AET.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Lei and He, Jiabang and Xu, Xing and Liu, Ning and Liu, Hui}, year={2023}, month={Jun.}, pages={2590-2598} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25357/25129", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25357", + "pdf_size": 486374, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10799862627285835731&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;outlook.com;uestc.edu.cn;bjfu.edu.cn;gmail.com", + "email": "gmail.com;outlook.com;uestc.edu.cn;bjfu.edu.cn;gmail.com", + "github": "https://github.com/MAEHCM/AET", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;2;3", + "aff_unique_norm": "University of Electronic Science and Technology of China;Singapore Management University;Beijing Forestry University;Beijing Rongda Technology Co., Ltd.", + "aff_unique_dep": "School of Computer Science and Engineering;;;", + "aff_unique_url": "http://www.uestc.edu.cn;https://www.smu.edu.sg;https://www.bfu.edu.cn;", + "aff_unique_abbr": "UESTC;SMU;BFU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;0;0", + "aff_country_unique": "China;Singapore" + }, + { + 
"id": "article-26010", + "title": "Almost Cost-Free Communication in Federated Best Arm Identification", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of best arm identification in a federated learning multi-armed bandit setup with a central server and multiple clients. Each client is associated with a multi-armed bandit in which each arm yields i.i.d. rewards following a Gaussian distribution with an unknown mean and known variance. The set of arms is assumed to be the same at all the clients. We define two notions of best arm local and global. The local best arm at a client is the arm with the largest mean among the arms local to the client, whereas the global best arm is the arm with the largest average mean across all the clients. We assume that each client can only observe the rewards from its local arms and thereby estimate its local best arm. The clients communicate with a central server on uplinks that entail a cost of C>=0 units per usage per uplink. The global best arm is estimated at the server. The goal is to identify the local best arms and the global best arm with minimal total cost, defined as the sum of the total number of arm selections at all the clients and the total communication cost, subject to an upper bound on the error probability. We propose a novel algorithm FedElim that is based on successive elimination and communicates only in exponential time steps and obtain a high probability instance-dependent upper bound on its total cost. The key takeaway from our paper is that for any C>=0 and error probabilities sufficiently small, the total number of arm selections (resp. the total cost) under FedElim is at most 2 (resp. 3) times the maximum total number of arm selections under its variant that communicates in every time step. Additionally, we show that the latter is optimal in expectation up to a constant factor, thereby demonstrating that communication is almost cost-free in FedElim. 
We numerically validate the efficacy of FedElim on two synthetic datasets and the MovieLens dataset.", + "primary_area": "machine learning ii", + "author": "Srinivas Reddy Kota; P. N. Karthik; Vincent Y. F. Tan", + "authorids": "", + "aff": "National University of Singapore; National University of Singapore; National University of Singapore", + "bibtex": "@article{Kota_Karthik_Tan_2023, title={Almost Cost-Free Communication in Federated Best Arm Identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26010}, DOI={10.1609/aaai.v37i7.26010}, abstractNote={We study the problem of best arm identification in a federated learning multi-armed bandit setup with a central server and multiple clients. Each client is associated with a multi-armed bandit in which each arm yields i.i.d. rewards following a Gaussian distribution with an unknown mean and known variance. The set of arms is assumed to be the same at all the clients. We define two notions of best arm local and global. The local best arm at a client is the arm with the largest mean among the arms local to the client, whereas the global best arm is the arm with the largest average mean across all the clients. We assume that each client can only observe the rewards from its local arms and thereby estimate its local best arm. The clients communicate with a central server on uplinks that entail a cost of C>=0 units per usage per uplink. The global best arm is estimated at the server. The goal is to identify the local best arms and the global best arm with minimal total cost, defined as the sum of the total number of arm selections at all the clients and the total communication cost, subject to an upper bound on the error probability. We propose a novel algorithm FedElim that is based on successive elimination and communicates only in exponential time steps and obtain a high probability instance-dependent upper bound on its total cost. 
The key takeaway from our paper is that for any C>=0 and error probabilities sufficiently small, the total number of arm selections (resp. the total cost) under FedElim is at most 2 (resp. 3) times the maximum total number of arm selections under its variant that communicates in every time step. Additionally, we show that the latter is optimal in expectation up to a constant factor, thereby demonstrating that communication is almost cost-free in FedElim. We numerically validate the efficacy of FedElim on two synthetic datasets and the MovieLens dataset.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kota, Srinivas Reddy and Karthik, P. N. and Tan, Vincent Y. F.}, year={2023}, month={Jun.}, pages={8378-8385} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26010/25782", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26010", + "pdf_size": 552730, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2018743268729641257&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;nus.edu.sg;nus.edu.sg", + "email": "gmail.com;nus.edu.sg;nus.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nus.edu.sg", + "aff_unique_abbr": "NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26422", + "title": "AlphaRoute: Large-Scale Coordinated Route Planning via Monte Carlo Tree Search", + "track": "main", + "status": "Technical", + "abstract": "This paper proposes AlphaRoute, an AlphaGo inspired algorithm for coordinating large-scale routes, built upon graph attention reinforcement learning and Monte Carlo Tree Search (MCTS). 
We first partition the road network into regions and model large-scale coordinated route planning as a Markov game, where each partitioned region is treated as a player instead of each driver. Then, AlphaRoute applies a bilevel optimization framework, consisting of several region planners and a global planner, where the region planner coordinates the route choices for vehicles located in the region and generates several strategies, and the global planner evaluates the combination of strategies. \n\tAlphaRoute is built on graph attention network for evaluating each state and MCTS algorithm for dynamically visiting and simulating the future state for narrowing down the search space. AlphaRoute is capable of 1) bridging user fairness and system efficiency, 2) achieving higher search efficiency by alleviating the curse of dimensionality problems, and 3) making an effective and informed route planning by simulating over the future to capture traffic dynamics. \n\tComprehensive experiments are conducted on two real-world road networks as compared with several baselines to evaluate the performance, and results show that AlphaRoute achieves the lowest travel time, and is efficient and effective for coordinating large-scale routes and alleviating the traffic congestion problem. 
The code will be publicly available.", + "primary_area": "planning routing and scheduling", + "author": "Guiyang Luo; Yantao Wang; Hui Zhang; Quan Yuan; Jinglin Li", + "authorids": "", + "aff": "State Key Laboratory Of Networking And Switching Technology (Beijing University of Posts and Telecommunications) + State Key Laboratory of Integrated Services Networks (Xidian University); State Key Laboratory Of Networking And Switching Technology (Beijing University of Posts and Telecommunications) + State Key Laboratory of Integrated Services Networks (Xidian University); Beijing Jiaotong University, Beijing, China; State Key Laboratory Of Networking And Switching Technology (Beijing University of Posts and Telecommunications) + State Key Laboratory of Integrated Services Networks (Xidian University); State Key Laboratory Of Networking And Switching Technology (Beijing University of Posts and Telecommunications)", + "bibtex": "@article{Luo_Wang_Zhang_Yuan_Li_2023, title={AlphaRoute: Large-Scale Coordinated Route Planning via Monte Carlo Tree Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26422}, DOI={10.1609/aaai.v37i10.26422}, abstractNote={This paper proposes AlphaRoute, an AlphaGo inspired algorithm for coordinating large-scale routes, built upon graph attention reinforcement learning and Monte Carlo Tree Search (MCTS). We first partition the road network into regions and model large-scale coordinated route planning as a Markov game, where each partitioned region is treated as a player instead of each driver. Then, AlphaRoute applies a bilevel optimization framework, consisting of several region planners and a global planner, where the region planner coordinates the route choices for vehicles located in the region and generates several strategies, and the global planner evaluates the combination of strategies. 
AlphaRoute is built on graph attention network for evaluating each state and MCTS algorithm for dynamically visiting and simulating the future state for narrowing down the search space. AlphaRoute is capable of 1) bridging user fairness and system efficiency, 2) achieving higher search efficiency by alleviating the curse of dimensionality problems, and 3) making an effective and informed route planning by simulating over the future to capture traffic dynamics. Comprehensive experiments are conducted on two real-world road networks as compared with several baselines to evaluate the performance, and results show that AlphaRoute achieves the lowest travel time, and is efficient and effective for coordinating large-scale routes and alleviating the traffic congestion problem. The code will be publicly available.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Guiyang and Wang, Yantao and Zhang, Hui and Yuan, Quan and Li, Jinglin}, year={2023}, month={Jun.}, pages={12058-12067} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26422/26194", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26422", + "pdf_size": 4887179, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14831981202586575894&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bjtu.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bjtu.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;0+1;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Xidian University;Beijing Jiaotong University", + "aff_unique_dep": "State Key Laboratory Of Networking And Switching Technology;State Key Laboratory of Integrated Services Networks;", + "aff_unique_url": "http://www.bupt.edu.cn/;http://www.xidian.edu.cn/;http://www.bjtu.edu.cn", + "aff_unique_abbr": 
"BUPT;Xidian;BJTU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26962", + "title": "AlphaSnake: Policy Iteration on a Nondeterministic NP-Hard Markov Decision Process (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Reinforcement learning has been used to approach well-known NP-hard combinatorial problems in graph theory. Among these, Hamiltonian cycle problems are exceptionally difficult to analyze, even when restricted to individual instances of structurally complex graphs. In this paper, we use Monte Carlo Tree Search (MCTS), the search algorithm behind many state-of-the-art reinforcement learning algorithms such as AlphaZero, to create autonomous agents that learn to play the game of Snake, a game centered on properties of Hamiltonian cycles on grid graphs. The game of Snake can be formulated as a single-player discounted Markov Decision Process (MDP), where the agent must behave optimally in a stochastic environment. Determining the optimal policy for Snake, defined as the policy that maximizes the probability of winning -- or win rate -- with higher priority and minimizes the expected number of time steps to win with lower priority, is conjectured to be NP-hard. 
Performance-wise, compared to prior work in the Snake game, our algorithm is the first to achieve a win rate over 0.5 (a uniform random policy achieves a win rate < 2.57 x 10^{-15}), demonstrating the versatility of AlphaZero in tackling NP-hard problems.", + "primary_area": "", + "author": "Kevin Du; Ian Gemp; Yi Wu; Yingying Wu", + "authorids": "", + "aff": "Harvard University; DeepMind; Institute for Interdisciplinary Information Sciences, Tsinghua University; Center of Mathematical Sciences and Applications, Harvard University + University of Houston, Department of Mathematics", + "bibtex": "@article{Du_Gemp_Wu_Wu_2024, title={AlphaSnake: Policy Iteration on a Nondeterministic NP-Hard Markov Decision Process (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26962}, DOI={10.1609/aaai.v37i13.26962}, abstractNote={Reinforcement learning has been used to approach well-known NP-hard combinatorial problems in graph theory. Among these, Hamiltonian cycle problems are exceptionally difficult to analyze, even when restricted to individual instances of structurally complex graphs. In this paper, we use Monte Carlo Tree Search (MCTS), the search algorithm behind many state-of-the-art reinforcement learning algorithms such as AlphaZero, to create autonomous agents that learn to play the game of Snake, a game centered on properties of Hamiltonian cycles on grid graphs. The game of Snake can be formulated as a single-player discounted Markov Decision Process (MDP), where the agent must behave optimally in a stochastic environment. Determining the optimal policy for Snake, defined as the policy that maximizes the probability of winning -- or win rate -- with higher priority and minimizes the expected number of time steps to win with lower priority, is conjectured to be NP-hard. 
Performance-wise, compared to prior work in the Snake game, our algorithm is the first to achieve a win rate over 0.5 (a uniform random policy achieves a win rate < 2.57 x 10^{-15}), demonstrating the versatility of AlphaZero in tackling NP-hard problems.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Kevin and Gemp, Ian and Wu, Yi and Wu, Yingying}, year={2024}, month={Jul.}, pages={16204-16205} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26962/26734", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26962", + "pdf_size": 190273, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:ua9isrdvNoEJ:scholar.google.com/&scioq=AlphaSnake:+Policy+Iteration+on+a+Nondeterministic+NP-Hard+Markov+Decision+Process+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "uh.edu; ; ; ", + "email": "uh.edu; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0+3", + "aff_unique_norm": "Harvard University;DeepMind;Tsinghua University;University of Houston", + "aff_unique_dep": ";;Institute for Interdisciplinary Information Sciences;Department of Mathematics", + "aff_unique_url": "https://www.harvard.edu;https://deepmind.com;https://www.tsinghua.edu.cn;https://www.uh.edu", + "aff_unique_abbr": "Harvard;DeepMind;Tsinghua;UH", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;1;2;0+0", + "aff_country_unique": "United States;United Kingdom;China" + }, + { + "id": "article-25830", + "title": "Alternating Layered Variational Quantum Circuits Can Be Classically Optimized Efficiently Using Classical Shadows", + "track": "main", + "status": "Technical", + "abstract": "Variational quantum algorithms (VQAs) are the quantum analog of classical neural networks (NNs). 
A VQA consists of a parameterized quantum circuit (PQC) which is composed of multiple layers of ansatzes (simpler PQCs, which are an analogy of NN layers) that differ only in selections of parameters. Previous work has identified the alternating layered ansatz as potentially a new standard ansatz in near-term quantum computing. Indeed, shallow alternating layered VQAs are easy to implement and have been shown to be both trainable and expressive. In this work, we introduce a training algorithm with an exponential reduction in training cost of such VQAs. Moreover, our algorithm uses classical shadows of quantum input data, and can hence be run on a classical computer with rigorous performance guarantees. We demonstrate 2-3 orders of magnitude improvement in the training cost using our algorithm for the example problems of finding state preparation circuits and the quantum autoencoder.", + "primary_area": "machine learning i", + "author": "Afrad Basheer; Yuan Feng; Christopher Ferrie; Sanjiang Li", + "authorids": "", + "aff": "Centre for Quantum Software and Information, University of Technology Sydney, NSW 2007, Australia; Centre for Quantum Software and Information, University of Technology Sydney, NSW 2007, Australia; Centre for Quantum Software and Information, University of Technology Sydney, NSW 2007, Australia; Centre for Quantum Software and Information, University of Technology Sydney, NSW 2007, Australia", + "bibtex": "@article{Basheer_Feng_Ferrie_Li_2023, title={Alternating Layered Variational Quantum Circuits Can Be Classically Optimized Efficiently Using Classical Shadows}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25830}, DOI={10.1609/aaai.v37i6.25830}, abstractNote={Variational quantum algorithms (VQAs) are the quantum analog of classical neural networks (NNs). 
A VQA consists of a parameterized quantum circuit (PQC) which is composed of multiple layers of ansatzes (simpler PQCs, which are an analogy of NN layers) that differ only in selections of parameters. Previous work has identified the alternating layered ansatz as potentially a new standard ansatz in near-term quantum computing. Indeed, shallow alternating layered VQAs are easy to implement and have been shown to be both trainable and expressive. In this work, we introduce a training algorithm with an exponential reduction in training cost of such VQAs. Moreover, our algorithm uses classical shadows of quantum input data, and can hence be run on a classical computer with rigorous performance guarantees. We demonstrate 2-3 orders of magnitude improvement in the training cost using our algorithm for the example problems of finding state preparation circuits and the quantum autoencoder.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Basheer, Afrad and Feng, Yuan and Ferrie, Christopher and Li, Sanjiang}, year={2023}, month={Jun.}, pages={6770-6778} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25830/25602", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25830", + "pdf_size": 1315478, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10921644286383499568&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "student.uts.edu.au;uts.edu.au;uts.edu.au;uts.edu.au", + "email": "student.uts.edu.au;uts.edu.au;uts.edu.au;uts.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Technology Sydney", + "aff_unique_dep": "Centre for Quantum Software and Information", + "aff_unique_url": "https://www.uts.edu.au", + "aff_unique_abbr": "UTS", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Sydney", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": 
"Australia" + }, + { + "id": "article-26837", + "title": "AmnioML: Amniotic Fluid Segmentation and Volume Prediction with Uncertainty Quantification", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Accurately predicting the volume of amniotic fluid is fundamental to assessing pregnancy risks, though the task usually requires many hours of laborious work by medical experts. In this paper, we present AmnioML, a machine learning solution that leverages deep learning and conformal prediction to output fast and accurate volume estimates and segmentation masks from fetal MRIs with Dice coefficient over 0.9. Also, we make available a novel, curated dataset for fetal MRIs with 853 exams and benchmark the performance of many recent deep learning architectures. In addition, we introduce a conformal prediction tool that yields narrow predictive intervals with theoretically guaranteed coverage, thus aiding doctors in detecting pregnancy risks and saving lives. A successful case study of AmnioML deployed in a medical setting is also reported. Real-world clinical benefits include up to 20x segmentation time reduction, with most segmentations deemed by doctors as not needing any further manual refinement. Furthermore, AmnioML's volume predictions were found to be highly accurate in practice, with mean absolute error below 56mL and tight predictive intervals, showcasing its impact in reducing pregnancy complications.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Daniel Csillag; Lucas Monteiro Paes; Thiago Ramos; Jo\u00e3o Vitor Romano; Rodrigo Schuller; Roberto B. Seixas; Roberto I. 
Oliveira; Paulo Orenstein", + "authorids": "", + "aff": "IMPA, Rio de Janeiro, Brazil; Harvard University, Cambridge, USA; IMPA, Rio de Janeiro, Brazil; IMPA, Rio de Janeiro, Brazil; IMPA, Rio de Janeiro, Brazil; IMPA, Rio de Janeiro, Brazil; IMPA, Rio de Janeiro, Brazil; IMPA, Rio de Janeiro, Brazil", + "bibtex": "@article{Csillag_Monteiro Paes_Ramos_Romano_Schuller_Seixas_Oliveira_Orenstein_2024, title={AmnioML: Amniotic Fluid Segmentation and Volume Prediction with Uncertainty Quantification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26837}, DOI={10.1609/aaai.v37i13.26837}, abstractNote={Accurately predicting the volume of amniotic fluid is fundamental to assessing pregnancy risks, though the task usually requires many hours of laborious work by medical experts. In this paper, we present AmnioML, a machine learning solution that leverages deep learning and conformal prediction to output fast and accurate volume estimates and segmentation masks from fetal MRIs with Dice coefficient over 0.9. Also, we make available a novel, curated dataset for fetal MRIs with 853 exams and benchmark the performance of many recent deep learning architectures. In addition, we introduce a conformal prediction tool that yields narrow predictive intervals with theoretically guaranteed coverage, thus aiding doctors in detecting pregnancy risks and saving lives. A successful case study of AmnioML deployed in a medical setting is also reported. Real-world clinical benefits include up to 20x segmentation time reduction, with most segmentations deemed by doctors as not needing any further manual refinement. 
Furthermore, AmnioML\u2019s volume predictions were found to be highly accurate in practice, with mean absolute error below 56mL and tight predictive intervals, showcasing its impact in reducing pregnancy complications.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Csillag, Daniel and Monteiro Paes, Lucas and Ramos, Thiago and Romano, Jo\u00e3o Vitor and Schuller, Rodrigo and Seixas, Roberto B. and Oliveira, Roberto I. and Orenstein, Paulo}, year={2024}, month={Jul.}, pages={15494-15502} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26837/26609", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26837", + "pdf_size": 6823858, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=295784273426062803&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "impa.br;g.harvard.edu;impa.br;impa.br;impa.br;impa.br;impa.br;impa.br", + "email": "impa.br;g.harvard.edu;impa.br;impa.br;impa.br;impa.br;impa.br;impa.br", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;0;0;0;0", + "aff_unique_norm": "Instituto Nacional de Matem\u00e1tica Pura e Aplicada;Harvard University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.impa.br;https://www.harvard.edu", + "aff_unique_abbr": "IMPA;Harvard", + "aff_campus_unique_index": "0;1;0;0;0;0;0;0", + "aff_campus_unique": "Rio de Janeiro;Cambridge", + "aff_country_unique_index": "0;1;0;0;0;0;0;0", + "aff_country_unique": "Brazil;United States" + }, + { + "id": "article-25104", + "title": "Amodal Instance Segmentation via Prior-Guided Expansion", + "track": "main", + "status": "Technical", + "abstract": "Amodal instance segmentation aims to infer the amodal mask, including both the visible part and occluded part of each object instance. Predicting the occluded parts is challenging. 
Existing methods often produce incomplete amodal boxes and amodal masks, probably due to lacking visual evidences to expand the boxes and masks. To this end, we propose a prior-guided expansion framework, which builds on a two-stage segmentation model (i.e., Mask R-CNN) and performs box-level (resp., pixel-level) expansion for amodal box (resp., mask) prediction, by retrieving regression (resp., flow) transformations from a memory bank of expansion prior. We conduct extensive experiments on KINS, D2SA, and COCOA cls datasets, which show the effectiveness of our method.", + "primary_area": "computer vision i", + "author": "Junjie Chen; Li Niu; Jianfu Zhang; Jianlou Si; Chen Qian; Liqing Zhang", + "authorids": "", + "aff": "The MoE Key Lab of AI, CSE department, Shanghai Jiao Tong University; The MoE Key Lab of AI, CSE department, Shanghai Jiao Tong University; The MoE Key Lab of AI, CSE department, Shanghai Jiao Tong University; SenseTime Research, SenseTime; SenseTime Research, SenseTime; The MoE Key Lab of AI, CSE department, Shanghai Jiao Tong University", + "bibtex": "@article{Chen_Niu_Zhang_Si_Qian_Zhang_2023, title={Amodal Instance Segmentation via Prior-Guided Expansion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25104}, DOI={10.1609/aaai.v37i1.25104}, abstractNote={Amodal instance segmentation aims to infer the amodal mask, including both the visible part and occluded part of each object instance. Predicting the occluded parts is challenging. Existing methods often produce incomplete amodal boxes and amodal masks, probably due to lacking visual evidences to expand the boxes and masks. To this end, we propose a prior-guided expansion framework, which builds on a two-stage segmentation model (i.e., Mask R-CNN) and performs box-level (resp., pixel-level) expansion for amodal box (resp., mask) prediction, by retrieving regression (resp., flow) transformations from a memory bank of expansion prior. 
We conduct extensive experiments on KINS, D2SA, and COCOA cls datasets, which show the effectiveness of our method.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Junjie and Niu, Li and Zhang, Jianfu and Si, Jianlou and Qian, Chen and Zhang, Liqing}, year={2023}, month={Jun.}, pages={313-321} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25104/24876", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25104", + "pdf_size": 5223808, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16767500090120823281&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sensetime.com;sensetime.com;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sensetime.com;sensetime.com;cs.sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;0", + "aff_unique_norm": "Shanghai Jiao Tong University;SenseTime", + "aff_unique_dep": "CSE department;SenseTime Research", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.sensetime.com", + "aff_unique_abbr": "SJTU;SenseTime", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25940", + "title": "An Adaptive Layer to Leverage Both Domain and Task Specific Information from Scarce Data", + "track": "main", + "status": "Technical", + "abstract": "Many companies make use of customer service chats to help the customer and try to solve their problem. However, customer service data is confidential and as such, cannot easily be shared in the research community. This also implies that these\ndata are rarely labeled, making it difficult to take advantage of it with machine learning methods. 
In this paper we present the first work on a customer\u2019s problem status prediction and identification of problematic conversations. Given very small\nsubsets of labeled textual conversations and unlabeled ones, we propose a semi-supervised framework dedicated to customer service data leveraging speaker role information to adapt the model to the domain and the task using a two-step process. Our framework, Task-Adaptive Fine-tuning, goes from predicting customer satisfaction to identifying the status of the customer\u2019s problem, with the latter being the main objective of the multi-task setting. It outperforms recent inductive semi-supervised approaches on this novel task while only considering a relatively low number of parameters to train on during the final target task. We believe it can not only serve models dedicated to customer service but also to any other application making use of confidential conversational data where labeled sets are rare. Source code is available at https://github.com/gguibon/taft", + "primary_area": "machine learning i", + "author": "Ga\u00ebl Guibon; Matthieu Labeau; Luce Lefeuvre; Chlo\u00e9 Clavel", + "authorids": "", + "aff": "LTCI, T\u00e9l\u00e9com-Paris, Institut Polytechnique de Paris+Direction Technologies, Innovation & Projets Groupe, SNCF; LTCI, T\u00e9l\u00e9com-Paris, Institut Polytechnique de Paris; Direction Technologies, Innovation & Projets Groupe, SNCF; LTCI, T\u00e9l\u00e9com-Paris, Institut Polytechnique de Paris", + "bibtex": "@article{Guibon_Labeau_Lefeuvre_Clavel_2023, title={An Adaptive Layer to Leverage Both Domain and Task Specific Information from Scarce Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25940}, DOI={10.1609/aaai.v37i6.25940}, abstractNote={Many companies make use of customer service chats to help the customer and try to solve their problem. However, customer service data is confidential and as such, cannot easily be shared in the research community. 
This also implies that these\ndata are rarely labeled, making it difficult to take advantage of it with machine learning methods. In this paper we present the first work on a customer\u2019s problem status prediction and identification of problematic conversations. Given very small\nsubsets of labeled textual conversations and unlabeled ones, we propose a semi-supervised framework dedicated to customer service data leveraging speaker role information to adapt the model to the domain and the task using a two-step process. Our framework, Task-Adaptive Fine-tuning, goes from predicting customer satisfaction to identifying the status of the customer\u2019s problem, with the latter being the main objective of the multi-task setting. It outperforms recent inductive semi-supervised approaches on this novel task while only considering a relatively low number of parameters to train on during the final target task. We believe it can not only serve models dedicated to customer service but also to any other application making use of confidential conversational data where labeled sets are rare. 
Source code is available at https://github.com/gguibon/taft}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guibon, Ga\u00ebl and Labeau, Matthieu and Lefeuvre, Luce and Clavel, Chlo\u00e9}, year={2023}, month={Jun.}, pages={7757-7765} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25940/25712", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25940", + "pdf_size": 265579, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3506039558151943036&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "telecom-paris.fr;telecom-paris.fr;sncf.fr;telecom-paris.fr", + "email": "telecom-paris.fr;telecom-paris.fr;sncf.fr;telecom-paris.fr", + "github": "https://github.com/gguibon/taft", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;0", + "aff_unique_norm": "T\u00e9l\u00e9com-Paris;SNCF", + "aff_unique_dep": "LTCI;Direction Technologies, Innovation & Projets", + "aff_unique_url": "https://www.telecom-paris.fr;https://www.sncf.com", + "aff_unique_abbr": "T\u00e9l\u00e9com-Paris;SNCF", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-26880", + "title": "An Analysis of Engineering Students\u2019 Responses to an AI Ethics Scenario", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "In light of significant issues in the technology industry, such as algorithms that worsen racial biases, the spread of online misinformation, and the expansion of mass surveillance, it is increasingly important to teach the ethics and sociotechnical implications of developing and using artificial intelligence (AI). Using 53 survey responses from engineering undergraduates, this paper measures students' abilities to identify, mitigate, and reflect on a hypothetical AI ethics scenario. 
We engage with prior research on pedagogical approaches to and considerations for teaching AI ethics and highlight some of the obstacles that engineering undergraduate students experience in learning and applying AI ethics concepts.", + "primary_area": "", + "author": "Alexi Orchard; David Radke", + "authorids": "", + "aff": "Department of English Language and Literature, University of Waterloo, Waterloo, Canada; David R. Cheriton School of Computer Science, University of Waterloo, Waterloo, Canada", + "bibtex": "@article{Orchard_Radke_2024, title={An Analysis of Engineering Students\u2019 Responses to an AI Ethics Scenario}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26880}, DOI={10.1609/aaai.v37i13.26880}, abstractNote={In light of significant issues in the technology industry, such as algorithms that worsen racial biases, the spread of online misinformation, and the expansion of mass surveillance, it is increasingly important to teach the ethics and sociotechnical implications of developing and using artificial intelligence (AI). Using 53 survey responses from engineering undergraduates, this paper measures students\u2019 abilities to identify, mitigate, and reflect on a hypothetical AI ethics scenario. 
We engage with prior research on pedagogical approaches to and considerations for teaching AI ethics and highlight some of the obstacles that engineering undergraduate students experience in learning and applying AI ethics concepts.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Orchard, Alexi and Radke, David}, year={2024}, month={Jul.}, pages={15834-15842} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26880/26652", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26880", + "pdf_size": 384637, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13662225606744701043&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "uwaterloo.ca;uwaterloo.ca", + "email": "uwaterloo.ca;uwaterloo.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Waterloo", + "aff_unique_dep": "Department of English Language and Literature", + "aff_unique_url": "https://uwaterloo.ca", + "aff_unique_abbr": "UW", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Waterloo", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26974", + "title": "An Analysis of the Deliberation and Task Performance of an Active Logic Based Agent (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Active logic is a time-situated reasoner that can track the history of inferences, detect contradictions, and make parallel inferences in time. In this paper, we explore the behavior of an active-logic based agent on different sets of action selection axioms for a time-constrained target search task. We compare the performance of a baseline set of axioms that does not avoid redundant actions with five other axiom sets that avoid repeated actions but vary in their knowledge content. 
The results of these experiments show the importance of balancing boldness and caution for target search.", + "primary_area": "", + "author": "Anthony Herron; Darsana P. Josyula", + "authorids": "", + "aff": "Bowie State University; Bowie State University", + "bibtex": "@article{Herron_Josyula_2024, title={An Analysis of the Deliberation and Task Performance of an Active Logic Based Agent (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26974}, DOI={10.1609/aaai.v37i13.26974}, abstractNote={Active logic is a time-situated reasoner that can track the history of inferences, detect contradictions, and make parallel inferences in time. In this paper, we explore the behavior of an active-logic based agent on different sets of action selection axioms for a time-constrained target search task. We compare the performance of a baseline set of axioms that does not avoid redundant actions with five other axiom sets that avoid repeated actions but vary in their knowledge content. 
The results of these experiments show the importance of balancing boldness and caution for target search.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Herron, Anthony and Josyula, Darsana P.}, year={2024}, month={Jul.}, pages={16228-16229} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26974/26746", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26974", + "pdf_size": 148352, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2908313634685501553&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "students.bowiestate.edu;cs.umd.edu", + "email": "students.bowiestate.edu;cs.umd.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Bowie State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.bowiestate.edu", + "aff_unique_abbr": "BSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25985", + "title": "An Efficient Algorithm for Fair Multi-Agent Multi-Armed Bandit with Low Regret", + "track": "main", + "status": "Technical", + "abstract": "Recently a multi-agent variant of the classical multi-armed bandit was proposed to tackle fairness issues in online learning. Inspired by a long line of work in social choice and economics, the goal is to optimize the Nash social welfare instead of the total utility. Unfortunately previous algorithms either are not efficient or achieve sub-optimal regret in terms of the number of rounds. We propose a new efficient algorithm with lower regret than even previous inefficient ones. We also complement our efficient algorithm with an inefficient approach with regret that matches the lower bound for one agent. 
The experimental findings confirm the effectiveness of our efficient algorithm compared to the previous approaches.", + "primary_area": "machine learning ii", + "author": "Matthew Jones; Huy Nguyen; Thy Nguyen", + "authorids": "", + "aff": "Northeastern University; Northeastern University; Northeastern University", + "bibtex": "@article{Jones_Nguyen_Nguyen_2023, title={An Efficient Algorithm for Fair Multi-Agent Multi-Armed Bandit with Low Regret}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25985}, DOI={10.1609/aaai.v37i7.25985}, abstractNote={Recently a multi-agent variant of the classical multi-armed bandit was proposed to tackle fairness issues in online learning. Inspired by a long line of work in social choice and economics, the goal is to optimize the Nash social welfare instead of the total utility. Unfortunately previous algorithms either are not efficient or achieve sub-optimal regret in terms of the number of rounds. We propose a new efficient algorithm with lower regret than even previous inefficient ones. We also complement our efficient algorithm with an inefficient approach with regret that matches the lower bound for one agent. 
The experimental findings confirm the effectiveness of our efficient algorithm compared to the previous approaches.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jones, Matthew and Nguyen, Huy and Nguyen, Thy}, year={2023}, month={Jun.}, pages={8159-8167} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25985/25757", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25985", + "pdf_size": 329919, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10033072111778874357&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "northeastern.edu;northeastern.edu;northeastern.edu", + "email": "northeastern.edu;northeastern.edu;northeastern.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Northeastern University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.northeastern.edu", + "aff_unique_abbr": "NEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25722", + "title": "An Efficient Deep Reinforcement Learning Algorithm for Solving Imperfect Information Extensive-Form Games", + "track": "main", + "status": "Technical", + "abstract": "One of the most popular methods for learning Nash equilibrium (NE) in large-scale imperfect information extensive-form games (IIEFGs) is the neural variants of counterfactual regret minimization (CFR). CFR is a special case of Follow-The-Regularized-Leader (FTRL). At each iteration, the neural variants of CFR update the agent's strategy via the estimated counterfactual regrets. Then, they use neural networks to approximate the new strategy, which incurs an approximation error. These approximation errors will accumulate since the counterfactual regrets at iteration t are estimated using the agent's past approximated strategies. 
Such accumulated approximation error causes poor performance. To address this accumulated approximation error, we propose a novel FTRL algorithm called FTRL-ORW, which does not utilize the agent's past strategies to pick the next iteration strategy. More importantly, FTRL-ORW can update its strategy via the trajectories sampled from the game, which is suitable to solve large-scale IIEFGs since sampling multiple actions for each information set is too expensive in such games. However, it remains unclear which algorithm to use to compute the next iteration strategy for FTRL-ORW when only such sampled trajectories are revealed at iteration t. To address this problem and scale FTRL-ORW to large-scale games, we provide a model-free method called Deep FTRL-ORW, which computes the next iteration strategy using model-free Maximum Entropy Deep Reinforcement Learning. Experimental results on two-player zero-sum IIEFGs show that Deep FTRL-ORW significantly outperforms existing model-free neural methods and OS-MCCFR.", + "primary_area": "game theory and economic paradigms", + "author": "Linjian Meng; Zhenxing Ge; Pinzhuo Tian; Bo An; Yang Gao", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China", + "bibtex": "@article{Meng_Ge_Tian_An_Gao_2023, title={An Efficient Deep Reinforcement Learning Algorithm for Solving Imperfect Information Extensive-Form Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25722}, DOI={10.1609/aaai.v37i5.25722}, abstractNote={One of the most popular methods for learning Nash equilibrium (NE) in large-scale 
imperfect information extensive-form games (IIEFGs) is the neural variants of counterfactual regret minimization (CFR). CFR is a special case of Follow-The-Regularized-Leader (FTRL). At each iteration, the neural variants of CFR update the agent\u2019s strategy via the estimated counterfactual regrets. Then, they use neural networks to approximate the new strategy, which incurs an approximation error. These approximation errors will accumulate since the counterfactual regrets at iteration t are estimated using the agent\u2019s past approximated strategies. Such accumulated approximation error causes poor performance. To address this accumulated approximation error, we propose a novel FTRL algorithm called FTRL-ORW, which does not utilize the agent\u2019s past strategies to pick the next iteration strategy. More importantly, FTRL-ORW can update its strategy via the trajectories sampled from the game, which is suitable to solve large-scale IIEFGs since sampling multiple actions for each information set is too expensive in such games. However, it remains unclear which algorithm to use to compute the next iteration strategy for FTRL-ORW when only such sampled trajectories are revealed at iteration t. To address this problem and scale FTRL-ORW to large-scale games, we provide a model-free method called Deep FTRL-ORW, which computes the next iteration strategy using model-free Maximum Entropy Deep Reinforcement Learning. 
Experimental results on two-player zero-sum IIEFGs show that Deep FTRL-ORW significantly outperforms existing model-free neural methods and OS-MCCFR.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Meng, Linjian and Ge, Zhenxing and Tian, Pinzhuo and An, Bo and Gao, Yang}, year={2023}, month={Jun.}, pages={5823-5831} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25722/25494", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25722", + "pdf_size": 239353, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17723363827900501034&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;shu.edu.cn;ntu.edu.sg;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;shu.edu.cn;ntu.edu.sg;nju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Nanjing University;Shanghai University;Nanyang Technological University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;School of Computer Engineering and Science;School of Computer Science and Engineering", + "aff_unique_url": "http://www.nju.edu.cn;https://www.shu.edu.cn;https://www.ntu.edu.sg", + "aff_unique_abbr": "Nanjing U;SHU;NTU", + "aff_campus_unique_index": "0;0;1;2;0", + "aff_campus_unique": "Nanjing;Shanghai;Singapore", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26949", + "title": "An Emotion-Guided Approach to Domain Adaptive Fake News Detection Using Adversarial Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Recent works on fake news detection have shown the efficacy of using emotions as a feature for improved performance. 
However, the cross-domain impact of emotion-guided features for fake news detection still remains an open problem. In this work, we propose an emotion-guided, domain-adaptive, multi-task approach for cross-domain fake news detection, proving the efficacy of emotion-guided models in cross-domain settings for various datasets.", + "primary_area": "", + "author": "Arkajyoti Chakraborty; Inder Khatri; Arjun Choudhry; Pankaj Gupta; Dinesh Kumar Vishwakarma; Mukesh Prasad", + "authorids": "", + "aff": "Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; School of Computer Science, FEIT, University of Technology Sydney, Sydney, Australia", + "bibtex": "@article{Chakraborty_Khatri_Choudhry_Gupta_Vishwakarma_Prasad_2024, title={An Emotion-Guided Approach to Domain Adaptive Fake News Detection Using Adversarial Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26949}, DOI={10.1609/aaai.v37i13.26949}, abstractNote={Recent works on fake news detection have shown the efficacy of using emotions as a feature for improved performance. However, the cross-domain impact of emotion-guided features for fake news detection still remains an open problem. 
In this work, we propose an emotion-guided, domain-adaptive, multi-task approach for cross-domain fake news detection, proving the efficacy of emotion-guided models in cross-domain settings for various datasets.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chakraborty, Arkajyoti and Khatri, Inder and Choudhry, Arjun and Gupta, Pankaj and Vishwakarma, Dinesh Kumar and Prasad, Mukesh}, year={2024}, month={Jul.}, pages={16178-16179} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26949/26721", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26949", + "pdf_size": 1149938, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11164698346849395682&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "dtu.ac.in;gmail.com;gmail.com; ;dtu.ac.in;uts.edu.au", + "email": "dtu.ac.in;gmail.com;gmail.com; ;dtu.ac.in;uts.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "Delhi Technological University;University of Technology Sydney", + "aff_unique_dep": "Biometric Research Laboratory;School of Computer Science", + "aff_unique_url": "https://www.dtu.ac.in;https://www.uts.edu.au", + "aff_unique_abbr": "DTU;UTS", + "aff_campus_unique_index": "0;0;0;0;0;1", + "aff_campus_unique": "New Delhi;Sydney", + "aff_country_unique_index": "0;0;0;0;0;1", + "aff_country_unique": "India;Australia" + }, + { + "id": "article-26647", + "title": "An Ensemble Distillation Framework for Sentence Embeddings with Multilingual Round-Trip Translation", + "track": "main", + "status": "Technical", + "abstract": "In this work, we propose a novel unsupervised contrastive learning framework to improve state-of-the-art sentence embeddings. First, we train a set of contrastive submodels which take multilingual round-trip translation(RTT) as data augmentation. 
The RTT naturally changes the length of the same sentence and replaces Synonyms simultaneously. Then we incorporate them into a single model through knowledge distillation. Specifically, it takes an input sentence and predicts the ensemble output of all submodels via a contrastive objective. Thus we preserve nearly the same semantic expressiveness as the ensemble model without increasing the test cost. We evaluate our framework on standard semantic textual similarity (STS) tasks. Experimental results show the advantage of our framework that we achieve an average of 79.27% Spearman's correlation, a 3.02% improvement compared to the previous best results using BERT-base.", + "primary_area": "speech natural language processing", + "author": "Tianyu Zong; Likun Zhang", + "authorids": "", + "aff": "University of Chinese Academy of Sciences; University of Chinese Academy of Sciences", + "bibtex": "@article{Zong_Zhang_2023, title={An Ensemble Distillation Framework for Sentence Embeddings with Multilingual Round-Trip Translation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26647}, DOI={10.1609/aaai.v37i11.26647}, abstractNote={In this work, we propose a novel unsupervised contrastive learning framework to improve state-of-the-art sentence embeddings. First, we train a set of contrastive submodels which take multilingual round-trip translation(RTT) as data augmentation. The RTT naturally changes the length of the same sentence and replaces Synonyms simultaneously. Then we incorporate them into a single model through knowledge distillation. Specifically, it takes an input sentence and predicts the ensemble output of all submodels via a contrastive objective. Thus we preserve nearly the same semantic expressiveness as the ensemble model without increasing the test cost. We evaluate our framework on standard semantic textual similarity (STS) tasks. 
Experimental results show the advantage of our framework that we achieve an average of 79.27% Spearman\u2019s correlation, a 3.02% improvement compared to the previous best results using BERT-base.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zong, Tianyu and Zhang, Likun}, year={2023}, month={Jun.}, pages={14074-14082} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26647/26419", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26647", + "pdf_size": 856976, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10434731473203563001&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.ucas.ac.cn;mails.ucas.ac.cn", + "email": "mails.ucas.ac.cn;mails.ucas.ac.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Chinese Academy of Sciences", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ucas.ac.cn", + "aff_unique_abbr": "UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25849", + "title": "An Equivalence Analysis of Binary Quantification Methods", + "track": "main", + "status": "Technical", + "abstract": "Quantification (or prevalence estimation) algorithms aim at predicting the class distribution of unseen sets (or bags) of examples. These methods are useful for two main tasks: 1) quantification applications, for instance when we need to track the proportions of several groups of interest over time, and 2) domain adaptation problems, in which we usually need to adapt a previously trained classifier to a different --albeit related-- target distribution according to the estimated prevalences. This paper analyzes several binary quantification algorithms showing that not only do they share a common framework but are, in fact, equivalent. 
Inspired by this study, we propose a new method that extends one of the approaches analyzed. After an empirical evaluation of all these methods using synthetic and benchmark datasets, the paper concludes recommending three of them due to their precision, efficiency, and diversity.", + "primary_area": "machine learning i", + "author": "Alberto Casta\u00f1o; Jaime Alonso; Pablo Gonz\u00e1lez; Juan Jos\u00e9 del Coz", + "authorids": "", + "aff": "Artificial Intelligence Center - University of Oviedo. Campus de Viesques, 33204, Gij\u00f3n, Spain; Artificial Intelligence Center - University of Oviedo. Campus de Viesques, 33204, Gij\u00f3n, Spain; Artificial Intelligence Center - University of Oviedo. Campus de Viesques, 33204, Gij\u00f3n, Spain; Artificial Intelligence Center - University of Oviedo. Campus de Viesques, 33204, Gij\u00f3n, Spain", + "bibtex": "@article{Casta\u00f1o_Alonso_Gonz\u00e1lez_del Coz_2023, title={An Equivalence Analysis of Binary Quantification Methods}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25849}, DOI={10.1609/aaai.v37i6.25849}, abstractNote={Quantification (or prevalence estimation) algorithms aim at predicting the class distribution of unseen sets (or bags) of examples. These methods are useful for two main tasks: 1) quantification applications, for instance when we need to track the proportions of several groups of interest over time, and 2) domain adaptation problems, in which we usually need to adapt a previously trained classifier to a different --albeit related-- target distribution according to the estimated prevalences. This paper analyzes several binary quantification algorithms showing that not only do they share a common framework but are, in fact, equivalent. Inspired by this study, we propose a new method that extends one of the approaches analyzed. 
After an empirical evaluation of all these methods using synthetic and benchmark datasets, the paper concludes recommending three of them due to their precision, efficiency, and diversity.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Casta\u00f1o, Alberto and Alonso, Jaime and Gonz\u00e1lez, Pablo and del Coz, Juan Jos\u00e9}, year={2023}, month={Jun.}, pages={6944-6952} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25849/25621", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25849", + "pdf_size": 998909, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3571721355616459970&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "uniovi.es;uniovi.es;uniovi.es;uniovi.es", + "email": "uniovi.es;uniovi.es;uniovi.es;uniovi.es", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Oviedo", + "aff_unique_dep": "Artificial Intelligence Center", + "aff_unique_url": "https://www.uniovi.es", + "aff_unique_abbr": "UniOvi", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Gij\u00f3n", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "article-26846", + "title": "An Explainable Forecasting System for Humanitarian Needs Assessment", + "track": "iaai technical track", + "status": "Technical", + "abstract": "We present a machine learning system for forecasting forced displacement populations deployed at the Danish Refugee Council (DRC). The system, named Foresight, supports long term forecasts aimed at humanitarian response planning. It is explainable, providing evidence and context supporting the forecast. Additionally, it supports scenarios, whereby analysts are able to generate forecasts under alternative conditions. The system has been in deployment since early 2020 and powers several downstream business functions within DRC. 
It is central to our annual Global Displacement Report which informs our response planning. We describe the system, key outcomes, lessons learnt, along with technical limitations and challenges in deploying machine learning systems in the humanitarian sector.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Rahul Nair; Bo Madsen; Alexander Kj\u00e6rum", + "authorids": "", + "aff": "IBM Research Europe; Danish Refugee Council; Danish Refugee Council", + "bibtex": "@article{Nair_Madsen_Kj\u00e6rum_2024, title={An Explainable Forecasting System for Humanitarian Needs Assessment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26846}, DOI={10.1609/aaai.v37i13.26846}, abstractNote={We present a machine learning system for forecasting forced displacement populations deployed at the Danish Refugee Council (DRC). The system, named Foresight, supports long term forecasts aimed at humanitarian response planning. It is explainable, providing evidence and context supporting the forecast. Additionally, it supports scenarios, whereby analysts are able to generate forecasts under alternative conditions. The system has been in deployment since early 2020 and powers several downstream business functions within DRC. It is central to our annual Global Displacement Report which informs our response planning. 
We describe the system, key outcomes, lessons learnt, along with technical limitations and challenges in deploying machine learning systems in the humanitarian sector.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nair, Rahul and Madsen, Bo and Kj\u00e6rum, Alexander}, year={2024}, month={Jul.}, pages={15569-15575} }",
    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26846/26618",
    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26846",
    "pdf_size": 1125410,
    "gs_citation": 2,
    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4717102648997286088&as_sdt=2005&sciodt=0,5&hl=en",
    "gs_version_total": 8,
    "aff_domain": "ie.ibm.com;drc.ngo;drc.ngo",
    "email": "ie.ibm.com;drc.ngo;drc.ngo",
    "github": "",
    "project": "https://mixedmigration.org/4mi/",
    "author_num": 3,
    "aff_unique_index": "0;1;1",
    "aff_unique_norm": "IBM Research;Danish Refugee Council",
    "aff_unique_dep": "Research;",
    "aff_unique_url": "https://www.ibm.com/research/europe;https://www.drc.dk",
    "aff_unique_abbr": "IBM Research;DRC",
    "aff_campus_unique_index": "",
    "aff_campus_unique": "",
    "aff_country_unique_index": "0;1;1",
    "aff_country_unique": "Ireland;Denmark"
  },
  {
    "id": "article-26045",
    "title": "An Extreme-Adaptive Time Series Prediction Model Based on Probability-Enhanced LSTM Neural Networks",
    "track": "main",
    "status": "Technical",
    "abstract": "Forecasting time series with extreme events has been a challenging and prevalent research topic, especially when the time series data are affected by complicated uncertain factors, such as is the case in hydrologic prediction. Diverse traditional and deep learning models have been applied to discover the nonlinear relationships and recognize the complex patterns in these types of data. However, existing methods usually ignore the negative influence of imbalanced data, or severe events, on model training. 
Moreover, methods are usually evaluated on a small number of generally well-behaved time series, which does not show their ability to generalize. To tackle these issues, we propose a novel probability-enhanced neural network model, called NEC+, which concurrently learns extreme and normal prediction functions and a way to choose among them via selective back propagation. We evaluate the proposed model on the difficult 3-day ahead hourly water level prediction task applied to 9 reservoirs in California. Experimental results demonstrate that the proposed model significantly outperforms state-of-the-art baselines and exhibits superior generalization ability on data with diverse distributions.", + "primary_area": "machine learning ii", + "author": "Yanhong Li; Jack Xu; David C. Anastasiu", + "authorids": "", + "aff": "Santa Clara University, Santa Clara, CA, USA; Santa Clara Valley Water District, San Jose, CA, USA; Santa Clara University, Santa Clara, CA, USA", + "bibtex": "@article{Li_Xu_Anastasiu_2023, title={An Extreme-Adaptive Time Series Prediction Model Based on Probability-Enhanced LSTM Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26045}, DOI={10.1609/aaai.v37i7.26045}, abstractNote={Forecasting time series with extreme events has been a challenging and prevalent research topic, especially when the time series data are affected by complicated uncertain factors, such as is the case in hydrologic prediction. Diverse traditional and deep learning models have been applied to discover the nonlinear relationships and recognize the complex patterns in these types of data. However, existing methods usually ignore the negative influence of imbalanced data, or severe events, on model training. Moreover, methods are usually evaluated on a small number of generally well-behaved time series, which does not show their ability to generalize. 
To tackle these issues, we propose a novel probability-enhanced neural network model, called NEC+, which concurrently learns extreme and normal prediction functions and a way to choose among them via selective back propagation. We evaluate the proposed model on the difficult 3-day ahead hourly water level prediction task applied to 9 reservoirs in California. Experimental results demonstrate that the proposed model significantly outperforms state-of-the-art baselines and exhibits superior generalization ability on data with diverse distributions.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yanhong and Xu, Jack and Anastasiu, David C.}, year={2023}, month={Jun.}, pages={8684-8691} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26045/25817", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26045", + "pdf_size": 563294, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18055277803928646397&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "scu.edu;valleywater.org;scu.edu", + "email": "scu.edu;valleywater.org;scu.edu", + "github": "https://github.com/davidanastasiu/NECPlus", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Santa Clara University;Santa Clara Valley Water District", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.scu.edu;", + "aff_unique_abbr": "SCU;", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Santa Clara;San Jose", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25835", + "title": "An Improved Algorithm for Online Min-Sum Set Cover", + "track": "main", + "status": "Technical", + "abstract": "We study a fundamental model of online preference aggregation, where an algorithm maintains an ordered list of n elements. An input is a stream of preferred sets R_1, R_2, ..., R_t, ... 
Upon seeing R_t and without knowledge of any future sets, an algorithm has to rerank elements (change the list ordering), so that at least one element of R_t is found near the list front. The incurred cost is a sum of the list update costs (the number of swaps of neighboring list elements) and access cost (the position of the first element of R_t on the list). This scenario occurs naturally in applications such as ordering items in an online shop using aggregated preferences of shop customers. The theoretical underpinning of this problem is known as Min-Sum Set Cover.\n\nUnlike previous work that mostly studied the performance of an online algorithm ALG in comparison to the static optimal solution (a single optimal list ordering), in this paper, we study an arguably harder variant where the benchmark is the provably stronger optimal dynamic solution OPT (that may also modify the list ordering). In terms of an online shop, this means that the aggregated preferences of its user base evolve with time. We construct a computationally efficient randomized algorithm whose competitive ratio (ALG-to-OPT cost ratio) is O(r^2) and prove the existence of a deterministic O(r^4)-competitive algorithm. Here, r is the maximum cardinality of sets R_t. 
This is the first algorithm whose ratio does not depend on n: the previously best algorithm for this problem was O(r^(3/2) * n^(1/2))-competitive and \u03a9(r) is a lower bound on the performance of any deterministic online algorithm.", + "primary_area": "machine learning i", + "author": "Marcin Bienkowski; Marcin Mucha", + "authorids": "", + "aff": "University of Wroclaw; University of Warsaw", + "bibtex": "@article{Bienkowski_Mucha_2023, title={An Improved Algorithm for Online Min-Sum Set Cover}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25835}, DOI={10.1609/aaai.v37i6.25835}, abstractNote={We study a fundamental model of online preference aggregation, where an algorithm maintains an ordered list of n elements. An input is a stream of preferred sets R_1, R_2, ..., R_t, ... Upon seeing R_t and without knowledge of any future sets, an algorithm has to rerank elements (change the list ordering), so that at least one element of R_t is found near the list front. The incurred cost is a sum of the list update costs (the number of swaps of neighboring list elements) and access cost (the position of the first element of R_t on the list). This scenario occurs naturally in applications such as ordering items in an online shop using aggregated preferences of shop customers. The theoretical underpinning of this problem is known as Min-Sum Set Cover. Unlike previous work that mostly studied the performance of an online algorithm ALG in comparison to the static optimal solution (a single optimal list ordering), in this paper, we study an arguably harder variant where the benchmark is the provably stronger optimal dynamic solution OPT (that may also modify the list ordering). In terms of an online shop, this means that the aggregated preferences of its user base evolve with time. 
We construct a computationally efficient randomized algorithm whose competitive ratio (ALG-to-OPT cost ratio) is O(r^2) and prove the existence of a deterministic O(r^4)-competitive algorithm. Here, r is the maximum cardinality of sets R_t. This is the first algorithm whose ratio does not depend on n: the previously best algorithm for this problem was O(r^(3/2) * n^(1/2))-competitive and \u03a9(r) is a lower bound on the performance of any deterministic online algorithm.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bienkowski, Marcin and Mucha, Marcin}, year={2023}, month={Jun.}, pages={6815-6822} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25835/25607", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25835", + "pdf_size": 157454, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14870002743387474575&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.uni.wroc.pl;mimuw.edu.pl", + "email": "cs.uni.wroc.pl;mimuw.edu.pl", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Wroc\u0142aw;University of Warsaw", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uni.wroc.pl;https://www.uw.edu.pl", + "aff_unique_abbr": "UW;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Poland" + }, + { + "id": "article-25512", + "title": "An Improved Approximation Algorithm for Wage Determination and Online Task Allocation in Crowd-Sourcing", + "track": "main", + "status": "Technical", + "abstract": "Crowd-sourcing has attracted much attention due to its growing importance to society, and numerous studies have been conducted on task allocation and wage determination. Recent works have focused on optimizing task allocation and workers' wages, simultaneously. 
However, existing methods do not provide good solutions for real-world crowd-sourcing platforms due to the low approximation ratio or myopic problem settings. We tackle an optimization problem for wage determination and online task allocation in crowd-sourcing and propose a fast 1-1/(k+3)^(1/2)-approximation algorithm, where k is the minimum of tasks' budgets (numbers of possible assignments). This approximation ratio is greater than or equal to the existing method. The proposed method reduces the tackled problem to a non-convex multi-period continuous optimization problem by approximating the objective function. Then, the method transforms the reduced problem into a minimum convex cost flow problem, which is a well-known combinatorial optimization problem, and solves it by the capacity scaling algorithm. Synthetic experiments and simulation experiments using real crowd-sourcing data show that the proposed method solves the problem faster and outputs higher objective values than existing methods.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yuya Hikima; Yasunori Akagi; Hideaki Kim; Taichi Asami", + "authorids": "", + "aff": "NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation", + "bibtex": "@article{Hikima_Akagi_Kim_Asami_2023, title={An Improved Approximation Algorithm for Wage Determination and Online Task Allocation in Crowd-Sourcing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25512}, DOI={10.1609/aaai.v37i4.25512}, abstractNote={Crowd-sourcing has attracted much attention due to its growing importance to society, and numerous studies have been conducted on task allocation and wage determination. Recent works have focused on optimizing task allocation and workers\u2019 wages, simultaneously. 
However, existing methods do not provide good solutions for real-world crowd-sourcing platforms due to the low approximation ratio or myopic problem settings. We tackle an optimization problem for wage determination and online task allocation in crowd-sourcing and propose a fast 1-1/(k+3)^(1/2)-approximation algorithm, where k is the minimum of tasks\u2019 budgets (numbers of possible assignments). This approximation ratio is greater than or equal to the existing method. The proposed method reduces the tackled problem to a non-convex multi-period continuous optimization problem by approximating the objective function. Then, the method transforms the reduced problem into a minimum convex cost flow problem, which is a well-known combinatorial optimization problem, and solves it by the capacity scaling algorithm. Synthetic experiments and simulation experiments using real crowd-sourcing data show that the proposed method solves the problem faster and outputs higher objective values than existing methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hikima, Yuya and Akagi, Yasunori and Kim, Hideaki and Asami, Taichi}, year={2023}, month={Jun.}, pages={3977-3986} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25512/25284", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25512", + "pdf_size": 579675, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17202855756272477966&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com; fyasunori.akagi.cu; hideaki.kin.cn;hco.ntt.co.jp", + "email": "gmail.com; fyasunori.akagi.cu; hideaki.kin.cn;hco.ntt.co.jp", + "github": "https://github.com/Yuya-Hikima/AAAI2023-An-Improved-Approximation-Algorithm-for-Wage-Determination-and-Online-Task-Allocation", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "NTT Corporation", + "aff_unique_dep": "Human Informatics 
Laboratories", + "aff_unique_url": "https://www.ntt.co.jp", + "aff_unique_abbr": "NTT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26900", + "title": "An Introduction to Rule-Based Feature and Object Perception for Middle School Students", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "The Feature Detection tool is a web-based activity that allows students to detect features in images and build their own rule-based classification algorithms. In this paper, we introduce the tool and share how it is incorporated into two, 45-minute lessons. The objective of the first lesson is to introduce students to the concept of feature detection, or how a computer can break down visual input into lower-level features. The second lesson aims to show students how these lower-level features can be incorporated into rule-based models to classify higher-order objects. 
We discuss how this tool can be used as a \"first step\" to the more complex concept ideas of data representation and neural networks.", + "primary_area": "", + "author": "Daniella DiPaola; Parker Malachowsky; Nancye Blair Black; Sharifa Alghowinem; Xiaoxue Du; Cynthia Breazeal", + "authorids": "", + "aff": "MIT Media Lab, Massachusetts Institute of Technology; MIT Media Lab, Massachusetts Institute of Technology; Teacher\u2019s College, Columbia University; MIT Media Lab, Massachusetts Institute of Technology; MIT Media Lab, Massachusetts Institute of Technology; MIT Media Lab, Massachusetts Institute of Technology", + "bibtex": "@article{DiPaola_Malachowsky_Blair Black_Alghowinem_Du_Breazeal_2024, title={An Introduction to Rule-Based Feature and Object Perception for Middle School Students}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26900}, DOI={10.1609/aaai.v37i13.26900}, abstractNote={The Feature Detection tool is a web-based activity that allows students to detect features in images and build their own rule-based classification algorithms. In this paper, we introduce the tool and share how it is incorporated into two, 45-minute lessons. The objective of the first lesson is to introduce students to the concept of feature detection, or how a computer can break down visual input into lower-level features. The second lesson aims to show students how these lower-level features can be incorporated into rule-based models to classify higher-order objects. 
We discuss how this tool can be used as a \"first step\" to the more complex concept ideas of data representation and neural networks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={DiPaola, Daniella and Malachowsky, Parker and Blair Black, Nancye and Alghowinem, Sharifa and Du, Xiaoxue and Breazeal, Cynthia}, year={2024}, month={Jul.}, pages={16004-16010} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26900/26672", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26900", + "pdf_size": 2550428, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5189127262442738703&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "media.mit.edu;media.mit.edu;tc.columbia.edu;media.mit.edu;media.mit.edu;media.mit.edu", + "email": "media.mit.edu;media.mit.edu;tc.columbia.edu;media.mit.edu;media.mit.edu;media.mit.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Columbia University", + "aff_unique_dep": "Media Lab;Teacher's College", + "aff_unique_url": "http://web.mit.edu/;https://www.columbia.edu", + "aff_unique_abbr": "MIT;Columbia", + "aff_campus_unique_index": "0;0;1;0;0;0", + "aff_campus_unique": "Cambridge;New York", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27090", + "title": "An Online Presentation Slide Assessment System Using Visual and Semantic Segmentation Features", + "track": "demonstrations", + "status": "Technical", + "abstract": "In this study, we present a new presentation slide assessment system that can extract the structural features from any slide file formats. Our previous work used a neural network to identify novice vs. well-designed presentation slides based on visual and structural features. 
However, the structural feature extraction was only applicable to PowerPoint files. To solve this problem, we extract the semantic segmentation from the slide images as a new format of structural features. The proposed multi-modal Transformer extracts the features from the original images and semantic segmentation results to assess the slide design. The prediction targets are the top-10 checkpoints pointed out by the professional consultants. Class-imbalanced learning and multi-task learning methods are also applied to improve the accuracy. The proposed model only requiring the slide images achieved an average accuracy of 81.67% that is comparative to the performance of the previous work requiring the PowerPoint files.", + "primary_area": "", + "author": "Shengzhou Yi; Junichiro Matsugami; Hiroshi Yumoto; Toshihiko Yamasaki", + "authorids": "", + "aff": "The University of Tokyo; Rubato Co., Ltd.; P&I Information Engineering Co., Ltd.; The University of Tokyo", + "bibtex": "@article{Yi_Matsugami_Yumoto_Yamasaki_2024, title={An Online Presentation Slide Assessment System Using Visual and Semantic Segmentation Features}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27090}, DOI={10.1609/aaai.v37i13.27090}, abstractNote={In this study, we present a new presentation slide assessment system that can extract the structural features from any slide file formats. Our previous work used a neural network to identify novice vs. well-designed presentation slides based on visual and structural features. However, the structural feature extraction was only applicable to PowerPoint files. To solve this problem, we extract the semantic segmentation from the slide images as a new format of structural features. The proposed multi-modal Transformer extracts the features from the original images and semantic segmentation results to assess the slide design. The prediction targets are the top-10 checkpoints pointed out by the professional consultants. 
Class-imbalanced learning and multi-task learning methods are also applied to improve the accuracy. The proposed model only requiring the slide images achieved an average accuracy of 81.67% that is comparative to the performance of the previous work requiring the PowerPoint files.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yi, Shengzhou and Matsugami, Junichiro and Yumoto, Hiroshi and Yamasaki, Toshihiko}, year={2024}, month={Jul.}, pages={16494-16496} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27090/26862", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27090", + "pdf_size": 827270, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:NOePZw2yNeQJ:scholar.google.com/&scioq=An+Online+Presentation+Slide+Assessment+System+Using+Visual+and+Semantic+Segmentation+Features&hl=en&as_sdt=0,14", + "gs_version_total": 3, + "aff_domain": "cvm.t.u-tokyo.ac.jp;rubato.co;pandi.co.jp;cvm.t.u-tokyo.ac.jp", + "email": "cvm.t.u-tokyo.ac.jp;rubato.co;pandi.co.jp;cvm.t.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Tokyo;Rubato Company;P&I Information Engineering Co., Ltd.", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.u-tokyo.ac.jp;;", + "aff_unique_abbr": "UTokyo;;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;2;0", + "aff_country_unique": "Japan;;China" + }, + { + "id": "article-26111", + "title": "An Operator Theoretic Approach for Analyzing Sequence Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Analyzing the inner mechanisms of deep neural networks is a fundamental task in machine learning. Existing work provides limited analysis or it depends on local theories, such as fixed-point analysis. 
In contrast, we propose to analyze trained neural networks using an operator theoretic approach which is rooted in Koopman theory, the Koopman Analysis of Neural Networks (KANN). Key to our method is the Koopman operator, which is a linear object that globally represents the dominant behavior of the network dynamics. The linearity of the Koopman operator facilitates analysis via its eigenvectors and eigenvalues. Our method reveals that the latter eigendecomposition holds semantic information related to the neural network inner workings. For instance, the eigenvectors highlight positive and negative n-grams in the sentiments analysis task; similarly, the eigenvectors capture the salient features of healthy heart beat signals in the ECG classification problem.", + "primary_area": "machine learning iii", + "author": "Ilan Naiman; Omri Azencot", + "authorids": "", + "aff": "Department of Computer Science, Ben-Gurion University of the Negev, Beer Sheva, Israel; Department of Computer Science, Ben-Gurion University of the Negev, Beer Sheva, Israel", + "bibtex": "@article{Naiman_Azencot_2023, title={An Operator Theoretic Approach for Analyzing Sequence Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26111}, DOI={10.1609/aaai.v37i8.26111}, abstractNote={Analyzing the inner mechanisms of deep neural networks is a fundamental task in machine learning. Existing work provides limited analysis or it depends on local theories, such as fixed-point analysis. In contrast, we propose to analyze trained neural networks using an operator theoretic approach which is rooted in Koopman theory, the Koopman Analysis of Neural Networks (KANN). Key to our method is the Koopman operator, which is a linear object that globally represents the dominant behavior of the network dynamics. The linearity of the Koopman operator facilitates analysis via its eigenvectors and eigenvalues. 
Our method reveals that the latter eigendecomposition holds semantic information related to the neural network inner workings. For instance, the eigenvectors highlight positive and negative n-grams in the sentiments analysis task; similarly, the eigenvectors capture the salient features of healthy heart beat signals in the ECG classification problem.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Naiman, Ilan and Azencot, Omri}, year={2023}, month={Jun.}, pages={9268-9276} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26111/25883", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26111", + "pdf_size": 1226580, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7942944106625862617&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "post.bgu.ac.il;cs.bgu.ac.il", + "email": "post.bgu.ac.il;cs.bgu.ac.il", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Ben-Gurion University of the Negev", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beer Sheva", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25605", + "title": "Analogical Inference Enhanced Knowledge Graph Embedding", + "track": "main", + "status": "Technical", + "abstract": "Knowledge graph embedding (KGE), which maps entities and relations in a knowledge graph into continuous vector spaces, has achieved great success in predicting missing links in knowledge graphs. However, knowledge graphs often contain incomplete triples that are difficult to inductively infer by KGEs. 
To address this challenge, we resort to analogical inference and propose a novel and general self-supervised framework AnKGE to enhance KGE models with analogical inference capability. We propose an analogical object retriever that retrieves appropriate analogical objects from entity-level, relation-level, and triple-level. And in AnKGE, we train an analogy function for each level of analogical inference with the original element embedding from a well-trained KGE model as input, which outputs the analogical object embedding. In order to combine inductive inference capability from the original KGE model and analogical inference capability enhanced by AnKGE, we interpolate the analogy score with the base model score and introduce the adaptive weights in the score function for prediction. Through extensive experiments on FB15k-237 and WN18RR datasets, we show that AnKGE achieves competitive results on link prediction task and well performs analogical inference.", + "primary_area": "data mining and knowledge management", + "author": "Zhen Yao; Wen Zhang; Mingyang Chen; Yufeng Huang; Yi Yang; Huajun Chen", + "authorids": "", + "aff": "School of Software Technology, Zhejiang University; School of Software Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University + Donghai Laboratory, Zhoushan 316021, China + Alibaba-Zhejiang University Joint Institute of Frontier Technologies; School of Software Technology, Zhejiang University; Huawei Technologies Co., Ltd; College of Computer Science and Technology, Zhejiang University + Donghai Laboratory, Zhoushan 316021, China + Alibaba-Zhejiang University Joint Institute of Frontier Technologies", + "bibtex": "@article{Yao_Zhang_Chen_Huang_Yang_Chen_2023, title={Analogical Inference Enhanced Knowledge Graph Embedding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25605}, DOI={10.1609/aaai.v37i4.25605}, abstractNote={Knowledge graph embedding (KGE), which maps entities and 
relations in a knowledge graph into continuous vector spaces, has achieved great success in predicting missing links in knowledge graphs. However, knowledge graphs often contain incomplete triples that are difficult to inductively infer by KGEs. To address this challenge, we resort to analogical inference and propose a novel and general self-supervised framework AnKGE to enhance KGE models with analogical inference capability. We propose an analogical object retriever that retrieves appropriate analogical objects from entity-level, relation-level, and triple-level. And in AnKGE, we train an analogy function for each level of analogical inference with the original element embedding from a well-trained KGE model as input, which outputs the analogical object embedding. In order to combine inductive inference capability from the original KGE model and analogical inference capability enhanced by AnKGE, we interpolate the analogy score with the base model score and introduce the adaptive weights in the score function for prediction. 
Through extensive experiments on FB15k-237 and WN18RR datasets, we show that AnKGE achieves competitive results on link prediction task and well performs analogical inference.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yao, Zhen and Zhang, Wen and Chen, Mingyang and Huang, Yufeng and Yang, Yi and Chen, Huajun}, year={2023}, month={Jun.}, pages={4801-4808} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25605/25377", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25605", + "pdf_size": 428235, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9470531069698154025&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;huawei.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;huawei.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1+0;0;2;0+1+0", + "aff_unique_norm": "Zhejiang University;Donghai Laboratory;Huawei Technologies", + "aff_unique_dep": "School of Software Technology;;", + "aff_unique_url": "http://www.zju.edu.cn;;https://www.huawei.com", + "aff_unique_abbr": "ZJU;;Huawei", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0+0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26469", + "title": "Analyzing and Improving the Use of the FastMap Embedding in Pathfinding Tasks", + "track": "main", + "status": "Technical", + "abstract": "The FastMap algorithm has been proposed as an inexpensive metric embedding which provides admissible distance estimates between all vertices in an embedding. As an embedding, it also supports additional operations such as taking the median location of two vertices, which is important in some problems. 
This paper studies several aspects of FastMap embeddings, showing the relationship of FastMap to general additive heuristics. As an admissible heuristic, FastMap is not as strong as previous suggested. However, by combining FastMap with the ideas of differential heuristics, we can significantly improve the performance of FastMap heuristics. We show the impact of these ideas in both single-agent pathfinding and the Multi-Agent Meeting problem, where the performance of algorithms using our improved FastMap embedding is improved by up to a factor of two.", + "primary_area": "search and optimization", + "author": "Reza Mashayekhi; Dor Atzmon; Nathan R. Sturtevant", + "authorids": "", + "aff": "Department of Computing Science, University of Alberta, Canada + Alberta Machine Intelligence Institute (Amii); Ben Gurion University of the Negev, Israel + Royal Holloway, University of London; Department of Computing Science, University of Alberta, Canada + Alberta Machine Intelligence Institute (Amii)", + "bibtex": "@article{Mashayekhi_Atzmon_Sturtevant_2023, title={Analyzing and Improving the Use of the FastMap Embedding in Pathfinding Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26469}, DOI={10.1609/aaai.v37i10.26469}, abstractNote={The FastMap algorithm has been proposed as an inexpensive metric embedding which provides admissible distance estimates between all vertices in an embedding. As an embedding, it also supports additional operations such as taking the median location of two vertices, which is important in some problems. This paper studies several aspects of FastMap embeddings, showing the relationship of FastMap to general additive heuristics. As an admissible heuristic, FastMap is not as strong as previous suggested. However, by combining FastMap with the ideas of differential heuristics, we can significantly improve the performance of FastMap heuristics. 
We show the impact of these ideas in both single-agent pathfinding and the Multi-Agent Meeting problem, where the performance of algorithms using our improved FastMap embedding is improved by up to a factor of two.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mashayekhi, Reza and Atzmon, Dor and Sturtevant, Nathan R.}, year={2023}, month={Jun.}, pages={12473-12481} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26469/26241", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26469", + "pdf_size": 580549, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18398625956959938323&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 5, + "aff_domain": "ualberta.ca;post.bgu.ac.il;ualberta.ca", + "email": "ualberta.ca;post.bgu.ac.il;ualberta.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2+3;0+1", + "aff_unique_norm": "University of Alberta;Alberta Machine Intelligence Institute;Ben Gurion University of the Negev;University of London", + "aff_unique_dep": "Department of Computing Science;Machine Intelligence;;", + "aff_unique_url": "https://www.ualberta.ca;https://amiilabs.ca;https://www.bgu.ac.il;https://www.royalholloway.ac.uk", + "aff_unique_abbr": "UAlberta;Amii;BGU;RHUL", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Royal Holloway", + "aff_country_unique_index": "0+0;1+2;0+0", + "aff_country_unique": "Canada;Israel;United Kingdom" + }, + { + "id": "article-26864", + "title": "AnimateSVG: Autonomous Creation and Aesthetics Evaluation of Scalable Vector Graphics Animations for the Case of Brand Logos", + "track": "iaai technical track", + "status": "Technical", + "abstract": "In the light of the constant battle for attention on digital media, animating digital content plays an increasing role in modern graphic design. 
In this study, we use artificial intelligence methods to create aesthetic animations along the case of brand logos. With scalable vector graphics as the standard format in modern graphic design, we develop an autonomous end-to-end method using complex machine learning techniques to create brand logo animations as scalable vector graphics from scratch. We acquire data and setup a comprehensive animation space to create novel animations and evaluate them based on their aesthetics. We propose and compare two alternative computational models for automated logo animation and carefully weigh up their idiosyncrasies: on the one hand, we set up an aesthetics evaluation model to train an animation generator and, on the other hand, we combine tree ensembles with global optimization. Indeed, our proposed methods are capable of creating aesthetic logo animations, receiving an average rating of \u2018good\u2019 from observers.", + "primary_area": "emerging applications of ai", + "author": "Deborah Mateja; Rebecca Armbruster; Jonathan Baumert; Tim Bleil; Jakob Langenbahn; Jan Christian Schwedhelm; Sarah Sester; Armin Heinzl", + "authorids": "", + "aff": "University of Mannheim; University of Mannheim; University of Mannheim; University of Mannheim; University of Mannheim; University of Mannheim; University of Mannheim; University of Mannheim", + "bibtex": "@article{Mateja_Armbruster_Baumert_Bleil_Langenbahn_Schwedhelm_Sester_Heinzl_2024, title={AnimateSVG: Autonomous Creation and Aesthetics Evaluation of Scalable Vector Graphics Animations for the Case of Brand Logos}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26864}, DOI={10.1609/aaai.v37i13.26864}, abstractNote={In the light of the constant battle for attention on digital media, animating digital content plays an increasing role in modern graphic design. In this study, we use artificial intelligence methods to create aesthetic animations along the case of brand logos. 
With scalable vector graphics as the standard format in modern graphic design, we develop an autonomous end-to-end method using complex machine learning techniques to create brand logo animations as scalable vector graphics from scratch. We acquire data and setup a comprehensive animation space to create novel animations and evaluate them based on their aesthetics. We propose and compare two alternative computational models for automated logo animation and carefully weigh up their idiosyncrasies: on the one hand, we set up an aesthetics evaluation model to train an animation generator and, on the other hand, we combine tree ensembles with global optimization. Indeed, our proposed methods are capable of creating aesthetic logo animations, receiving an average rating of \u2018good\u2019 from observers.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mateja, Deborah and Armbruster, Rebecca and Baumert, Jonathan and Bleil, Tim and Langenbahn, Jakob and Schwedhelm, Jan Christian and Sester, Sarah and Heinzl, Armin}, year={2024}, month={Jul.}, pages={15710-15716} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26864/26636", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26864", + "pdf_size": 1234818, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6682240733826726520&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de", + "email": "uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "University of Mannheim", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uni-mannheim.de", + "aff_unique_abbr": "UM", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-27088", + "title": "AnoViz: A Visual Inspection Tool of Anomalies in Multivariate Time Series", + "track": "demonstrations", + "status": "Technical", + "abstract": "This paper presents AnoViz, a novel visualization tool of anomalies in multivariate time series, to support domain experts and data scientists in understanding anomalous instances in their systems. AnoViz provides an overall summary of time series as well as detailed visualizations of relevant detected anomalies in both query and stream modes, rendering near real-time visual analysis available. Here, we show that AnoViz streamlines the process of finding a potential cause of an anomaly with a deeper analysis of anomalous instances, giving explainability to any anomaly detector.", + "primary_area": "", + "author": "Patara Trirat; Youngeun Nam; Taeyoon Kim; Jae-Gil Lee", + "authorids": "", + "aff": "School of Computing, KAIST; School of Computing, KAIST; School of Computing, KAIST; School of Computing, KAIST", + "bibtex": "@article{Trirat_Nam_Kim_Lee_2024, title={AnoViz: A Visual Inspection Tool of Anomalies in Multivariate Time Series}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27088}, DOI={10.1609/aaai.v37i13.27088}, abstractNote={This paper presents AnoViz, a novel visualization tool of anomalies in multivariate time series, to support domain experts and data scientists in understanding anomalous instances in their systems. AnoViz provides an overall summary of time series as well as detailed visualizations of relevant detected anomalies in both query and stream modes, rendering near real-time visual analysis available. 
Here, we show that AnoViz streamlines the process of finding a potential cause of an anomaly with a deeper analysis of anomalous instances, giving explainability to any anomaly detector.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Trirat, Patara and Nam, Youngeun and Kim, Taeyoon and Lee, Jae-Gil}, year={2024}, month={Jul.}, pages={16488-16490} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27088/26860", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27088", + "pdf_size": 893660, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8599783871776508112&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25563", + "title": "Anomaly Segmentation for High-Resolution Remote Sensing Images Based on Pixel Descriptors", + "track": "main", + "status": "Technical", + "abstract": "Anomaly segmentation in high spatial resolution (HSR) remote sensing imagery is aimed at segmenting anomaly patterns of the earth deviating from normal patterns, which plays an important role in various Earth vision applications. However, it is a challenging task due to the complex distribution and the irregular shapes of objects, and the lack of abnormal samples. To tackle these problems, an anomaly segmentation model based on pixel descriptors (ASD) is proposed for anomaly segmentation in HSR imagery. 
Specifically, deep one-class classification is introduced for anomaly segmentation in the feature space with discriminative pixel descriptors. The ASD model incorporates the data argument for generating virtual abnormal samples, which can force the pixel descriptors to be compact for normal data and meanwhile to be diverse to avoid the model collapse problems when only positive samples participated in the training. In addition, the ASD introduced a multi-level and multi-scale feature extraction strategy for learning the low-level and semantic information to make the pixel descriptors feature-rich. The proposed ASD model was validated using four HSR datasets and compared with the recent state-of-the-art models, showing its potential value in Earth vision applications.", + "primary_area": "data mining and knowledge management", + "author": "Jingtao Li; Xinyu Wang; Hengwei Zhao; Shaoyu Wang; Yanfei Zhong", + "authorids": "", + "aff": "State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing, Wuhan University, P. R. China; School of Remote Sensing and Information Engineering, Wuhan University, P. R. China; State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing, Wuhan University, P. R. China; State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing, Wuhan University, P. R. China; State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing, Wuhan University, P. R. 
China", + "bibtex": "@article{Li_Wang_Zhao_Wang_Zhong_2023, title={Anomaly Segmentation for High-Resolution Remote Sensing Images Based on Pixel Descriptors}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25563}, DOI={10.1609/aaai.v37i4.25563}, abstractNote={Anomaly segmentation in high spatial resolution (HSR) remote sensing imagery is aimed at segmenting anomaly patterns of the earth deviating from normal patterns, which plays an important role in various Earth vision applications. However, it is a challenging task due to the complex distribution and the irregular shapes of objects, and the lack of abnormal samples. To tackle these problems, an anomaly segmentation model based on pixel descriptors (ASD) is proposed for anomaly segmentation in HSR imagery. Specifically, deep one-class classification is introduced for anomaly segmentation in the feature space with discriminative pixel descriptors. The ASD model incorporates the data argument for generating virtual abnormal samples, which can force the pixel descriptors to be compact for normal data and meanwhile to be diverse to avoid the model collapse problems when only positive samples participated in the training. In addition, the ASD introduced a multi-level and multi-scale feature extraction strategy for learning the low-level and semantic information to make the pixel descriptors feature-rich. 
The proposed ASD model was validated using four HSR datasets and compared with the recent state-of-the-art models, showing its potential value in Earth vision applications.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jingtao and Wang, Xinyu and Zhao, Hengwei and Wang, Shaoyu and Zhong, Yanfei}, year={2023}, month={Jun.}, pages={4426-4434} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25563/25335", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25563", + "pdf_size": 18281610, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2054390163116649021&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn", + "github": "https://github.com/Jingtao-Li-CVer/ASD", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Wuhan University", + "aff_unique_dep": "State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing", + "aff_unique_url": "http://www.whu.edu.cn/", + "aff_unique_abbr": "WHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26754", + "title": "Anonymization for Skeleton Action Recognition", + "track": "aaai special track", + "status": "Technical", + "abstract": "Skeleton-based action recognition attracts practitioners and researchers due to the lightweight, compact nature of datasets. Compared with RGB-video-based action recognition, skeleton-based action recognition is a safer way to protect the privacy of subjects while having competitive recognition performance. 
However, due to improvements in skeleton recognition algorithms as well as motion and depth sensors, more details of motion characteristics can be preserved in the skeleton dataset, leading to potential privacy leakage. We first train classifiers to categorize private information from skeleton trajectories to investigate the potential privacy leakage from skeleton datasets. Our preliminary experiments show that the gender classifier achieves 87% accuracy on average, and the re-identification classifier achieves 80% accuracy on average with three baseline models: Shift-GCN, MS-G3D, and 2s-AGCN. We propose an anonymization framework based on adversarial learning to protect potential privacy leakage from the skeleton dataset. Experimental results show that an anonymized dataset can reduce the risk of privacy leakage while having marginal effects on action recognition performance even with simple anonymizer architectures. The code used in our experiments is available at https://github.com/ml-postech/Skeleton-anonymization/", + "primary_area": "safe and robust ai", + "author": "Saemi Moon; Myeonghyeon Kim; Zhenyue Qin; Yang Liu; Dongwoo Kim", + "authorids": "", + "aff": "Computer Science and Engineering, Pohang University of Science and Technology + Graduate School of Arti\ufb01cial Intelligence, Pohang University of Science and Technology; Scatter Lab; Australian National University + Tencent; Australian National University; Computer Science and Engineering, Pohang University of Science and Technology + Graduate School of Arti\ufb01cial Intelligence, Pohang University of Science and Technology", + "bibtex": "@article{Moon_Kim_Qin_Liu_Kim_2023, title={Anonymization for Skeleton Action Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26754}, DOI={10.1609/aaai.v37i12.26754}, abstractNote={Skeleton-based action recognition attracts practitioners and researchers due to the lightweight, compact nature of datasets. 
Compared with RGB-video-based action recognition, skeleton-based action recognition is a safer way to protect the privacy of subjects while having competitive recognition performance. However, due to improvements in skeleton recognition algorithms as well as motion and depth sensors, more details of motion characteristics can be preserved in the skeleton dataset, leading to potential privacy leakage. We first train classifiers to categorize private information from skeleton trajectories to investigate the potential privacy leakage from skeleton datasets. Our preliminary experiments show that the gender classifier achieves 87% accuracy on average, and the re-identification classifier achieves 80% accuracy on average with three baseline models: Shift-GCN, MS-G3D, and 2s-AGCN. We propose an anonymization framework based on adversarial learning to protect potential privacy leakage from the skeleton dataset. Experimental results show that an anonymized dataset can reduce the risk of privacy leakage while having marginal effects on action recognition performance even with simple anonymizer architectures. 
The code used in our experiments is available at https://github.com/ml-postech/Skeleton-anonymization/}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Moon, Saemi and Kim, Myeonghyeon and Qin, Zhenyue and Liu, Yang and Kim, Dongwoo}, year={2023}, month={Jun.}, pages={15028-15036} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26754/26526", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26754", + "pdf_size": 627994, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12301084733730989220&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "postech.ac.kr;gmail.com;anu.edu.au;anu.edu.au;postech.ac.kr", + "email": "postech.ac.kr;gmail.com;anu.edu.au;anu.edu.au;postech.ac.kr", + "github": "https://github.com/ml-postech/Skeleton-anonymization/", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;2+3;2;0+0", + "aff_unique_norm": "Pohang University of Science and Technology;Scatter Lab;Australian National University;Tencent Holdings Limited", + "aff_unique_dep": "Computer Science and Engineering;;;", + "aff_unique_url": "https://www.postech.ac.kr;;https://www.anu.edu.au;https://www.tencent.com", + "aff_unique_abbr": "POSTECH;;ANU;Tencent", + "aff_campus_unique_index": "0+0;;0+0", + "aff_campus_unique": "Pohang;", + "aff_country_unique_index": "0+0;2+3;2;0+0", + "aff_country_unique": "South Korea;;Australia;China" + }, + { + "id": "article-27038", + "title": "Anti-drifting Feature Selection via Deep Reinforcement Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Feature selection (FS) is a crucial procedure in machine learning pipelines for its significant benefits in removing data redundancy and mitigating model overfitting. 
Since concept drift is a widespread phenomenon in streaming data and could severely affect model performance, effective FS on concept drifting data streams is imminent. However, existing state-of-the-art FS algorithms fail to adjust their selection strategy adaptively when the effective feature subset changes, making them unsuitable for drifting streams. In this paper, we propose a dynamic FS method that selects effective features on concept drifting data streams via deep reinforcement learning. Specifically, we present two novel designs: (i) a skip-mode reinforcement learning environment that shrinks action space size for high-dimensional FS tasks; (ii) a curiosity mechanism that generates intrinsic rewards to address the long-horizon exploration problem. The experiment results show that our proposed method outperforms other FS methods and can dynamically adapt to concept drifts.", + "primary_area": "", + "author": "Aoran Wang; Hongyang Yang; Feng Mao; Zongzhang Zhang; Yang Yu; Xiaoyang Liu", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University; Alibaba Group; Alibaba Group; National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University; Columbia University", + "bibtex": "@article{Wang_Yang_Mao_Zhang_Yu_Liu_2024, title={Anti-drifting Feature Selection via Deep Reinforcement Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27038}, DOI={10.1609/aaai.v37i13.27038}, abstractNote={Feature selection (FS) is a crucial procedure in machine learning pipelines for its significant benefits in removing data redundancy and mitigating model overfitting. Since concept drift is a widespread phenomenon in streaming data and could severely affect model performance, effective FS on concept drifting data streams is imminent. 
However, existing state-of-the-art FS algorithms fail to adjust their selection strategy adaptively when the effective feature subset changes, making them unsuitable for drifting streams. In this paper, we propose a dynamic FS method that selects effective features on concept drifting data streams via deep reinforcement learning. Specifically, we present two novel designs: (i) a skip-mode reinforcement learning environment that shrinks action space size for high-dimensional FS tasks; (ii) a curiosity mechanism that generates intrinsic rewards to address the long-horizon exploration problem. The experiment results show that our proposed method outperforms other FS methods and can dynamically adapt to concept drifts.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Aoran and Yang, Hongyang and Mao, Feng and Zhang, Zongzhang and Yu, Yang and Liu, Xiaoyang}, year={2024}, month={Jul.}, pages={16356-16357} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27038/26810", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27038", + "pdf_size": 343697, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:bqX9th5OlugJ:scholar.google.com/&scioq=Anti-drifting+Feature+Selection+via+Deep+Reinforcement+Learning+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;alibaba-inc.com;alibaba-inc.com;lamda.nju.edu.cn;lamda.nju.edu.cn;columbia.edu", + "email": "lamda.nju.edu.cn;alibaba-inc.com;alibaba-inc.com;lamda.nju.edu.cn;lamda.nju.edu.cn;columbia.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;0;0;2", + "aff_unique_norm": "Nanjing University;Alibaba Group;Columbia University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.alibaba.com;https://www.columbia.edu", + "aff_unique_abbr": "Nanjing 
University;Alibaba;Columbia", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25627", + "title": "Anytime User Engagement Prediction in Information Cascades for Arbitrary Observation Periods", + "track": "main", + "status": "Technical", + "abstract": "Predicting user engagement -- whether a user will engage in a given information cascade -- is an important problem in the context of social media, as it is useful to online marketing and misinformation mitigation just to name a couple major applications. Based on split population multi-variate survival processes, we develop a discriminative approach that, unlike prior works, leads to a single model for predicting whether individual users of an information network will engage a given cascade for arbitrary forecast horizons and observation periods. Being probabilistic in nature, this model retains the interpretability of its generative counterpart and renders count prediction intervals in a disciplined manner. Our results indicate that our model is highly competitive, if not superior, to current approaches, when compared over varying observed cascade histories and forecast horizons.", + "primary_area": "domain s of application", + "author": "Akshay Aravamudan; Xi Zhang; Georgios C. 
Anagnostopoulos", + "authorids": "", + "aff": "Department of Computer Engineering & Sciences, Florida Institute of Technology, Melbourne, FL, USA; Department of Computer Engineering & Sciences, Florida Institute of Technology, Melbourne, FL, USA; Department of Computer Engineering & Sciences, Florida Institute of Technology, Melbourne, FL, USA", + "bibtex": "@article{Aravamudan_Zhang_Anagnostopoulos_2023, title={Anytime User Engagement Prediction in Information Cascades for Arbitrary Observation Periods}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25627}, DOI={10.1609/aaai.v37i4.25627}, abstractNote={Predicting user engagement -- whether a user will engage in a given information cascade -- is an important problem in the context of social media, as it is useful to online marketing and misinformation mitigation just to name a couple major applications. Based on split population multi-variate survival processes, we develop a discriminative approach that, unlike prior works, leads to a single model for predicting whether individual users of an information network will engage a given cascade for arbitrary forecast horizons and observation periods. Being probabilistic in nature, this model retains the interpretability of its generative counterpart and renders count prediction intervals in a disciplined manner. 
Our results indicate that our model is highly competitive, if not superior, to current approaches, when compared over varying observed cascade histories and forecast horizons.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aravamudan, Akshay and Zhang, Xi and Anagnostopoulos, Georgios C.}, year={2023}, month={Jun.}, pages={4999-5009} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25627/25399", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25627", + "pdf_size": 1329073, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9865023247799805876&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "my.fit.edu;my.fit.edu;fit.edu", + "email": "my.fit.edu;my.fit.edu;fit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Florida Institute of Technology", + "aff_unique_dep": "Department of Computer Engineering & Sciences", + "aff_unique_url": "https://www.fit.edu", + "aff_unique_abbr": "FIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Melbourne", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25717", + "title": "Approval-Based Voting with Mixed Goods", + "track": "main", + "status": "Technical", + "abstract": "We consider a voting scenario in which the resource to be voted upon may consist of both indivisible and divisible goods. This generalizes both the well-studied model of multiwinner voting and the recently introduced model of cake sharing. Under approval votes, we propose two variants of the extended justified representation (EJR) notion from multiwinner voting, a stronger one called EJR for mixed goods (EJR-M) and a weaker one called EJR up to 1 (EJR-1). 
We extend three multiwinner voting rules to our setting\u2014GreedyEJR, the method of equal shares (MES), and proportional approval voting (PAV)\u2014and show that while all three generalizations satisfy EJR-1, only the first one provides EJR-M. In addition, we derive tight bounds on the proportionality degree implied by EJR-M and EJR-1, and investigate the proportionality degree of our proposed rules.", + "primary_area": "game theory and economic paradigms", + "author": "Xinhang Lu; Jannik Peters; Haris Aziz; Xiaohui Bei; Warut Suksompong", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of New South Wales; Efficient Algorithms Research Group, Technische Universit \u00a8at Berlin; School of Computer Science and Engineering, University of New South Wales; School of Physical and Mathematical Sciences, Nanyang Technological University; School of Computing, National University of Singapore", + "bibtex": "@article{Lu_Peters_Aziz_Bei_Suksompong_2023, title={Approval-Based Voting with Mixed Goods}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25717}, DOI={10.1609/aaai.v37i5.25717}, abstractNote={We consider a voting scenario in which the resource to be voted upon may consist of both indivisible and divisible goods. This generalizes both the well-studied model of multiwinner voting and the recently introduced model of cake sharing. Under approval votes, we propose two variants of the extended justified representation (EJR) notion from multiwinner voting, a stronger one called EJR for mixed goods (EJR-M) and a weaker one called EJR up to 1 (EJR-1). We extend three multiwinner voting rules to our setting\u2014GreedyEJR, the method of equal shares (MES), and proportional approval voting (PAV)\u2014and show that while all three generalizations satisfy EJR-1, only the first one provides EJR-M. 
In addition, we derive tight bounds on the proportionality degree implied by EJR-M and EJR-1, and investigate the proportionality degree of our proposed rules.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Xinhang and Peters, Jannik and Aziz, Haris and Bei, Xiaohui and Suksompong, Warut}, year={2023}, month={Jun.}, pages={5781-5788} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25717/25489", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25717", + "pdf_size": 152721, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11128423153008273696&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;2;3", + "aff_unique_norm": "University of New South Wales;Technische Universit\u00e4t Berlin;Nanyang Technological University;National University of Singapore", + "aff_unique_dep": "School of Computer Science and Engineering;Efficient Algorithms Research Group;School of Physical and Mathematical Sciences;School of Computing", + "aff_unique_url": "https://www.unsw.edu.au;https://www.tu-berlin.de;https://www.ntu.edu.sg;https://www.nus.edu.sg", + "aff_unique_abbr": "UNSW;TU Berlin;NTU;NUS", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Berlin", + "aff_country_unique_index": "0;1;0;2;2", + "aff_country_unique": "Australia;Germany;Singapore" + }, + { + "id": "article-25814", + "title": "Approximating Full Conformal Prediction at Scale via Influence Functions", + "track": "main", + "status": "Technical", + "abstract": "Conformal prediction (CP) is a wrapper around traditional machine learning models, giving coverage guarantees under the sole assumption of exchangeability; in classification problems, a CP guarantees that the error rate is at most a chosen significance level, irrespective of whether the underlying model is 
misspecified. However, the prohibitive computational costs of full CP led researchers to design scalable alternatives, which alas do not attain the same guarantees or statistical power of full CP. In this paper, we use influence functions to efficiently approximate full CP. We prove that our method is a consistent approximation of full CP, and empirically show that the approximation error becomes smaller as the training set increases; e.g., for 1,000 training points the two methods output p-values that are <0.001 apart: a negligible error for any practical application. Our methods enable scaling full CP to large real-world datasets. We compare our full CP approximation (ACP) to mainstream CP alternatives, and observe that our method is computationally competitive whilst enjoying the statistical predictive power of full CP.", + "primary_area": "machine learning i", + "author": "Javier Abad Martinez; Umang Bhatt; Adrian Weller; Giovanni Cherubin", + "authorids": "", + "aff": "ETH Zurich, Switzerland; University of Cambridge, UK + The Alan Turing Institute, London, UK; University of Cambridge, UK + The Alan Turing Institute, London, UK; Microsoft Research, Cambridge, UK", + "bibtex": "@article{Abad Martinez_Bhatt_Weller_Cherubin_2023, title={Approximating Full Conformal Prediction at Scale via Influence Functions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25814}, DOI={10.1609/aaai.v37i6.25814}, abstractNote={Conformal prediction (CP) is a wrapper around traditional machine learning models, giving coverage guarantees under the sole assumption of exchangeability; in classification problems, a CP guarantees that the error rate is at most a chosen significance level, irrespective of whether the underlying model is misspecified. However, the prohibitive computational costs of full CP led researchers to design scalable alternatives, which alas do not attain the same guarantees or statistical power of full CP. 
In this paper, we use influence functions to efficiently approximate full CP. We prove that our method is a consistent approximation of full CP, and empirically show that the approximation error becomes smaller as the training set increases; e.g., for 1,000 training points the two methods output p-values that are <0.001 apart: a negligible error for any practical application. Our methods enable scaling full CP to large real-world datasets. We compare our full CP approximation (ACP) to mainstream CP alternatives, and observe that our method is computationally competitive whilst enjoying the statistical predictive power of full CP.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Abad Martinez, Javier and Bhatt, Umang and Weller, Adrian and Cherubin, Giovanni}, year={2023}, month={Jun.}, pages={6631-6639} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25814/25586", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25814", + "pdf_size": 2536158, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6127829328369333442&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "ai.ethz.ch;cam.ac.uk;cam.ac.uk;microsoft.com", + "email": "ai.ethz.ch;cam.ac.uk;cam.ac.uk;microsoft.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;1+2;3", + "aff_unique_norm": "ETH Zurich;University of Cambridge;The Alan Turing Institute;Microsoft Research", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ethz.ch;https://www.cam.ac.uk;https://www.turing.ac.uk;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "ETHZ;Cambridge;ATI;MSR", + "aff_campus_unique_index": "1+2;1+2;1", + "aff_campus_unique": ";Cambridge;London", + "aff_country_unique_index": "0;1+1;1+1;1", + "aff_country_unique": "Switzerland;United Kingdom" + }, + { + "id": "article-25708", + "title": "Approximations for Indivisible Concave Allocations with 
Applications to Nash Welfare Maximization", + "track": "main", + "status": "Technical", + "abstract": "We study a general allocation setting where agent valuations are concave additive. In this model, a collection of items must be uniquely distributed among a set of agents, where each agent-item pair has a specified utility. The objective is to maximize the sum of agent valuations, each of which is an arbitrary non-decreasing concave function of the agent's total additive utility. This setting was studied by Devanur and Jain (STOC 2012) in the online setting for divisible items. In this paper, we obtain both multiplicative and additive approximations in the offline setting for indivisible items. Our approximations depend on novel parameters that measure the local multiplicative/additive curvatures of each agent valuation, which we show correspond directly to the integrality gap of the natural assignment convex program of the problem. Furthermore, we extend our additive guarantees to obtain constant multiplicative approximations for Asymmetric Nash Welfare Maximization when agents have smooth valuations. This algorithm also yields an interesting tatonnement-style interpretation, where agents adjust uniform prices and items are assigned according to maximum weighted bang-per-buck ratios.", + "primary_area": "game theory and economic paradigms", + "author": "Nathaniel Kell; Kevin Sun", + "authorids": "", + "aff": "Denison University; Elon University", + "bibtex": "@article{Kell_Sun_2023, title={Approximations for Indivisible Concave Allocations with Applications to Nash Welfare Maximization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25708}, DOI={10.1609/aaai.v37i5.25708}, abstractNote={We study a general allocation setting where agent valuations are concave additive. In this model, a collection of items must be uniquely distributed among a set of agents, where each agent-item pair has a specified utility. 
The objective is to maximize the sum of agent valuations, each of which is an arbitrary non-decreasing concave function of the agent\u2019s total additive utility. This setting was studied by Devanur and Jain (STOC 2012) in the online setting for divisible items. In this paper, we obtain both multiplicative and additive approximations in the offline setting for indivisible items. Our approximations depend on novel parameters that measure the local multiplicative/additive curvatures of each agent valuation, which we show correspond directly to the integrality gap of the natural assignment convex program of the problem. Furthermore, we extend our additive guarantees to obtain constant multiplicative approximations for Asymmetric Nash Welfare Maximization when agents have smooth valuations. This algorithm also yields an interesting tatonnement-style interpretation, where agents adjust uniform prices and items are assigned according to maximum weighted bang-per-buck ratios.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kell, Nathaniel and Sun, Kevin}, year={2023}, month={Jun.}, pages={5705-5713} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25708/25480", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25708", + "pdf_size": 199768, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15472412440671747312&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "denison.edu;elon.edu", + "email": "denison.edu;elon.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Denison University;Elon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.denison.edu;https://www.elon.edu", + "aff_unique_abbr": "Denison;Elon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"article-26317", + "title": "Are Transformers Effective for Time Series Forecasting?", + "track": "main", + "status": "Technical", + "abstract": "Recently, there has been a surge of Transformer-based solutions for the long-term time series forecasting (LTSF) task. Despite the growing performance over the past few years, we question the validity of this line of research in this work. Specifically, Transformers is arguably the most successful solution to extract the semantic correlations among the elements in a long sequence. However, in time series modeling, we are to extract the temporal relations in an ordered set of continuous points. While employing positional encoding and using tokens to embed sub-series in Transformers facilitate preserving some ordering information, the nature of the permutation-invariant self-attention mechanism inevitably results in temporal information loss. \nTo validate our claim, we introduce a set of embarrassingly simple one-layer linear models named LTSF-Linear for comparison. Experimental results on nine real-life datasets show that LTSF-Linear surprisingly outperforms existing sophisticated Transformer-based LTSF models in all cases, and often by a large margin. Moreover, we conduct comprehensive empirical studies to explore the impacts of various design elements of LTSF models on their temporal relation extraction capability. We hope this surprising finding opens up new research directions for the LTSF task. 
We also advocate revisiting the validity of Transformer-based solutions for other time series analysis tasks (e.g., anomaly detection) in the future.", + "primary_area": "machine learning iv", + "author": "Ailing Zeng; Muxi Chen; Lei Zhang; Qiang Xu", + "authorids": "", + "aff": "The Chinese University of Hong Kong + International Digital Economy Academy; The Chinese University of Hong Kong + International Digital Economy Academy; International Digital Economy Academy; The Chinese University of Hong Kong", + "bibtex": "@article{Zeng_Chen_Zhang_Xu_2023, title={Are Transformers Effective for Time Series Forecasting?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26317}, DOI={10.1609/aaai.v37i9.26317}, abstractNote={Recently, there has been a surge of Transformer-based solutions for the long-term time series forecasting (LTSF) task. Despite the growing performance over the past few years, we question the validity of this line of research in this work. Specifically, Transformers is arguably the most successful solution to extract the semantic correlations among the elements in a long sequence. However, in time series modeling, we are to extract the temporal relations in an ordered set of continuous points. While employing positional encoding and using tokens to embed sub-series in Transformers facilitate preserving some ordering information, the nature of the permutation-invariant self-attention mechanism inevitably results in temporal information loss. To validate our claim, we introduce a set of embarrassingly simple one-layer linear models named LTSF-Linear for comparison. Experimental results on nine real-life datasets show that LTSF-Linear surprisingly outperforms existing sophisticated Transformer-based LTSF models in all cases, and often by a large margin. Moreover, we conduct comprehensive empirical studies to explore the impacts of various design elements of LTSF models on their temporal relation extraction capability. 
We hope this surprising finding opens up new research directions for the LTSF task. We also advocate revisiting the validity of Transformer-based solutions for other time series analysis tasks (e.g., anomaly detection) in the future.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Ailing and Chen, Muxi and Zhang, Lei and Xu, Qiang}, year={2023}, month={Jun.}, pages={11121-11128} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26317/26089", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26317", + "pdf_size": 495992, + "gs_citation": 2271, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7113187934556758762&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "idea.edu.cn;cse.cuhk.edu.hk;idea.edu.cn;cse.cuhk.edu.hk", + "email": "idea.edu.cn;cse.cuhk.edu.hk;idea.edu.cn;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;International Digital Economy Academy", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cuhk.edu.hk;", + "aff_unique_abbr": "CUHK;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26825", + "title": "Artificial Intelligence at the Service of Society to Analyse Human Arguments", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Argument(ation) mining (AM) is an area of research in Artificial Intelligence (AI) that aims to identify, analyse and automatically generate arguments in natural language. In a pipeline, the identification and analysis of the arguments and their components (i.e. premises and claims) in texts and the prediction of their relations (i.e. 
attack and support) are then handled by argument-based reasoning frameworks so that, for example, fallacies and inconsistencies can be automatically identified. Recently, the field of argument mining has tackled new challenges, namely the evaluation of argument quality (e.g. strength, persuasiveness), natural language argument summarisation and retrieval, and natural language argument generation. In this paper, I discuss my main contributions in this area as well as some lines of future research. This paper is part of the AAAI-23 New Faculty Highlights.", + "primary_area": "", + "author": "Serena Villata", + "authorids": "", + "aff": "Universit \u00b4e C\u02c6ote d\u2019Azur, CNRS, Inria, I3S, France", + "bibtex": "@article{Villata_2024, title={Artificial Intelligence at the Service of Society to Analyse Human Arguments}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26825}, DOI={10.1609/aaai.v37i13.26825}, abstractNote={Argument(ation) mining (AM) is an area of research in Artificial Intelligence (AI) that aims to identify, analyse and automatically generate arguments in natural language. In a pipeline, the identification and analysis of the arguments and their components (i.e. premises and claims) in texts and the prediction of their relations (i.e. attack and support) are then handled by argument-based reasoning frameworks so that, for example, fallacies and inconsistencies can be automatically identified. Recently, the field of argument mining has tackled new challenges, namely the evaluation of argument quality (e.g. strength, persuasiveness), natural language argument summarisation and retrieval, and natural language argument generation. In this paper, I discuss my main contributions in this area as well as some lines of future research. 
This paper is part of the AAAI-23 New Faculty Highlights.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Villata, Serena}, year={2024}, month={Jul.}, pages={15458-15458} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26825/26597", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26825", + "pdf_size": 43263, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:NZfuUolKNYMJ:scholar.google.com/&scioq=Artificial+Intelligence+at+the+Service+of+Society+to+Analyse+Human+Arguments&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "univ-cotedazur.fr", + "email": "univ-cotedazur.fr", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Universit\u00e9 C\u00f4te d\u2019Azur", + "aff_unique_dep": "", + "aff_unique_url": "https://www.univ-cotedazur.fr", + "aff_unique_abbr": "UCA", + "aff_country_unique_index": "0", + "aff_country_unique": "France" + }, + { + "id": "article-26953", + "title": "AsT: An Asymmetric-Sensitive Transformer for Osteonecrosis of the Femoral Head Detection (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Early diagnosis of osteonecrosis of the femoral head (ONFH) can inhibit the progression and improve femoral head preservation. The radiograph difference between early ONFH and healthy ones is not apparent to the naked eye. It is also hard to produce a large dataset to train the classification model. In this paper, we propose Asymmetric-Sensitive Transformer (AsT) to capture the uneven development of the bilateral femoral head to enable robust ONFH detection. Our ONFH detection is realized using the self-attention mechanism to femoral head regions while conferring sensitivity to the uneven development by the attention-shared transformer. 
The real-world experiment studies show that AsT achieves the best performance of AUC 0.9313 in the early diagnosis of ONFH and can find out misdiagnosis cases firmly.", + "primary_area": "", + "author": "Haoyang Chen; Shuai Liu; Feng Lu; Wei Li; Bin Sheng; Mi Li; Hai Jin; Albert Y. Zomaya", + "authorids": "", + "aff": "National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Centre for Distributed and High Performance Computing, School of Computer Science, The University of Sydney, Australia; Department of Computer Science and Engineering, Shanghai Jiao Tong University; Tongji Hospital, Tongji Medical College, Huazhong University of Science and Technology; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Centre for Distributed and High Performance Computing, School of Computer Science, The University of Sydney, Australia", + "bibtex": "@article{Chen_Liu_Lu_Li_Sheng_Li_Jin_Zomaya_2024, title={AsT: An Asymmetric-Sensitive Transformer for Osteonecrosis of the Femoral Head Detection (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26953}, 
DOI={10.1609/aaai.v37i13.26953}, abstractNote={Early diagnosis of osteonecrosis of the femoral head (ONFH) can inhibit the progression and improve femoral head preservation. The radiograph difference between early ONFH and healthy ones is not apparent to the naked eye. It is also hard to produce a large dataset to train the classification model. In this paper, we propose Asymmetric-Sensitive Transformer (AsT) to capture the uneven development of the bilateral femoral head to enable robust ONFH detection. Our ONFH detection is realized using the self-attention mechanism to femoral head regions while conferring sensitivity to the uneven development by the attention-shared transformer. The real-world experiment studies show that AsT achieves the best performance of AUC 0.9313 in the early diagnosis of ONFH and can find out misdiagnosis cases firmly.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Haoyang and Liu, Shuai and Lu, Feng and Li, Wei and Sheng, Bin and Li, Mi and Jin, Hai and Zomaya, Albert Y.}, year={2024}, month={Jul.}, pages={16186-16187} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26953/26725", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26953", + "pdf_size": 5097389, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:iYJCbkPt0woJ:scholar.google.com/&scioq=AsT:+An+Asymmetric-Sensitive+Transformer+for+Osteonecrosis+of+the+Femoral+Head+Detection+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;sydney.edu.au;sjtu.edu.cn;hust.edu.cn;hust.edu.cn;sydney.edu.au", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;sydney.edu.au;sjtu.edu.cn;hust.edu.cn;hust.edu.cn;sydney.edu.au", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;2;0;0;1", + "aff_unique_norm": "Huazhong University of Science and Technology;The University of Sydney;Shanghai Jiao 
Tong University", + "aff_unique_dep": "School of Computer Science and Technology;School of Computer Science;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.hust.edu.cn;https://www.sydney.edu.au;https://www.sjtu.edu.cn", + "aff_unique_abbr": "HUST;USYD;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26918", + "title": "Assessing Learned Representations under Open-World Novelty", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "My dissertation research focuses on sequential decision-making (SDM) in complex environments, and how agents can perform well even when novelty is introduced to those environments. The problem of how agents can respond intelligently to novelty has been a long-standing challenge in AI, and poses unique problems across approaches to SDM. This question has been studied in various formulations, including open-world learning and reasoning, transfer learning, concept drift, and statistical relational learning. Classical and modern approaches in agent design offer tradeoffs in human effort for feature encoding, ease of deployment in new domains, and the development of both provably and empirically reliable policies. I propose a formalism for studying open-world novelty in SDM processes with feature-rich observations. 
I study the conditions under which causal-relational queries can be estimated from non-novel observations, and empirically examine the effects of open-world novelty on agent behavior.", + "primary_area": "", + "author": "Kaleigh Clary", + "authorids": "", + "aff": "University of Massachusetts Amherst", + "bibtex": "@article{Clary_2024, title={Assessing Learned Representations under Open-World Novelty}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26918}, DOI={10.1609/aaai.v37i13.26918}, abstractNote={My dissertation research focuses on sequential decision-making (SDM) in complex environments, and how agents can perform well even when novelty is introduced to those environments. The problem of how agents can respond intelligently to novelty has been a long-standing challenge in AI, and poses unique problems across approaches to SDM. This question has been studied in various formulations, including open-world learning and reasoning, transfer learning, concept drift, and statistical relational learning. Classical and modern approaches in agent design offer tradeoffs in human effort for feature encoding, ease of deployment in new domains, and the development of both provably and empirically reliable policies. I propose a formalism for studying open-world novelty in SDM processes with feature-rich observations. 
I study the conditions under which causal-relational queries can be estimated from non-novel observations, and empirically examine the effects of open-world novelty on agent behavior.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Clary, Kaleigh}, year={2024}, month={Jul.}, pages={16115-16116} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26918/26690", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26918", + "pdf_size": 58286, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:MBkPPFsk1E4J:scholar.google.com/&scioq=Assessing+Learned+Representations+under+Open-World+Novelty&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "cs.umass.edu", + "email": "cs.umass.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Massachusetts Amherst", + "aff_unique_dep": "", + "aff_unique_url": "https://www.umass.edu", + "aff_unique_abbr": "UMass Amherst", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Amherst", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25947", + "title": "Astromorphic Self-Repair of Neuromorphic Hardware Systems", + "track": "main", + "status": "Technical", + "abstract": "While neuromorphic computing architectures based on Spiking Neural Networks (SNNs) are increasingly gaining interest as a pathway toward bio-plausible machine learning, attention is still focused on computational units like the neuron and synapse. Shifting from this neuro-synaptic perspective, this paper attempts to explore the self-repair role of glial cells, in particular, astrocytes. The work investigates stronger correlations with astrocyte computational neuroscience models to develop macro-models with a higher degree of bio-fidelity that accurately captures the dynamic behavior of the self-repair process. 
Hardware-software co-design analysis reveals that bio-morphic astrocytic regulation has the potential to self-repair hardware realistic faults in neuromorphic hardware systems with significantly better accuracy and repair convergence for unsupervised learning tasks on the MNIST and F-MNIST datasets. Our implementation source code and trained models are available at https://github.com/NeuroCompLab-psu/Astromorphic_Self_Repair.", + "primary_area": "machine learning i", + "author": "Zhuangyu Han; A N M Nafiul Islam; Abhronil Sengupta", + "authorids": "", + "aff": "School of Electrical Engineering and Computer Science, Penn State University; School of Electrical Engineering and Computer Science, Penn State University; School of Electrical Engineering and Computer Science, Penn State University", + "bibtex": "@article{Han_Islam_Sengupta_2023, title={Astromorphic Self-Repair of Neuromorphic Hardware Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25947}, DOI={10.1609/aaai.v37i6.25947}, abstractNote={While neuromorphic computing architectures based on Spiking Neural Networks (SNNs) are increasingly gaining interest as a pathway toward bio-plausible machine learning, attention is still focused on computational units like the neuron and synapse. Shifting from this neuro-synaptic perspective, this paper attempts to explore the self-repair role of glial cells, in particular, astrocytes. The work investigates stronger correlations with astrocyte computational neuroscience models to develop macro-models with a higher degree of bio-fidelity that accurately captures the dynamic behavior of the self-repair process. Hardware-software co-design analysis reveals that bio-morphic astrocytic regulation has the potential to self-repair hardware realistic faults in neuromorphic hardware systems with significantly better accuracy and repair convergence for unsupervised learning tasks on the MNIST and F-MNIST datasets. 
Our implementation source code and trained models are available at https://github.com/NeuroCompLab-psu/Astromorphic_Self_Repair.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Han, Zhuangyu and Islam, A N M Nafiul and Sengupta, Abhronil}, year={2023}, month={Jun.}, pages={7821-7829} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25947/25719", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25947", + "pdf_size": 825504, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9379602308923150797&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "psu.edu;psu.edu;psu.edu", + "email": "psu.edu;psu.edu;psu.edu", + "github": "https://github.com/NeuroCompLab-psu/Astromorphic Self Repair", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Penn State University", + "aff_unique_dep": "School of Electrical Engineering and Computer Science", + "aff_unique_url": "https://www.psu.edu", + "aff_unique_abbr": "Penn State", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25336", + "title": "Asynchronous Event Processing with Local-Shift Graph Convolutional Network", + "track": "main", + "status": "Technical", + "abstract": "Event cameras are bio-inspired sensors that produce sparse and asynchronous event streams instead of frame-based images at a high-rate. Recent works utilizing graph convolutional networks (GCNs) have achieved remarkable performance in recognition tasks, which model event stream as spatio-temporal graph. However, the computational mechanism of graph convolution introduces redundant computation when aggregating neighbor features, which limits the low-latency nature of the events. 
And they perform a synchronous inference process, which can not achieve a fast response to the asynchronous event signals. This paper proposes a local-shift graph convolutional network (LSNet), which utilizes a novel local-shift operation equipped with a local spatio-temporal attention component to achieve efficient and adaptive aggregation of neighbor features. To improve the efficiency of pooling operation in feature extraction, we design a node-importance based parallel pooling method (NIPooling) for sparse and low-latency event data. Based on the calculated importance of each node, NIPooling can efficiently obtain uniform sampling results in parallel, which retains the diversity of event streams. Furthermore, for achieving a fast response to asynchronous event signals, an asynchronous event processing procedure is proposed to restrict the network nodes which need to recompute activations only to those affected by the new arrival event. Experimental results show that the computational cost can be reduced by nearly 9 times through using local-shift operation and the proposed asynchronous procedure can further improve the inference efficiency, while achieving state-of-the-art performance on gesture recognition and object recognition.", + "primary_area": "computer vision ii", + "author": "Linhui Sun; Yifan Zhang; Jian Cheng; Hanqing Lu", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, 100049, Beijing, China+AIRIA, 211135, Nanjing, China; Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, 100049, Beijing, China+AIRIA, 211135, Nanjing, China; Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, 100049, Beijing, China+AIRIA, 211135, 
Nanjing, China; Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, 100049, Beijing, China+AIRIA, 211135, Nanjing, China", + "bibtex": "@article{Sun_Zhang_Cheng_Lu_2023, title={Asynchronous Event Processing with Local-Shift Graph Convolutional Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25336}, DOI={10.1609/aaai.v37i2.25336}, abstractNote={Event cameras are bio-inspired sensors that produce sparse and asynchronous event streams instead of frame-based images at a high-rate. Recent works utilizing graph convolutional networks (GCNs) have achieved remarkable performance in recognition tasks, which model event stream as spatio-temporal graph. However, the computational mechanism of graph convolution introduces redundant computation when aggregating neighbor features, which limits the low-latency nature of the events. And they perform a synchronous inference process, which can not achieve a fast response to the asynchronous event signals. This paper proposes a local-shift graph convolutional network (LSNet), which utilizes a novel local-shift operation equipped with a local spatio-temporal attention component to achieve efficient and adaptive aggregation of neighbor features. To improve the efficiency of pooling operation in feature extraction, we design a node-importance based parallel pooling method (NIPooling) for sparse and low-latency event data. Based on the calculated importance of each node, NIPooling can efficiently obtain uniform sampling results in parallel, which retains the diversity of event streams. Furthermore, for achieving a fast response to asynchronous event signals, an asynchronous event processing procedure is proposed to restrict the network nodes which need to recompute activations only to those affected by the new arrival event. 
Experimental results show that the computational cost can be reduced by nearly 9 times through using local-shift operation and the proposed asynchronous procedure can further improve the inference efficiency, while achieving state-of-the-art performance on gesture recognition and object recognition.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Linhui and Zhang, Yifan and Cheng, Jian and Lu, Hanqing}, year={2023}, month={Jun.}, pages={2402-2410} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25336/25108", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25336", + "pdf_size": 347716, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10063449484267627351&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;AIRIA", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn;", + "aff_unique_abbr": "CAS;UCAS;", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25477", + "title": "Attack Can Benefit: An Adversarial Approach to Recognizing Facial Expressions under Noisy Annotations", + "track": "main", + "status": "Technical", + "abstract": "The real-world Facial Expression Recognition (FER) datasets usually exhibit complex scenarios with coupled noise annotations and imbalanced classes distribution, which undoubtedly impede the development of FER methods. 
To address the aforementioned issues, in this paper, we propose a novel and flexible method to spot noisy labels by leveraging adversarial attack, termed as Geometry Aware Adversarial Vulnerability Estimation (GAAVE). Different from existing state-of-the-art methods of noisy label learning (NLL), our method has no reliance on additional information and is thus easy to generalize to the large-scale real-world FER datasets. Besides, the combination of Dataset Splitting module and Subset Refactoring module mitigates the impact of class imbalance, and the Self-Annotator module facilitates the sufficient use of all training data. Extensive experiments on RAF-DB, FERPlus, AffectNet, and CIFAR-10 datasets validate the effectiveness of our method. The stabilized enhancement based on different methods demonstrates the flexibility of our proposed GAAVE.", + "primary_area": "computer vision iii", + "author": "Jiawen Zheng; Bo Li; Shengchuan Zhang; Shuang Wu; Liujuan Cao; Shouhong Ding", + "authorids": "", + "aff": "Xiamen University; Youtu Lab, Tencent + Xiamen University; Xiamen University; Youtu Lab, Tencent; Xiamen University; Youtu Lab, Tencent", + "bibtex": "@article{Zheng_Li_Zhang_Wu_Cao_Ding_2023, title={Attack Can Benefit: An Adversarial Approach to Recognizing Facial Expressions under Noisy Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25477}, DOI={10.1609/aaai.v37i3.25477}, abstractNote={The real-world Facial Expression Recognition (FER) datasets usually exhibit complex scenarios with coupled noise annotations and imbalanced classes distribution, which undoubtedly impede the development of FER methods. To address the aforementioned issues, in this paper, we propose a novel and flexible method to spot noisy labels by leveraging adversarial attack, termed as Geometry Aware Adversarial Vulnerability Estimation (GAAVE). 
Different from existing state-of-the-art methods of noisy label learning (NLL), our method has no reliance on additional information and is thus easy to generalize to the large-scale real-world FER datasets. Besides, the combination of Dataset Splitting module and Subset Refactoring module mitigates the impact of class imbalance, and the Self-Annotator module facilitates the sufficient use of all training data. Extensive experiments on RAF-DB, FERPlus, AffectNet, and CIFAR-10 datasets validate the effectiveness of our method. The stabilized enhancement based on different methods demonstrates the flexibility of our proposed GAAVE.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Jiawen and Li, Bo and Zhang, Shengchuan and Wu, Shuang and Cao, Liujuan and Ding, Shouhong}, year={2023}, month={Jun.}, pages={3660-3668} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25477/25249", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25477", + "pdf_size": 460319, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14267987411667532140&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "xmu.edu.cn;tencent.com;xmu.edu.cn;tencent.com;xmu.edu.cn;tencent.com", + "email": "xmu.edu.cn;tencent.com;xmu.edu.cn;tencent.com;xmu.edu.cn;tencent.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+0;0;1;0;1", + "aff_unique_norm": "Xiamen University;Tencent", + "aff_unique_dep": ";Youtu Lab", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "XMU;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25391", + "title": "Attention-Based Depth Distillation with 3D-Aware Positional Encoding for Monocular 3D Object Detection", + "track": "main", + "status": "Technical", + 
"abstract": "Monocular 3D object detection is a low-cost but challenging task, as it requires generating accurate 3D localization solely from a single image input. Recent developed depth-assisted methods show promising results by using explicit depth maps as intermediate features, which are either precomputed by monocular depth estimation networks or jointly evaluated with 3D object detection. However, inevitable errors from estimated depth priors may lead to misaligned semantic information and 3D localization, hence resulting in feature smearing and suboptimal predictions. To mitigate this issue, we propose ADD, an Attention-based Depth knowledge Distillation framework with 3D-aware positional encoding. Unlike previous knowledge distillation frameworks that adopt stereo- or LiDAR-based teachers, we build up our teacher with identical architecture as the student but with extra ground-truth depth as input. Credit to our teacher design, our framework is seamless, domain-gap free, easily implementable, and is compatible with object-wise ground-truth depth. Specifically, we leverage intermediate features and responses for knowledge distillation. Considering long-range 3D dependencies, we propose 3D-aware self-attention and target-aware cross-attention modules for student adaptation. Extensive experiments are performed to verify the effectiveness of our framework on the challenging KITTI 3D object detection benchmark. We implement our framework on three representative monocular detectors, and we achieve state-of-the-art performance with no additional inference computational cost relative to baseline models. 
Our code is available at https://github.com/rockywind/ADD.", + "primary_area": "computer vision iii", + "author": "Zizhang Wu; Yunzhe Wu; Jian Pu; Xianzhi Li; Xiaoquan Wang", + "authorids": "", + "aff": "ZongmuTech; ZongmuTech; Fudan University; Huazhong University of Science and Technology; ZongmuTech", + "bibtex": "@article{Wu_Wu_Pu_Li_Wang_2023, title={Attention-Based Depth Distillation with 3D-Aware Positional Encoding for Monocular 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25391}, DOI={10.1609/aaai.v37i3.25391}, abstractNote={Monocular 3D object detection is a low-cost but challenging task, as it requires generating accurate 3D localization solely from a single image input. Recent developed depth-assisted methods show promising results by using explicit depth maps as intermediate features, which are either precomputed by monocular depth estimation networks or jointly evaluated with 3D object detection. However, inevitable errors from estimated depth priors may lead to misaligned semantic information and 3D localization, hence resulting in feature smearing and suboptimal predictions. To mitigate this issue, we propose ADD, an Attention-based Depth knowledge Distillation framework with 3D-aware positional encoding. Unlike previous knowledge distillation frameworks that adopt stereo- or LiDAR-based teachers, we build up our teacher with identical architecture as the student but with extra ground-truth depth as input. Credit to our teacher design, our framework is seamless, domain-gap free, easily implementable, and is compatible with object-wise ground-truth depth. Specifically, we leverage intermediate features and responses for knowledge distillation. Considering long-range 3D dependencies, we propose 3D-aware self-attention and target-aware cross-attention modules for student adaptation. 
Extensive experiments are performed to verify the effectiveness of our framework on the challenging KITTI 3D object detection benchmark. We implement our framework on three representative monocular detectors, and we achieve state-of-the-art performance with no additional inference computational cost relative to baseline models. Our code is available at https://github.com/rockywind/ADD.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Zizhang and Wu, Yunzhe and Pu, Jian and Li, Xianzhi and Wang, Xiaoquan}, year={2023}, month={Jun.}, pages={2892-2900} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25391/25163", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25391", + "pdf_size": 720462, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14874410601350649653&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;zongmutech.com;zongmutech.com;fudan.edu.cn;hust.edu.cn", + "email": "gmail.com;zongmutech.com;zongmutech.com;fudan.edu.cn;hust.edu.cn", + "github": "https://github.com/rockywind/ADD", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "ZongmuTech;Fudan University;Huazhong University of Science and Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.zongmutech.com;https://www.fudan.edu.cn;http://www.hust.edu.cn", + "aff_unique_abbr": ";Fudan;HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26720", + "title": "Attention-Conditioned Augmentations for Self-Supervised Anomaly Detection and Localization", + "track": "aaai special track", + "status": "Technical", + "abstract": "Self-supervised anomaly detection and localization are critical to real-world scenarios in which collecting anomalous samples and pixel-wise labeling is tedious or infeasible, 
even worse when a wide variety of unseen anomalies could surface at test time. Our approach involves a pretext task in the context of masked image modeling, where the goal is to impose agreement between cluster assignments obtained from the representation of an image view containing saliency-aware masked patches and the uncorrupted image view. We harness the self-attention map extracted from the transformer to mask non-salient image patches without destroying the crucial structure associated with the foreground object. Subsequently, the pre-trained model is fine-tuned to detect and localize simulated anomalies generated under the guidance of the transformer's self-attention map. We conducted extensive validation and ablations on the benchmark of industrial images and achieved superior performance against competing methods. We also show the adaptability of our method to the medical images of the chest X-rays benchmark.", + "primary_area": "safe and robust ai", + "author": "Behzad Bozorgtabar; Dwarikanath Mahapatra", + "authorids": "", + "aff": "\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL), Lausanne, Switzerland+Lausanne University Hospital (CHUV), Lausanne, Switzerland; Inception Institute of AI (IIAI), Abu Dhabi, UAE", + "bibtex": "@article{Bozorgtabar_Mahapatra_2023, title={Attention-Conditioned Augmentations for Self-Supervised Anomaly Detection and Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26720}, DOI={10.1609/aaai.v37i12.26720}, abstractNote={Self-supervised anomaly detection and localization are critical to real-world scenarios in which collecting anomalous samples and pixel-wise labeling is tedious or infeasible, even worse when a wide variety of unseen anomalies could surface at test time. 
Our approach involves a pretext task in the context of masked image modeling, where the goal is to impose agreement between cluster assignments obtained from the representation of an image view containing saliency-aware masked patches and the uncorrupted image view. We harness the self-attention map extracted from the transformer to mask non-salient image patches without destroying the crucial structure associated with the foreground object. Subsequently, the pre-trained model is fine-tuned to detect and localize simulated anomalies generated under the guidance of the transformer\u2019s self-attention map. We conducted extensive validation and ablations on the benchmark of industrial images and achieved superior performance against competing methods. We also show the adaptability of our method to the medical images of the chest X-rays benchmark.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bozorgtabar, Behzad and Mahapatra, Dwarikanath}, year={2023}, month={Jun.}, pages={14720-14728} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26720/26492", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26720", + "pdf_size": 7540244, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1502737647035563528&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "epfl.ch;inceptioniai.org", + "email": "epfl.ch;inceptioniai.org", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne;Lausanne University Hospital;Inception Institute of AI", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.epfl.ch;https://www.chuv.ch;", + "aff_unique_abbr": "EPFL;CHUV;IIAI", + "aff_campus_unique_index": "0+0;1", + "aff_campus_unique": "Lausanne;Abu Dhabi", + "aff_country_unique_index": "0+0;1", + "aff_country_unique": "Switzerland;United Arab Emirates" + }, + { + 
"id": "article-25858", + "title": "Attribute and Structure Preserving Graph Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Graph Contrastive Learning (GCL) has drawn much research interest due to its strong ability to capture both graph structure and node attribute information in a self-supervised manner. Current GCL methods usually adopt Graph Neural Networks (GNNs) as the base encoder, which typically relies on the homophily assumption of networks and overlooks node similarity in the attribute space. There are many scenarios where such assumption cannot be satisfied, or node similarity plays a crucial role. In order to design a more robust mechanism, we develop a novel attribute and structure preserving graph contrastive learning framework, named ASP, which comprehensively and efficiently preserves node attributes while exploiting graph structure. Specifically, we consider three different graph views in our framework, i.e., original view, attribute view, and global structure view. Then, we perform contrastive learning across three views in a joint fashion, mining comprehensive graph information. We validate the effectiveness of the proposed framework on various real-world networks with different levels of homophily. 
The results demonstrate the superior performance of our model over the representative baselines.", + "primary_area": "machine learning i", + "author": "Jialu Chen; Gang Kou", + "authorids": "", + "aff": "School of Business Administration, Faculty of Business Administration, Southwestern University of Finance and Economics; School of Business Administration, Faculty of Business Administration, Southwestern University of Finance and Economics", + "bibtex": "@article{Chen_Kou_2023, title={Attribute and Structure Preserving Graph Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25858}, DOI={10.1609/aaai.v37i6.25858}, abstractNote={Graph Contrastive Learning (GCL) has drawn much research interest due to its strong ability to capture both graph structure and node attribute information in a self-supervised manner. Current GCL methods usually adopt Graph Neural Networks (GNNs) as the base encoder, which typically relies on the homophily assumption of networks and overlooks node similarity in the attribute space. There are many scenarios where such assumption cannot be satisfied, or node similarity plays a crucial role. In order to design a more robust mechanism, we develop a novel attribute and structure preserving graph contrastive learning framework, named ASP, which comprehensively and efficiently preserves node attributes while exploiting graph structure. Specifically, we consider three different graph views in our framework, i.e., original view, attribute view, and global structure view. Then, we perform contrastive learning across three views in a joint fashion, mining comprehensive graph information. We validate the effectiveness of the proposed framework on various real-world networks with different levels of homophily. 
The results demonstrate the superior performance of our model over the representative baselines.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jialu and Kou, Gang}, year={2023}, month={Jun.}, pages={7024-7032} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25858/25630", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25858", + "pdf_size": 224355, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13698914780742739163&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "smail.swufe.edu.cn;swufe.edu.cn", + "email": "smail.swufe.edu.cn;swufe.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Southwestern University of Finance and Economics", + "aff_unique_dep": "School of Business Administration", + "aff_unique_url": "http://www.swufe.edu.cn", + "aff_unique_abbr": "SWUFE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25967", + "title": "Audio-Visual Contrastive Learning with Temporal Self-Supervision", + "track": "main", + "status": "Technical", + "abstract": "We propose a self-supervised learning approach for videos that learns representations of both the RGB frames and the accompanying audio without human supervision. \nIn contrast to images that capture the static scene appearance, videos also contain sound and temporal scene dynamics. \nTo leverage the temporal and aural dimension inherent to videos, our method extends temporal self-supervision to the audio-visual setting and integrates it with multi-modal contrastive objectives.\nAs temporal self-supervision, we pose playback speed and direction recognition in both modalities and propose intra- and inter-modal temporal ordering tasks. 
\nFurthermore, we design a novel contrastive objective in which the usual pairs are supplemented with additional sample-dependent positives and negatives sampled from the evolving feature space. \nIn our model, we apply such losses among video clips and between videos and their temporally corresponding audio clips. \nWe verify our model design in extensive ablation experiments and evaluate the video and audio representations in transfer experiments to action recognition and retrieval on UCF101 and HMBD51, audio classification on ESC50, and robust video fingerprinting on VGG-Sound, with state-of-the-art results.", + "primary_area": "machine learning ii", + "author": "Simon Jenni; Alexander Black; John Collomosse", + "authorids": "", + "aff": "Adobe Research; University of Surrey; Adobe Research + University of Surrey", + "bibtex": "@article{Jenni_Black_Collomosse_2023, title={Audio-Visual Contrastive Learning with Temporal Self-Supervision}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25967}, DOI={10.1609/aaai.v37i7.25967}, abstractNote={We propose a self-supervised learning approach for videos that learns representations of both the RGB frames and the accompanying audio without human supervision. In contrast to images that capture the static scene appearance, videos also contain sound and temporal scene dynamics. To leverage the temporal and aural dimension inherent to videos, our method extends temporal self-supervision to the audio-visual setting and integrates it with multi-modal contrastive objectives.\nAs temporal self-supervision, we pose playback speed and direction recognition in both modalities and propose intra- and inter-modal temporal ordering tasks. Furthermore, we design a novel contrastive objective in which the usual pairs are supplemented with additional sample-dependent positives and negatives sampled from the evolving feature space. 
In our model, we apply such losses among video clips and between videos and their temporally corresponding audio clips. We verify our model design in extensive ablation experiments and evaluate the video and audio representations in transfer experiments to action recognition and retrieval on UCF101 and HMBD51, audio classification on ESC50, and robust video fingerprinting on VGG-Sound, with state-of-the-art results.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jenni, Simon and Black, Alexander and Collomosse, John}, year={2023}, month={Jun.}, pages={7996-8004} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25967/25739", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25967", + "pdf_size": 3810108, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4054194947488732185&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "adobe.com;surrey.ac.uk;adobe.com", + "email": "adobe.com;surrey.ac.uk;adobe.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "Adobe;University of Surrey", + "aff_unique_dep": "Adobe Research;", + "aff_unique_url": "https://research.adobe.com;https://www.surrey.ac.uk", + "aff_unique_abbr": "Adobe;Surrey", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0+1", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "article-25174", + "title": "AudioEar: Single-View Ear Reconstruction for Personalized Spatial Audio", + "track": "main", + "status": "Technical", + "abstract": "Spatial audio, which focuses on immersive 3D sound rendering, is widely applied in the acoustic industry. One of the key problems of current spatial audio rendering methods is the lack of personalization based on different anatomies of individuals, which is essential to produce accurate sound source positions. 
In this work, we address this problem from an interdisciplinary perspective. The rendering of spatial audio is strongly correlated with the 3D shape of human bodies, particularly ears. To this end, we propose to achieve personalized spatial audio by reconstructing 3D human ears with single-view images. First, to benchmark the ear reconstruction task, we introduce AudioEar3D, a high-quality 3D ear dataset consisting of 112 point cloud ear scans with RGB images. To self-supervisedly train a reconstruction model, we further collect a 2D ear dataset composed of 2,000 images, each one with manual annotation of occlusion and 55 landmarks, named AudioEar2D. To our knowledge, both datasets have the largest scale and best quality of their kinds for public use. Further, we propose AudioEarM, a reconstruction method guided by a depth estimation network that is trained on synthetic data, with two loss functions tailored for ear data. Lastly, to fill the gap between the vision and acoustics community, we develop a pipeline to integrate the reconstructed ear mesh with an off-the-shelf 3D human body and simulate a personalized Head-Related Transfer Function (HRTF), which is the core of spatial audio rendering. 
Code and data are publicly available in https://github.com/seanywang0408/AudioEar.", + "primary_area": "computer vision i", + "author": "Xiaoyang Huang; Yanjun Wang; Yang Liu; Bingbing Ni; Wenjun Zhang; Jinxian Liu; Teng Li", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; FocusMedia; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Anhui University", + "bibtex": "@article{Huang_Wang_Liu_Ni_Zhang_Liu_Li_2023, title={AudioEar: Single-View Ear Reconstruction for Personalized Spatial Audio}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25174}, DOI={10.1609/aaai.v37i1.25174}, abstractNote={Spatial audio, which focuses on immersive 3D sound rendering, is widely applied in the acoustic industry. One of the key problems of current spatial audio rendering methods is the lack of personalization based on different anatomies of individuals, which is essential to produce accurate sound source positions. In this work, we address this problem from an interdisciplinary perspective. The rendering of spatial audio is strongly correlated with the 3D shape of human bodies, particularly ears. To this end, we propose to achieve personalized spatial audio by reconstructing 3D human ears with single-view images. First, to benchmark the ear reconstruction task, we introduce AudioEar3D, a high-quality 3D ear dataset consisting of 112 point cloud ear scans with RGB images. To self-supervisedly train a reconstruction model, we further collect a 2D ear dataset composed of 2,000 images, each one with manual annotation of occlusion and 55 landmarks, named AudioEar2D. To our knowledge, both datasets have the largest scale and best quality of their kinds for public use. Further, we propose AudioEarM, a reconstruction method guided by a depth estimation network that is trained on synthetic data, with two loss functions tailored for ear data. 
Lastly, to fill the gap between the vision and acoustics community, we develop a pipeline to integrate the reconstructed ear mesh with an off-the-shelf 3D human body and simulate a personalized Head-Related Transfer Function (HRTF), which is the core of spatial audio rendering. Code and data are publicly available in https://github.com/seanywang0408/AudioEar.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Xiaoyang and Wang, Yanjun and Liu, Yang and Ni, Bingbing and Zhang, Wenjun and Liu, Jinxian and Li, Teng}, year={2023}, month={Jun.}, pages={944-952} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25174/24946", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25174", + "pdf_size": 797287, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7704547362178549822&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "sjtu.edu.cn; ; ;sjtu.edu.cn; ; ; ", + "email": "sjtu.edu.cn; ; ;sjtu.edu.cn; ; ; ", + "github": "https://github.com/seanywang0408/AudioEar", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;2", + "aff_unique_norm": "Shanghai Jiao Tong University;FocusMedia;Anhui University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sjtu.edu.cn;;http://www.ahu.edu.cn/", + "aff_unique_abbr": "SJTU;;AHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26780", + "title": "Auditing and Robustifying COVID-19 Misinformation Datasets via Anticontent Sampling", + "track": "aaai special track", + "status": "Technical", + "abstract": "This paper makes two key contributions. First, it argues that highly specialized rare content classifiers trained on small data typically have limited exposure to the richness and topical diversity of the negative class (dubbed anticontent) as observed in the wild. 
As a result, these classifiers' strong performance observed on the test set may not translate into real-world settings. In the context of COVID-19 misinformation detection, we conduct an in-the-wild audit of multiple datasets and demonstrate that models trained with several prominently cited recent datasets are vulnerable to anticontent when evaluated in the wild. Second, we present a novel active learning pipeline that requires zero manual annotation and iteratively augments the training data with challenging anticontent, robustifying these classifiers.", + "primary_area": "safe and robust ai", + "author": "Clay H. Yoo; Ashiqur R. KhudaBukhsh", + "authorids": "", + "aff": "Carnegie Mellon University; Rochester Institute of Technology", + "bibtex": "@article{Yoo_KhudaBukhsh_2023, title={Auditing and Robustifying COVID-19 Misinformation Datasets via Anticontent Sampling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26780}, DOI={10.1609/aaai.v37i12.26780}, abstractNote={This paper makes two key contributions. First, it argues that highly specialized rare content classifiers trained on small data typically have limited exposure to the richness and topical diversity of the negative class (dubbed anticontent) as observed in the wild. As a result, these classifiers\u2019 strong performance observed on the test set may not translate into real-world settings. In the context of COVID-19 misinformation detection, we conduct an in-the-wild audit of multiple datasets and demonstrate that models trained with several prominently cited recent datasets are vulnerable to anticontent when evaluated in the wild. Second, we present a novel active learning pipeline that requires zero manual annotation and iteratively augments the training data with challenging anticontent, robustifying these classifiers.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yoo, Clay H. 
and KhudaBukhsh, Ashiqur R.}, year={2023}, month={Jun.}, pages={15260-15268} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26780/26552", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26780", + "pdf_size": 172791, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10992790001483253702&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "andrew.cmu.edu;rit.edu", + "email": "andrew.cmu.edu;rit.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Carnegie Mellon University;Rochester Institute of Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cmu.edu;https://www.rit.edu", + "aff_unique_abbr": "CMU;RIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25888", + "title": "Augmented Proximal Policy Optimization for Safe Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Safe reinforcement learning considers practical scenarios that maximize the return while satisfying safety constraints. Current algorithms, which suffer from training oscillations or approximation errors, still struggle to update the policy efficiently with precise constraint satisfaction. In this article, we propose Augmented Proximal Policy Optimization (APPO), which augments the Lagrangian function of the primal constrained problem via attaching a quadratic deviation term. The constructed multiplier-penalty function dampens cost oscillation for stable convergence while being equivalent to the primal constrained problem to precisely control safety costs. APPO alternately updates the policy and the Lagrangian multiplier via solving the constructed augmented primal-dual problem, which can be easily implemented by any first-order optimizer. 
We apply our APPO methods in diverse safety-constrained tasks, setting a new state of the art compared with a comprehensive list of safe RL baselines. Extensive experiments verify the merits of our method in easy implementation, stable convergence, and precise cost control.", + "primary_area": "machine learning i", + "author": "Juntao Dai; Jiaming Ji; Long Yang; Qian Zheng; Gang Pan", + "authorids": "", + "aff": "The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China; School of Artificial Intelligence, Peking University, Beijing, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China", + "bibtex": "@article{Dai_Ji_Yang_Zheng_Pan_2023, title={Augmented Proximal Policy Optimization for Safe Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25888}, DOI={10.1609/aaai.v37i6.25888}, abstractNote={Safe reinforcement learning considers practical scenarios that maximize the return while satisfying safety constraints. Current algorithms, which suffer from training oscillations or approximation errors, still struggle to update the policy efficiently with precise constraint satisfaction. In this article, we propose Augmented Proximal Policy Optimization (APPO), which augments the Lagrangian function of the primal constrained problem via attaching a quadratic deviation term. 
The constructed multiplier-penalty function dampens cost oscillation for stable convergence while being equivalent to the primal constrained problem to precisely control safety costs. APPO alternately updates the policy and the Lagrangian multiplier via solving the constructed augmented primal-dual problem, which can be easily implemented by any first-order optimizer. We apply our APPO methods in diverse safety-constrained tasks, setting a new state of the art compared with a comprehensive list of safe RL baselines. Extensive experiments verify the merits of our method in easy implementation, stable convergence, and precise cost control.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dai, Juntao and Ji, Jiaming and Yang, Long and Zheng, Qian and Pan, Gang}, year={2023}, month={Jun.}, pages={7288-7295} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25888/25660", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25888", + "pdf_size": 1686639, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18358638250500320109&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;zju.edu.cn;pku.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;pku.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;1;0+0;0+0", + "aff_unique_norm": "Zhejiang University;Peking University", + "aff_unique_dep": "State Key Lab of Brain-Machine Intelligence;School of Artificial Intelligence", + "aff_unique_url": "http://www.zju.edu.cn;http://www.pku.edu.cn", + "aff_unique_abbr": "ZJU;PKU", + "aff_campus_unique_index": "0+0;0+0;1;0+0;0+0", + "aff_campus_unique": "Hangzhou;Beijing", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25594", + "title": "Augmenting Affective Dependency Graph via Iterative Incongruity Graph Learning for 
Sarcasm Detection", + "track": "main", + "status": "Technical", + "abstract": "Recently, progress has been made towards improving automatic sarcasm detection in computer science. Among existing models, manually constructing static graphs for texts and then using graph neural networks (GNNs) is one of the most effective approaches for drawing long-range incongruity patterns. However, the manually constructed graph structure might be prone to errors (e.g., noisy or incomplete) and not optimal for the sarcasm detection task. Errors produced during the graph construction step cannot be remedied and may accrue to the following stages, resulting in poor performance. To surmount the above limitations, we explore a novel Iterative Augmenting Affective Graph and Dependency Graph (IAAD) framework to jointly and iteratively learn the incongruity graph structure. IAAD can alternatively update the incongruity graph structure and node representation until the learning graph structure is optimal for the metrics of sarcasm detection. More concretely, we begin with deriving an affective and a dependency graph for each instance, then an iterative incongruity graph learning module is employed to augment affective and dependency graphs for obtaining the optimal inconsistent semantic graph with the goal of optimizing the graph for the sarcasm detection task. 
Extensive experiments on three datasets demonstrate that the proposed model outperforms state-of-the-art baselines for sarcasm detection with significant margins.", + "primary_area": "data mining and knowledge management", + "author": "Xiaobao Wang; Yiqi Dong; Di Jin; Yawen Li; Longbiao Wang; Jianwu Dang", + "authorids": "", + "aff": "Tianjin Key Laboratory of Cognitive Computing and Application, College of Intelligence and Computing, Tianjin University, Tianjin, China; School of New Media and Communication, Tianjin University, Tianjin, China; Tianjin Key Laboratory of Cognitive Computing and Application, College of Intelligence and Computing, Tianjin University, Tianjin, China; School of Economics and Management, Beijing University of Posts and Telecommunications, Beijing, China; Tianjin Key Laboratory of Cognitive Computing and Application, College of Intelligence and Computing, Tianjin University, Tianjin, China+Huiyan Technology (Tianjin) Co., Ltd, Tianjin, China; Tianjin Key Laboratory of Cognitive Computing and Application, College of Intelligence and Computing, Tianjin University, Tianjin, China+Peng Cheng Laboratory, Shenzhen, China", + "bibtex": "@article{Wang_Dong_Jin_Li_Wang_Dang_2023, title={Augmenting Affective Dependency Graph via Iterative Incongruity Graph Learning for Sarcasm Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25594}, DOI={10.1609/aaai.v37i4.25594}, abstractNote={Recently, progress has been made towards improving automatic sarcasm detection in computer science. Among existing models, manually constructing static graphs for texts and then using graph neural networks (GNNs) is one of the most effective approaches for drawing long-range incongruity patterns. However, the manually constructed graph structure might be prone to errors (e.g., noisy or incomplete) and not optimal for the sarcasm detection task. 
Errors produced during the graph construction step cannot be remedied and may accrue to the following stages, resulting in poor performance. To surmount the above limitations, we explore a novel Iterative Augmenting Affective Graph and Dependency Graph (IAAD) framework to jointly and iteratively learn the incongruity graph structure. IAAD can alternatively update the incongruity graph structure and node representation until the learning graph structure is optimal for the metrics of sarcasm detection. More concretely, we begin with deriving an affective and a dependency graph for each instance, then an iterative incongruity graph learning module is employed to augment affective and dependency graphs for obtaining the optimal inconsistent semantic graph with the goal of optimizing the graph for the sarcasm detection task. Extensive experiments on three datasets demonstrate that the proposed model outperforms state-of-the-art baselines for sarcasm detection with significant margins.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Xiaobao and Dong, Yiqi and Jin, Di and Li, Yawen and Wang, Longbiao and Dang, Jianwu}, year={2023}, month={Jun.}, pages={4702-4710} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25594/25366", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25594", + "pdf_size": 290119, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8667767582212223424&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;bupt.edu.cn;tju.edu.cn;jaist.ac.jp", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;bupt.edu.cn;tju.edu.cn;jaist.ac.jp", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0+2;0+3", + "aff_unique_norm": "Tianjin University;Beijing University of Posts and Telecommunications;Huiyan Technology (Tianjin) Co., Ltd;Peng Cheng Laboratory", + "aff_unique_dep": "College 
of Intelligence and Computing;School of Economics and Management;;", + "aff_unique_url": "https://www.tju.edu.cn;http://www.bupt.edu.cn/;;", + "aff_unique_abbr": "Tianjin University;BUPT;;", + "aff_campus_unique_index": "0;0;0;1;0;0+3", + "aff_campus_unique": "Tianjin;Beijing;;Shenzhen", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27071", + "title": "Augmenting Flight Training with AI to Efficiently Train Pilots", + "track": "demonstrations", + "status": "Technical", + "abstract": "We propose an AI-based pilot trainer to help students learn how to fly aircraft. First, an AI agent uses behavioral cloning to learn flying maneuvers from qualified flight instructors. Later, the system uses the agent's decisions to detect errors made by students and provide feedback to help students correct their errors. This paper presents an instantiation of the pilot trainer. We focus on teaching straight and level flying maneuvers by automatically providing formative feedback to the human student.", + "primary_area": "", + "author": "Michael Guevarra; Srijita Das; Christabel Wayllace; Carrie Demmans Epp; Matthew Taylor; Alan Tay", + "authorids": "", + "aff": "Delphi Technology Corp+University of Manitoba+Alberta Machine Intelligence Institute (Amii); University of Alberta+Alberta Machine Intelligence Institute (Amii); University of Alberta+Alberta Machine Intelligence Institute (Amii); University of Alberta; University of Alberta+Alberta Machine Intelligence Institute (Amii); Delphi Technology Corp", + "bibtex": "@article{Guevarra_Das_Wayllace_Demmans Epp_Taylor_Tay_2024, title={Augmenting Flight Training with AI to Efficiently Train Pilots}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27071}, DOI={10.1609/aaai.v37i13.27071}, abstractNote={We propose an AI-based pilot trainer to help students learn how to fly aircraft. 
First, an AI agent uses behavioral cloning to learn flying maneuvers from qualified flight instructors. Later, the system uses the agent\u2019s decisions to detect errors made by students and provide feedback to help students correct their errors. This paper presents an instantiation of the pilot trainer. We focus on teaching straight and level flying maneuvers by automatically providing formative feedback to the human student.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guevarra, Michael and Das, Srijita and Wayllace, Christabel and Demmans Epp, Carrie and Taylor, Matthew and Tay, Alan}, year={2024}, month={Jul.}, pages={16437-16439} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27071/26843", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27071", + "pdf_size": 1304535, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7221262115441390442&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "myumanitoba.ca;ualberta.ca;ualberta.ca;ualberta.ca;ualberta.ca;delphitechcorp.com", + "email": "myumanitoba.ca;ualberta.ca;ualberta.ca;ualberta.ca;ualberta.ca;delphitechcorp.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;3+2;3+2;3;3+2;0", + "aff_unique_norm": "Delphi Technology Corp;University of Manitoba;Alberta Machine Intelligence Institute;University of Alberta", + "aff_unique_dep": ";;Machine Intelligence;", + "aff_unique_url": "https://www.delphitechnology.com;https://umanitoba.ca;https://amiilabs.ca;https://www.ualberta.ca", + "aff_unique_abbr": ";U of M;Amii;UAlberta", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1+1;1+1;1+1;1;1+1;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-26704", + "title": "Auto-CM: Unsupervised Deep Learning for Satellite Imagery Composition and Cloud Masking Using Spatio-Temporal Dynamics", + 
"track": "aaai special track", + "status": "Technical", + "abstract": "Cloud masking is both a fundamental and a critical task in the vast majority of Earth observation problems across social sectors, including agriculture, energy, water, etc. The sheer volume of satellite imagery to be processed has fast-climbed to a scale (e.g., >10 PBs/year) that is prohibitive for manual processing. Meanwhile, generating reliable cloud masks and image composite is increasingly challenging due to the continued distribution-shifts in the imagery collected by existing sensors and the ever-growing variety of sensors and platforms. Moreover, labeled samples are scarce and geographically limited compared to the needs in real large-scale applications. In related work, traditional remote sensing methods are often physics-based and rely on special spectral signatures from multi- or hyper-spectral bands, which are often not available in data collected by many -- and especially more recent -- high-resolution platforms. Machine learning and deep learning based methods, on the other hand, often require large volumes of up-to-date training data to be reliable and generalizable over space. We propose an autonomous image composition and masking (Auto-CM) framework to learn to solve the fundamental tasks in a label-free manner, by leveraging different dynamics of events in both geographic domains and time-series. 
Our experiments show that Auto-CM outperforms existing methods on a wide-range of data with different satellite platforms, geographic regions and bands.", + "primary_area": "ai for social impact", + "author": "Yiqun Xie; Zhili Li; Han Bao; Xiaowei Jia; Dongkuan Xu; Xun Zhou; Sergii Skakun", + "authorids": "", + "aff": "University of Maryland; University of Maryland; University of Iowa; University of Pittsburgh; North Carolina State University; University of Iowa; University of Maryland", + "bibtex": "@article{Xie_Li_Bao_Jia_Xu_Zhou_Skakun_2023, title={Auto-CM: Unsupervised Deep Learning for Satellite Imagery Composition and Cloud Masking Using Spatio-Temporal Dynamics}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26704}, DOI={10.1609/aaai.v37i12.26704}, abstractNote={Cloud masking is both a fundamental and a critical task in the vast majority of Earth observation problems across social sectors, including agriculture, energy, water, etc. The sheer volume of satellite imagery to be processed has fast-climbed to a scale (e.g., >10 PBs/year) that is prohibitive for manual processing. Meanwhile, generating reliable cloud masks and image composite is increasingly challenging due to the continued distribution-shifts in the imagery collected by existing sensors and the ever-growing variety of sensors and platforms. Moreover, labeled samples are scarce and geographically limited compared to the needs in real large-scale applications. In related work, traditional remote sensing methods are often physics-based and rely on special spectral signatures from multi- or hyper-spectral bands, which are often not available in data collected by many -- and especially more recent -- high-resolution platforms. Machine learning and deep learning based methods, on the other hand, often require large volumes of up-to-date training data to be reliable and generalizable over space. 
We propose an autonomous image composition and masking (Auto-CM) framework to learn to solve the fundamental tasks in a label-free manner, by leveraging different dynamics of events in both geographic domains and time-series. Our experiments show that Auto-CM outperforms existing methods on a wide-range of data with different satellite platforms, geographic regions and bands.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Yiqun and Li, Zhili and Bao, Han and Jia, Xiaowei and Xu, Dongkuan and Zhou, Xun and Skakun, Sergii}, year={2023}, month={Jun.}, pages={14575-14583} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26704/26476", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26704", + "pdf_size": 3041424, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6485829132580793699&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "umd.edu;umd.edu;uiowa.edu;pitt.edu;ncsu.edu;uiowa.edu;umd.edu", + "email": "umd.edu;umd.edu;uiowa.edu;pitt.edu;ncsu.edu;uiowa.edu;umd.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;3;1;0", + "aff_unique_norm": "University of Maryland;University of Iowa;University of Pittsburgh;North Carolina State University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www/umd.edu;https://www.uiowa.edu;https://www.pitt.edu;https://www.ncsu.edu", + "aff_unique_abbr": "UMD;UIowa;Pitt;NCSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26201", + "title": "Auto-Weighted Multi-View Clustering for Large-Scale Data", + "track": "main", + "status": "Technical", + "abstract": "Multi-view clustering has gained broad attention owing to its capacity to exploit complementary information across multiple data views. 
Although existing methods demonstrate delightful clustering performance, most of them are of high time complexity and cannot handle large-scale data. Matrix factorization-based models are a representative of solving this problem. However, they assume that the views share a dimension-fixed consensus coefficient matrix and view-specific base matrices, limiting their representability. Moreover, a series of large-scale algorithms that bear one or more hyperparameters are impractical in real-world applications. To address the two issues, we propose an auto-weighted multi-view clustering (AWMVC) algorithm. Specifically, AWMVC first learns coefficient matrices from corresponding base matrices of different dimensions, then fuses them to obtain an optimal consensus matrix. By mapping original features into distinctive low-dimensional spaces, we can attain more comprehensive knowledge, thus obtaining better clustering results. Moreover, we design a six-step alternative optimization algorithm proven to be convergent theoretically. Also, AWMVC shows excellent performance on various benchmark datasets compared with existing ones. 
The code of AWMVC is publicly available at https://github.com/wanxinhang/AAAI-2023-AWMVC.", + "primary_area": "machine learning iii", + "author": "Xinhang Wan; Xinwang Liu; Jiyuan Liu; Siwei Wang; Yi Wen; Weixuan Liang; En Zhu; Zhe Liu; Lu Zhou", + "authorids": "", + "aff": "College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, China", + "bibtex": "@article{Wan_Liu_Liu_Wang_Wen_Liang_Zhu_Liu_Zhou_2023, title={Auto-Weighted Multi-View Clustering for Large-Scale Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26201}, DOI={10.1609/aaai.v37i8.26201}, abstractNote={Multi-view clustering has gained broad attention owing to its capacity to exploit complementary information across multiple data views. Although existing methods demonstrate delightful clustering performance, most of them are of high time complexity and cannot handle large-scale data. Matrix factorization-based models are a representative of solving this problem. However, they assume that the views share a dimension-fixed consensus coefficient matrix and view-specific base matrices, limiting their representability. Moreover, a series of large-scale algorithms that bear one or more hyperparameters are impractical in real-world applications. 
To address the two issues, we propose an auto-weighted multi-view clustering (AWMVC) algorithm. Specifically, AWMVC first learns coefficient matrices from corresponding base matrices of different dimensions, then fuses them to obtain an optimal consensus matrix. By mapping original features into distinctive low-dimensional spaces, we can attain more comprehensive knowledge, thus obtaining better clustering results. Moreover, we design a six-step alternative optimization algorithm proven to be convergent theoretically. Also, AWMVC shows excellent performance on various benchmark datasets compared with existing ones. The code of AWMVC is publicly available at https://github.com/wanxinhang/AAAI-2023-AWMVC.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wan, Xinhang and Liu, Xinwang and Liu, Jiyuan and Wang, Siwei and Wen, Yi and Liang, Weixuan and Zhu, En and Liu, Zhe and Zhou, Lu}, year={2023}, month={Jun.}, pages={10078-10086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26201/25973", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26201", + "pdf_size": 224916, + "gs_citation": 82, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3904207958963067707&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "email": "nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "github": "https://github.com/wanxinhang/AAAI-2023-AWMVC", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;1;1", + "aff_unique_norm": "National University of Defense Technology;Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "College of Computer;College of Computer Science and Technology", + "aff_unique_url": "http://www.nudt.edu.cn;http://www.nuaa.edu.cn", + "aff_unique_abbr": 
"NUDT;NUAA", + "aff_campus_unique_index": "0;0;0;0;0;0;0;1;1", + "aff_campus_unique": "Changsha;Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26734", + "title": "AutoCost: Evolving Intrinsic Cost for Zero-Violation Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Safety is a critical hurdle that limits the application of deep reinforcement learning to real-world control tasks. To this end, constrained reinforcement learning leverages cost functions to improve safety in constrained Markov decision process. However, constrained methods fail to achieve zero violation even when the cost limit is zero. This paper analyzes the reason for such failure, which suggests that a proper cost function plays an important role in constrained RL. Inspired by the analysis, we propose AutoCost, a simple yet effective framework that automatically searches for cost functions that help constrained RL to achieve zero-violation performance. We validate the proposed method and the searched cost function on the safety benchmark Safety Gym. We compare the performance of augmented agents that use our cost function to provide additive intrinsic costs to a Lagrangian-based policy learner and a constrained-optimization policy learner with baseline agents that use the same policy learners but with only extrinsic costs. 
Results show that the converged policies with intrinsic costs in all environments achieve zero constraint violation and comparable performance with baselines.", + "primary_area": "safe and robust ai", + "author": "Tairan He; Weiye Zhao; Changliu Liu", + "authorids": "", + "aff": "Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University", + "bibtex": "@article{He_Zhao_Liu_2023, title={AutoCost: Evolving Intrinsic Cost for Zero-Violation Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26734}, DOI={10.1609/aaai.v37i12.26734}, abstractNote={Safety is a critical hurdle that limits the application of deep reinforcement learning to real-world control tasks. To this end, constrained reinforcement learning leverages cost functions to improve safety in constrained Markov decision process. However, constrained methods fail to achieve zero violation even when the cost limit is zero. This paper analyzes the reason for such failure, which suggests that a proper cost function plays an important role in constrained RL. Inspired by the analysis, we propose AutoCost, a simple yet effective framework that automatically searches for cost functions that help constrained RL to achieve zero-violation performance. We validate the proposed method and the searched cost function on the safety benchmark Safety Gym. We compare the performance of augmented agents that use our cost function to provide additive intrinsic costs to a Lagrangian-based policy learner and a constrained-optimization policy learner with baseline agents that use the same policy learners but with only extrinsic costs. 
Results show that the converged policies with intrinsic costs in all environments achieve zero constraint violation and comparable performance with baselines.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Tairan and Zhao, Weiye and Liu, Changliu}, year={2023}, month={Jun.}, pages={14847-14855} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26734/26506", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26734", + "pdf_size": 1344342, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14665857128315029258&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Robotics Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26343", + "title": "AutoGraph: Optimizing DNN Computation Graph for Parallel GPU Kernel Execution", + "track": "main", + "status": "Technical", + "abstract": "Deep learning frameworks optimize the computation graphs and intra-operator computations to boost the inference performance on GPUs, while inter-operator parallelism is usually ignored. In this paper, a unified framework, AutoGraph, is proposed to obtain highly optimized computation graphs in favor of parallel executions of GPU kernels. A novel dynamic programming algorithm, combined with backtracking search, is adopted to explore the optimal graph optimization solution, with the fast performance estimation from the mixed critical path cost. 
Accurate runtime information based on GPU Multi-Stream launched with CUDA Graph is utilized to determine the convergence of the optimization. Experimental results demonstrate that our method achieves up to 3.47x speedup over existing graph optimization methods. Moreover, AutoGraph outperforms state-of-the-art parallel kernel launch frameworks by up to 1.26x.", + "primary_area": "machine learning iv", + "author": "Yuxuan Zhao; Qi Sun; Zhuolun He; Yang Bai; Bei Yu", + "authorids": "", + "aff": "The Chinese University of Hong Kong; The Chinese University of Hong Kong; The Chinese University of Hong Kong; The Chinese University of Hong Kong + SmartMore; The Chinese University of Hong Kong", + "bibtex": "@article{Zhao_Sun_He_Bai_Yu_2023, title={AutoGraph: Optimizing DNN Computation Graph for Parallel GPU Kernel Execution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26343}, DOI={10.1609/aaai.v37i9.26343}, abstractNote={Deep learning frameworks optimize the computation graphs and intra-operator computations to boost the inference performance on GPUs, while inter-operator parallelism is usually ignored. In this paper, a unified framework, AutoGraph, is proposed to obtain highly optimized computation graphs in favor of parallel executions of GPU kernels. A novel dynamic programming algorithm, combined with backtracking search, is adopted to explore the optimal graph optimization solution, with the fast performance estimation from the mixed critical path cost. Accurate runtime information based on GPU Multi-Stream launched with CUDA Graph is utilized to determine the convergence of the optimization. Experimental results demonstrate that our method achieves up to 3.47x speedup over existing graph optimization methods. 
Moreover, AutoGraph outperforms state-of-the-art parallel kernel launch frameworks by up to 1.26x.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Yuxuan and Sun, Qi and He, Zhuolun and Bai, Yang and Yu, Bei}, year={2023}, month={Jun.}, pages={11354-11362} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26343/26115", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26343", + "pdf_size": 1230643, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6379565394166972634&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0+1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;SmartMore", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cuhk.edu.hk;", + "aff_unique_abbr": "CUHK;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-25836", + "title": "AutoInit: Analytic Signal-Preserving Weight Initialization for Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Neural networks require careful weight initialization to prevent signals from exploding or vanishing. Existing initialization schemes solve this problem in specific cases by assuming that the network has a certain activation function or topology. It is difficult to derive such weight initialization strategies, and modern architectures therefore often use these same initialization schemes even though their assumptions do not hold. This paper introduces AutoInit, a weight initialization algorithm that automatically adapts to different neural network architectures. 
By analytically tracking the mean and variance of signals as they propagate through the network, AutoInit appropriately scales the weights at each layer to avoid exploding or vanishing signals. Experiments demonstrate that AutoInit improves performance of convolutional, residual, and transformer networks across a range of activation function, dropout, weight decay, learning rate, and normalizer settings, and does so more reliably than data-dependent initialization methods. This flexibility allows AutoInit to initialize models for everything from small tabular tasks to large datasets such as ImageNet. Such generality turns out particularly useful in neural architecture search and in activation function discovery. In these settings, AutoInit initializes each candidate appropriately, making performance evaluations more accurate. AutoInit thus serves as an automatic configuration tool that makes design of new neural network architectures more robust. The AutoInit package provides a wrapper around TensorFlow models and is available at https://github.com/cognizant-ai-labs/autoinit.", + "primary_area": "machine learning i", + "author": "Garrett Bingham; Risto Miikkulainen", + "authorids": "", + "aff": "The University of Texas at Austin; Cognizant AI Labs", + "bibtex": "@article{Bingham_Miikkulainen_2023, title={AutoInit: Analytic Signal-Preserving Weight Initialization for Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25836}, DOI={10.1609/aaai.v37i6.25836}, abstractNote={Neural networks require careful weight initialization to prevent signals from exploding or vanishing. Existing initialization schemes solve this problem in specific cases by assuming that the network has a certain activation function or topology. It is difficult to derive such weight initialization strategies, and modern architectures therefore often use these same initialization schemes even though their assumptions do not hold. 
This paper introduces AutoInit, a weight initialization algorithm that automatically adapts to different neural network architectures. By analytically tracking the mean and variance of signals as they propagate through the network, AutoInit appropriately scales the weights at each layer to avoid exploding or vanishing signals. Experiments demonstrate that AutoInit improves performance of convolutional, residual, and transformer networks across a range of activation function, dropout, weight decay, learning rate, and normalizer settings, and does so more reliably than data-dependent initialization methods. This flexibility allows AutoInit to initialize models for everything from small tabular tasks to large datasets such as ImageNet. Such generality turns out particularly useful in neural architecture search and in activation function discovery. In these settings, AutoInit initializes each candidate appropriately, making performance evaluations more accurate. AutoInit thus serves as an automatic configuration tool that makes design of new neural network architectures more robust. 
The AutoInit package provides a wrapper around TensorFlow models and is available at https://github.com/cognizant-ai-labs/autoinit.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bingham, Garrett and Miikkulainen, Risto}, year={2023}, month={Jun.}, pages={6823-6833} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25836/25608", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25836", + "pdf_size": 4497829, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5228015070251524222&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.utexas.edu;cs.utexas.edu", + "email": "cs.utexas.edu;cs.utexas.edu", + "github": "https://github.com/cognizant-ai-labs/autoinit", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Texas at Austin;Cognizant", + "aff_unique_dep": ";AI Labs", + "aff_unique_url": "https://www.utexas.edu;https://www.cognizant.com", + "aff_unique_abbr": "UT Austin;Cognizant", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Austin;", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26220", + "title": "AutoNF: Automated Architecture Optimization of Normalizing Flows with Unconstrained Continuous Relaxation Admitting Optimal Discrete Solution", + "track": "main", + "status": "Technical", + "abstract": "Normalizing flows (NF) build upon invertible neural networks and have wide applications in probabilistic modeling. Currently, building a powerful yet computationally efficient flow model relies on empirical fine-tuning over a large design space. While introducing neural architecture search (NAS) to NF is desirable, the invertibility constraint of NF brings new challenges to existing NAS methods whose application is limited to unstructured neural networks. 
Developing efficient NAS methods specifically for NF remains an open problem. We present AutoNF, the first automated NF architectural optimization framework. First, we present a new mixture distribution formulation that allows efficient differentiable architecture search of flow models without violating the invertibility constraint. Second, under the new formulation, we convert the original NP-hard combinatorial NF architectural optimization problem to an unconstrained continuous relaxation admitting the discrete optimal architectural solution, circumventing the loss of optimality due to binarization in architectural optimization. We evaluate AutoNF with various density estimation datasets and show its superior performance-cost trade-offs over a set of existing hand-crafted baselines.", + "primary_area": "machine learning iii", + "author": "Yu Wang; J\u00e1n Drgo\u0148a; Jiaxin Zhang; Karthik Somayaji Nanjangud Suryanarayana; Malachi Schram; Frank Liu; Peng Li", + "authorids": "", + "aff": "University of California, Santa Barbara; Pacific Northwest National Laboratory; Intuit AI Research; University of California, Santa Barbara + Thomas Jefferson National Accelerator Facility; Thomas Jefferson National Accelerator Facility; Oak Ridge National Laboratory; University of California, Santa Barbara", + "bibtex": "@article{Wang_Drgo\u0148a_Zhang_Nanjangud Suryanarayana_Schram_Liu_Li_2023, title={AutoNF: Automated Architecture Optimization of Normalizing Flows with Unconstrained Continuous Relaxation Admitting Optimal Discrete Solution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26220}, DOI={10.1609/aaai.v37i8.26220}, abstractNote={Normalizing flows (NF) build upon invertible neural networks and have wide applications in probabilistic modeling. Currently, building a powerful yet computationally efficient flow model relies on empirical fine-tuning over a large design space. 
While introducing neural architecture search (NAS) to NF is desirable, the invertibility constraint of NF brings new challenges to existing NAS methods whose application is limited to unstructured neural networks. Developing efficient NAS methods specifically for NF remains an open problem. We present AutoNF, the first automated NF architectural optimization framework. First, we present a new mixture distribution formulation that allows efficient differentiable architecture search of flow models without violating the invertibility constraint. Second, under the new formulation, we convert the original NP-hard combinatorial NF architectural optimization problem to an unconstrained continuous relaxation admitting the discrete optimal architectural solution, circumventing the loss of optimality due to binarization in architectural optimization. We evaluate AutoNF with various density estimation datasets and show its superior performance-cost trade-offs over a set of existing hand-crafted baselines.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yu and Drgo\u0148a, J\u00e1n and Zhang, Jiaxin and Nanjangud Suryanarayana, Karthik Somayaji and Schram, Malachi and Liu, Frank and Li, Peng}, year={2023}, month={Jun.}, pages={10244-10252} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26220/25992", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26220", + "pdf_size": 778016, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9398570031382529898&as_sdt=800005&sciodt=0,15&hl=en", + "gs_version_total": 2, + "aff_domain": "ucsb.edu;ucsb.edu;ucsb.edu;pnnl.gov;gmail.com;jlab.org;ornl.gov", + "email": "ucsb.edu;ucsb.edu;ucsb.edu;pnnl.gov;gmail.com;jlab.org;ornl.gov", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;0+3;3;4;0", + "aff_unique_norm": "University of California, Santa Barbara;Pacific Northwest National Laboratory;Intuit;Thomas 
Jefferson National Accelerator Facility;Oak Ridge National Laboratory", + "aff_unique_dep": ";;Intuit AI Research;;", + "aff_unique_url": "https://www.ucsb.edu;https://www.pnnl.gov;https://intuit.com/;https://www.jlab.org;https://www.ornl.gov", + "aff_unique_abbr": "UCSB;PNNL;Intuit;Jefferson Lab;ORNL", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Santa Barbara;", + "aff_country_unique_index": "0;0;0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25616", + "title": "AutoSTL: Automated Spatio-Temporal Multi-Task Learning", + "track": "main", + "status": "Technical", + "abstract": "Spatio-temporal prediction plays a critical role in smart city construction. Jointly modeling multiple spatio-temporal tasks can further promote an intelligent city life by integrating their inseparable relationship. However, existing studies fail to address this joint learning problem well, which generally solve tasks individually or a fixed task combination. The challenges lie in the tangled relation between different properties, the demand for supporting flexible combinations of tasks and the complex spatio-temporal dependency. To cope with the problems above, we propose an Automated Spatio-Temporal multi-task Learning (AutoSTL) method to handle multiple spatio-temporal tasks jointly. Firstly, we propose a scalable architecture consisting of advanced spatio-temporal operations to exploit the complicated dependency. Shared modules and feature fusion mechanism are incorporated to further capture the intrinsic relationship between tasks. Furthermore, our model automatically allocates the operations and fusion weight. Extensive experiments on benchmark datasets verified that our model achieves state-of-the-art performance. 
As we can know, AutoSTL is the first automated spatio-temporal multi-task learning method.", + "primary_area": "data mining and knowledge management", + "author": "Zijian Zhang; Xiangyu Zhao; Hao Miao; Chunxu Zhang; Hongwei Zhao; Junbo Zhang", + "authorids": "", + "aff": "College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, Jilin University, China+Hong Kong Institute for Data Science, City University of Hong Kong, Hong Kong; School of Data Science, City University of Hong Kong, Hong Kong+Hong Kong Institute for Data Science, City University of Hong Kong, Hong Kong; Department of Computer Science, Aalborg University, Denmark; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, Jilin University, China; JD Intelligent Cities Research, China; JD iCity, JD Technology, China", + "bibtex": "@article{Zhang_Zhao_Miao_Zhang_Zhao_Zhang_2023, title={AutoSTL: Automated Spatio-Temporal Multi-Task Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25616}, DOI={10.1609/aaai.v37i4.25616}, abstractNote={Spatio-temporal prediction plays a critical role in smart city construction. Jointly modeling multiple spatio-temporal tasks can further promote an intelligent city life by integrating their inseparable relationship. However, existing studies fail to address this joint learning problem well, which generally solve tasks individually or a fixed task combination. The challenges lie in the tangled relation between different properties, the demand for supporting flexible combinations of tasks and the complex spatio-temporal dependency. To cope with the problems above, we propose an Automated Spatio-Temporal multi-task Learning (AutoSTL) method to handle multiple spatio-temporal tasks jointly. 
Firstly, we propose a scalable architecture consisting of advanced spatio-temporal operations to exploit the complicated dependency. Shared modules and feature fusion mechanism are incorporated to further capture the intrinsic relationship between tasks. Furthermore, our model automatically allocates the operations and fusion weight. Extensive experiments on benchmark datasets verified that our model achieves state-of-the-art performance. As we can know, AutoSTL is the first automated spatio-temporal multi-task learning method.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zijian and Zhao, Xiangyu and Miao, Hao and Zhang, Chunxu and Zhao, Hongwei and Zhang, Junbo}, year={2023}, month={Jun.}, pages={4902-4910} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25616/25388", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25616", + "pdf_size": 521431, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13685905009097764230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.jlu.edu.cn;mails.jlu.edu.cn;cityu.edu.hk;cs.aau.dk;jlu.edu.cn;outlook.com", + "email": "mails.jlu.edu.cn;mails.jlu.edu.cn;cityu.edu.hk;cs.aau.dk;jlu.edu.cn;outlook.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0+1;1+1;2;0+0;3;4", + "aff_unique_norm": "Jilin University;City University of Hong Kong;Aalborg University;JD Intelligent Cities Research;JD.com", + "aff_unique_dep": "College of Computer Science and Technology;Hong Kong Institute for Data Science;Department of Computer Science;;", + "aff_unique_url": "http://www.jlu.edu.cn;https://www.cityu.edu.hk;https://www.aau.dk;;https://www.jd.com", + "aff_unique_abbr": "JLU;CityU;AAU;;JD", + "aff_campus_unique_index": "1;1+1;", + "aff_campus_unique": ";Hong Kong", + "aff_country_unique_index": "0+0+0;0+0;1;0+0;0;0", + "aff_country_unique": "China;Denmark" + }, + { + "id": 
"article-25425", + "title": "AutoStegaFont: Synthesizing Vector Fonts for Hiding Information in Documents", + "track": "main", + "status": "Technical", + "abstract": "Hiding information in text documents has been a hot topic recently, with the most typical schemes of utilizing fonts. By constructing several fonts with similar appearances, information can be effectively represented and embedded in documents. However, due to the unstructured characteristic, font vectors are more difficult to synthesize than font images. Existing methods mainly use handcrafted features to design the fonts manually, which is time-consuming and labor-intensive. Moreover, due to the diversity of fonts, handcrafted features are not generalizable to different fonts. Besides, in practice, since documents might be distorted through transmission, ensuring extractability under distortions is also an important requirement. Therefore, three requirements are imposed on vector font generation in this domain: automaticity, generalizability, and robustness. However, none of the existing methods can satisfy these requirements well and simultaneously.\nTo satisfy the above requirements, we propose AutoStegaFont, an automatic vector font synthesis scheme for hiding information in documents. Specifically, we design a two-stage and dual-modality learning framework. In the first stage, we jointly train an encoder and a decoder to invisibly encode the font images with different information. To ensure robustness, we target designing a noise layer to work with the encoder and decoder during training. In the second stage, we employ a differentiable rasterizer to establish a connection between the image and the vector modality. Then, we design an optimization algorithm to convey the information from the encoded image to the corresponding vector. Thus the encoded font vectors can be automatically generated. 
Extensive experiments demonstrate the superior performance of our scheme in automatically synthesizing vector fonts for hiding information in documents, with robustness to distortions caused by low-resolution screenshots, printing, and photography. Besides, the proposed framework has better generalizability to fonts with diverse styles and languages.", + "primary_area": "computer vision iii", + "author": "Xi Yang; Jie Zhang; Han Fang; Chang Liu; Zehua Ma; Weiming Zhang; Nenghai Yu", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China+University of Waterloo; National University of Singapore; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Yang_Zhang_Fang_Liu_Ma_Zhang_Yu_2023, title={AutoStegaFont: Synthesizing Vector Fonts for Hiding Information in Documents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25425}, DOI={10.1609/aaai.v37i3.25425}, abstractNote={Hiding information in text documents has been a hot topic recently, with the most typical schemes of utilizing fonts. By constructing several fonts with similar appearances, information can be effectively represented and embedded in documents. However, due to the unstructured characteristic, font vectors are more difficult to synthesize than font images. Existing methods mainly use handcrafted features to design the fonts manually, which is time-consuming and labor-intensive. Moreover, due to the diversity of fonts, handcrafted features are not generalizable to different fonts. Besides, in practice, since documents might be distorted through transmission, ensuring extractability under distortions is also an important requirement. Therefore, three requirements are imposed on vector font generation in this domain: automaticity, generalizability, and robustness. 
However, none of the existing methods can satisfy these requirements well and simultaneously.\nTo satisfy the above requirements, we propose AutoStegaFont, an automatic vector font synthesis scheme for hiding information in documents. Specifically, we design a two-stage and dual-modality learning framework. In the first stage, we jointly train an encoder and a decoder to invisibly encode the font images with different information. To ensure robustness, we target designing a noise layer to work with the encoder and decoder during training. In the second stage, we employ a differentiable rasterizer to establish a connection between the image and the vector modality. Then, we design an optimization algorithm to convey the information from the encoded image to the corresponding vector. Thus the encoded font vectors can be automatically generated. Extensive experiments demonstrate the superior performance of our scheme in automatically synthesizing vector fonts for hiding information in documents, with robustness to distortions caused by low-resolution screenshots, printing, and photography. 
Besides, the proposed framework has better generalizability to fonts with diverse styles and languages.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Xi and Zhang, Jie and Fang, Han and Liu, Chang and Ma, Zehua and Zhang, Weiming and Yu, Nenghai}, year={2023}, month={Jun.}, pages={3198-3205} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25425/25197", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25425", + "pdf_size": 695052, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1428063986777282212&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;gmail.com;nus.edu.sg;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;gmail.com;nus.edu.sg;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0+1;2;0;0;0;0", + "aff_unique_norm": "University of Science and Technology of China;University of Waterloo;National University of Singapore", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://uwaterloo.ca;https://www.nus.edu.sg", + "aff_unique_abbr": "USTC;UW;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;2;0;0;0;0", + "aff_country_unique": "China;Canada;Singapore" + }, + { + "id": "article-26147", + "title": "Automata Cascades: Expressivity and Sample Complexity", + "track": "main", + "status": "Technical", + "abstract": "Every automaton can be decomposed into a cascade of basic prime automata. This is the Prime Decomposition Theorem by Krohn and Rhodes. Guided by this theory, we propose automata cascades as a structured, modular, way to describe automata as complex systems made of many components, each implementing a specific functionality. 
Any automaton can serve as a component; using specific components allows for a fine-grained control of the expressivity of the resulting class of automata; using prime automata as components implies specific expressivity guarantees. Moreover, specifying automata as cascades allows for describing the sample complexity of automata in terms of their components. We show that the sample complexity is linear in the number of components and the maximum complexity of a single component, modulo logarithmic factors. This opens to the possibility of learning automata representing large dynamical systems consisting of many parts interacting with each other. It is in sharp contrast with the established understanding of the sample complexity of automata, described in terms of the overall number of states and input letters, which implies that it is only possible to learn automata where the number of states is linear in the amount of data available. Instead our results show that one can learn automata with a number of states that is exponential in the amount of data available.", + "primary_area": "machine learning iii", + "author": "Alessandro Ronca; Nadezda Alexandrovna Knorozova; Giuseppe De Giacomo", + "authorids": "", + "aff": "DIAG, Sapienza University of Rome; RelationalAI + IFI, University of Zurich; DIAG, Sapienza University of Rome + Computer Science Department, University of Oxford", + "bibtex": "@article{Ronca_Knorozova_De Giacomo_2023, title={Automata Cascades: Expressivity and Sample Complexity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26147}, DOI={10.1609/aaai.v37i8.26147}, abstractNote={Every automaton can be decomposed into a cascade of basic prime automata. This is the Prime Decomposition Theorem by Krohn and Rhodes. Guided by this theory, we propose automata cascades as a structured, modular, way to describe automata as complex systems made of many components, each implementing a specific functionality. 
Any automaton can serve as a component; using specific components allows for a fine-grained control of the expressivity of the resulting class of automata; using prime automata as components implies specific expressivity guarantees. Moreover, specifying automata as cascades allows for describing the sample complexity of automata in terms of their components. We show that the sample complexity is linear in the number of components and the maximum complexity of a single component, modulo logarithmic factors. This opens to the possibility of learning automata representing large dynamical systems consisting of many parts interacting with each other. It is in sharp contrast with the established understanding of the sample complexity of automata, described in terms of the overall number of states and input letters, which implies that it is only possible to learn automata where the number of states is linear in the amount of data available. Instead our results show that one can learn automata with a number of states that is exponential in the amount of data available.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ronca, Alessandro and Knorozova, Nadezda Alexandrovna and De Giacomo, Giuseppe}, year={2023}, month={Jun.}, pages={9588-9595} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26147/25919", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26147", + "pdf_size": 181345, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13020306528410022844&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": "diag.uniroma1.it;relational.ai;cs.ox.ac.uk", + "email": "diag.uniroma1.it;relational.ai;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0+3", + "aff_unique_norm": "Sapienza University of Rome;RelationalAI;University of Zurich;University of Oxford", + "aff_unique_dep": "DIAG;;Institute of Informatics 
(IFI);Computer Science Department", + "aff_unique_url": "https://www.uniroma1.it;https://www.relationalai.com;https://www.uzh.ch;https://www.ox.ac.uk", + "aff_unique_abbr": "Sapienza;RelationalAI;UZH;Oxford", + "aff_campus_unique_index": "0;;0+2", + "aff_campus_unique": "Rome;;Oxford", + "aff_country_unique_index": "0;1+2;0+3", + "aff_country_unique": "Italy;United States;Switzerland;United Kingdom" + }, + { + "id": "article-25796", + "title": "Automated Verification of Propositional Agent Abstraction for Classical Planning via CTLK Model Checking", + "track": "main", + "status": "Technical", + "abstract": "Abstraction has long been an effective mechanism to help find a solution in classical planning. Agent abstraction, based on the situation calculus, is a promising explainable framework for agent planning, yet its automation is still far from being tackled. In this paper, we focus on a propositional version of agent abstraction designed for finite-state systems. We investigate the automated verification of the existence of propositional agent abstraction, given a finite-state system and a mapping indicating an abstraction for it. By formalizing sound, complete and deterministic properties of abstractions in a general framework, we show that the verification task can be reduced to the task of model checking against CTLK specifications. 
We implemented a prototype system, and validated the viability of our approach through experimentation on several domains from classical planning.", + "primary_area": "knowledge representation and reasoning", + "author": "Kailun Luo", + "authorids": "", + "aff": "School of Cyberspace Security, Dongguan University of Technology, Dongguan 523808, China", + "bibtex": "@article{Luo_2023, title={Automated Verification of Propositional Agent Abstraction for Classical Planning via CTLK Model Checking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25796}, DOI={10.1609/aaai.v37i5.25796}, abstractNote={Abstraction has long been an effective mechanism to help find a solution in classical planning. Agent abstraction, based on the situation calculus, is a promising explainable framework for agent planning, yet its automation is still far from being tackled. In this paper, we focus on a propositional version of agent abstraction designed for finite-state systems. We investigate the automated verification of the existence of propositional agent abstraction, given a finite-state system and a mapping indicating an abstraction for it. By formalizing sound, complete and deterministic properties of abstractions in a general framework, we show that the verification task can be reduced to the task of model checking against CTLK specifications. 
We implemented a prototype system, and validated the viability of our approach through experimentation on several domains from classical planning.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Kailun}, year={2023}, month={Jun.}, pages={6475-6482} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25796/25568", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25796", + "pdf_size": 155560, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2114016045293870853&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 2, + "aff_domain": "dgut.edu.cn", + "email": "dgut.edu.cn", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Dongguan University of Technology", + "aff_unique_dep": "School of Cyberspace Security", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Dongguan", + "aff_country_unique_index": "0", + "aff_country_unique": "China" + }, + { + "id": "article-26425", + "title": "Automated Verification of Social Laws in Numeric Settings", + "track": "main", + "status": "Technical", + "abstract": "It is possible for agents operating in a shared environment to interfere with one another. One mechanism of coordination is called Social Law. Enacting such a law in a multi-agent setting restricts agents' behaviors. Robustness, in this case, ensures that the agents do not harmfully interfere with each other and that each agent achieves its goals regardless of what other agents do. Previous work on social law verification examined only the case of boolean state variables. However, many real-world problems require reasoning with numeric variables. Moreover, numeric fluents allow a more compact representation of multiple planning problems.\n\nIn this paper, we develop a method to verify whether a given social law is robust via compilation to numeric planning. 
A solution to this compilation constitutes a counterexample to the robustness of the problem, i.e., evidence of cross-agent conflict. Thus, the social law is robust if and only if the proposed compilation is unsolvable. We empirically verify robustness in multiple domains using state-of-the-art numeric planners. Additionally, this compilation raises a challenge by generating a set of non-trivial numeric domains where unsolvability should be either proved or disproved.", + "primary_area": "planning routing and scheduling", + "author": "Ronen Nir; Alexander Shleyfman; Erez Karpas", + "authorids": "", + "aff": "Technion \u2013 Israel Institute of Technology; Bar-Ilan University; Technion \u2013 Israel Institute of Technology", + "bibtex": "@article{Nir_Shleyfman_Karpas_2023, title={Automated Verification of Social Laws in Numeric Settings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26425}, DOI={10.1609/aaai.v37i10.26425}, abstractNote={It is possible for agents operating in a shared environment to interfere with one another. One mechanism of coordination is called Social Law. Enacting such a law in a multi-agent setting restricts agents\u2019 behaviors. Robustness, in this case, ensures that the agents do not harmfully interfere with each other and that each agent achieves its goals regardless of what other agents do. Previous work on social law verification examined only the case of boolean state variables. However, many real-world problems require reasoning with numeric variables. Moreover, numeric fluents allow a more compact representation of multiple planning problems. In this paper, we develop a method to verify whether a given social law is robust via compilation to numeric planning. A solution to this compilation constitutes a counterexample to the robustness of the problem, i.e., evidence of cross-agent conflict. Thus, the social law is robust if and only if the proposed compilation is unsolvable. 
We empirically verify robustness in multiple domains using state-of-the-art numeric planners. Additionally, this compilation raises a challenge by generating a set of non-trivial numeric domains where unsolvability should be either proved or disproved.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nir, Ronen and Shleyfman, Alexander and Karpas, Erez}, year={2023}, month={Jun.}, pages={12087-12094} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26425/26197", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26425", + "pdf_size": 149329, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=733220953124070590&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;biu.ac.il;technion.ac.il", + "email": "gmail.com;biu.ac.il;technion.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Technion \u2013 Israel Institute of Technology;Bar-Ilan University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.technion.ac.il/en/;https://www.biu.ac.il", + "aff_unique_abbr": "Technion;BIU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25769", + "title": "Automatically Verifying Expressive Epistemic Properties of Programs", + "track": "main", + "status": "Technical", + "abstract": "We propose a new approach to the verification of epistemic properties of programmes. First, we introduce the new ``program-epistemic'' logic L_PK, which is strictly richer and more general than similar formalisms appearing in the literature. To solve the verification problem in an efficient way, we introduce a translation from our language L_PK into first-order logic. 
Then, we show and prove correct a reduction from the model checking problem for program-epistemic formulas to the satisfiability of their first-order translation. Both our logic and our translation can handle richer specification w.r.t. the state of the art, allowing us to express the knowledge of agents about facts pertaining to programs (i.e., agents' knowledge before a program is executed as well as after is has been executed). Furthermore, we implement our translation in Haskell in a general way (i.e., independently of the programs in the logical statements), and we use existing SMT-solvers to check satisfaction of L_PK formulas on a benchmark example in the AI/agency field.", + "primary_area": "knowledge representation and reasoning", + "author": "Francesco Belardinelli; Ioana Boureanu; Vadim Malvone; Fortunat Rajaona", + "authorids": "", + "aff": "Imperial College London; Surrey Centre for Cyber Security, University of Surrey; T\u00e9l\u00e9com Paris; Surrey Centre for Cyber Security, University of Surrey", + "bibtex": "@article{Belardinelli_Boureanu_Malvone_Rajaona_2023, title={Automatically Verifying Expressive Epistemic Properties of Programs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25769}, DOI={10.1609/aaai.v37i5.25769}, abstractNote={We propose a new approach to the verification of epistemic properties of programmes. First, we introduce the new ``program-epistemic\u2019\u2019 logic L_PK, which is strictly richer and more general than similar formalisms appearing in the literature. To solve the verification problem in an efficient way, we introduce a translation from our language L_PK into first-order logic. Then, we show and prove correct a reduction from the model checking problem for program-epistemic formulas to the satisfiability of their first-order translation. Both our logic and our translation can handle richer specification w.r.t. 
the state of the art, allowing us to express the knowledge of agents about facts pertaining to programs (i.e., agents\u2019 knowledge before a program is executed as well as after is has been executed). Furthermore, we implement our translation in Haskell in a general way (i.e., independently of the programs in the logical statements), and we use existing SMT-solvers to check satisfaction of L_PK formulas on a benchmark example in the AI/agency field.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Belardinelli, Francesco and Boureanu, Ioana and Malvone, Vadim and Rajaona, Fortunat}, year={2023}, month={Jun.}, pages={6245-6252} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25769/25541", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25769", + "pdf_size": 168827, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5860376601659529263&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "imperial.ac.uk;surrey.ac.uk;telecom-paris.fr;surrey.ac.uk", + "email": "imperial.ac.uk;surrey.ac.uk;telecom-paris.fr;surrey.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Imperial College London;University of Surrey;T\u00e9l\u00e9com Paris", + "aff_unique_dep": ";Surrey Centre for Cyber Security;", + "aff_unique_url": "https://www.imperial.ac.uk;https://www.surrey.ac.uk;https://www.telecom-paris.fr", + "aff_unique_abbr": "ICL;UniS;T\u00e9l\u00e9com Paris", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Surrey", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United Kingdom;France" + }, + { + "id": "article-26881", + "title": "Autonomous Agents: An Advanced Course on AI Integration and Deployment", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "A majority of the courses on autonomous systems focus on robotics, despite the 
growing use of autonomous agents in a wide spectrum of applications, from smart homes to intelligent traffic control. Our goal in designing a new senior-level undergraduate course is to teach the integration of a variety of AI techniques in uncertain environments, without the dependence on topics such as robotic control and localization. We chose the application of an autonomous greenhouse to frame our discussions and our student projects because of the greenhouse's self-contained nature and objective metrics for successfully growing plants. We detail our curriculum design, including lecture topics and assignments, and our iterative process for updating the course over the last four years. Finally, we present some student feedback about the course and opportunities for future improvement.", + "primary_area": "", + "author": "Stephanie Rosenthal; Reid Simmons", + "authorids": "", + "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", + "bibtex": "@article{Rosenthal_Simmons_2024, title={Autonomous Agents: An Advanced Course on AI Integration and Deployment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26881}, DOI={10.1609/aaai.v37i13.26881}, abstractNote={A majority of the courses on autonomous systems focus on robotics, despite the growing use of autonomous agents in a wide spectrum of applications, from smart homes to intelligent traffic control. Our goal in designing a new senior-level undergraduate course is to teach the integration of a variety of AI techniques in uncertain environments, without the dependence on topics such as robotic control and localization. We chose the application of an autonomous greenhouse to frame our discussions and our student projects because of the greenhouse\u2019s self-contained nature and objective metrics for successfully growing plants. 
We detail our curriculum design, including lecture topics and assignments, and our iterative process for updating the course over the last four years. Finally, we present some student feedback about the course and opportunities for future improvement.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rosenthal, Stephanie and Simmons, Reid}, year={2024}, month={Jul.}, pages={15843-15850} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26881/26653", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26881", + "pdf_size": 7080291, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5638889228997894655&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26479", + "title": "Avocodo: Generative Adversarial Network for Artifact-Free Vocoder", + "track": "main", + "status": "Technical", + "abstract": "Neural vocoders based on the generative adversarial neural network (GAN) have been widely used due to their fast inference speed and lightweight networks while generating high-quality speech waveforms. Since the perceptually important speech components are primarily concentrated in the low-frequency bands, most GAN-based vocoders perform multi-scale analysis that evaluates downsampled speech waveforms. This multi-scale analysis helps the generator improve speech intelligibility. 
However, in preliminary experiments, we discovered that the multi-scale analysis which focuses on the low-frequency bands causes unintended artifacts, e.g., aliasing and imaging artifacts, which degrade the synthesized speech waveform quality. Therefore, in this paper, we investigate the relationship between these artifacts and GAN-based vocoders and propose a GAN-based vocoder, called Avocodo, that allows the synthesis of high-fidelity speech with reduced artifacts. We introduce two kinds of discriminators to evaluate speech waveforms in various perspectives: a collaborative multi-band discriminator and a sub-band discriminator. We also utilize a pseudo quadrature mirror filter bank to obtain downsampled multi-band speech waveforms while avoiding aliasing. According to experimental results, Avocodo outperforms baseline GAN-based vocoders, both objectively and subjectively, while reproducing speech with fewer artifacts.", + "primary_area": "speech natural language processing", + "author": "Taejun Bak; Junmo Lee; Hanbin Bae; Jinhyeok Yang; Jae-Sung Bae; Young-Sun Joo", + "authorids": "", + "aff": "AI Center, NCSOFT, Seongnam, Korea; SK Telecom, Seoul, Korea; Samsung Research, Seoul, Korea + NCSOFT, Seongnam, Korea; Supertone Inc., Seoul, Korea; Samsung Research, Seoul, Korea; AI Center, NCSOFT, Seongnam, Korea", + "bibtex": "@article{Bak_Lee_Bae_Yang_Bae_Joo_2023, title={Avocodo: Generative Adversarial Network for Artifact-Free Vocoder}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26479}, DOI={10.1609/aaai.v37i11.26479}, abstractNote={Neural vocoders based on the generative adversarial neural network (GAN) have been widely used due to their fast inference speed and lightweight networks while generating high-quality speech waveforms. Since the perceptually important speech components are primarily concentrated in the low-frequency bands, most GAN-based vocoders perform multi-scale analysis that evaluates downsampled speech waveforms. 
This multi-scale analysis helps the generator improve speech intelligibility. However, in preliminary experiments, we discovered that the multi-scale analysis which focuses on the low-frequency bands causes unintended artifacts, e.g., aliasing and imaging artifacts, which degrade the synthesized speech waveform quality. Therefore, in this paper, we investigate the relationship between these artifacts and GAN-based vocoders and propose a GAN-based vocoder, called Avocodo, that allows the synthesis of high-fidelity speech with reduced artifacts. We introduce two kinds of discriminators to evaluate speech waveforms in various perspectives: a collaborative multi-band discriminator and a sub-band discriminator. We also utilize a pseudo quadrature mirror filter bank to obtain downsampled multi-band speech waveforms while avoiding aliasing. According to experimental results, Avocodo outperforms baseline GAN-based vocoders, both objectively and subjectively, while reproducing speech with fewer artifacts.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bak, Taejun and Lee, Junmo and Bae, Hanbin and Yang, Jinhyeok and Bae, Jae-Sung and Joo, Young-Sun}, year={2023}, month={Jun.}, pages={12562-12570} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26479/26251", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26479", + "pdf_size": 9043389, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1948323494694230309&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ncsoft.com;sk.com; ; ; ;", + "email": "ncsoft.com;sk.com; ; ; ;", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2+0;3;2;0", + "aff_unique_norm": "NCSOFT;SK Telecom;Samsung Research;Supertone Inc.", + "aff_unique_dep": "AI Center;;;", + "aff_unique_url": "https://www.ncsoft.com;https://www.sktelecom.com;https://www.samsung.com/global/research/;", + 
"aff_unique_abbr": "NCSOFT;SKT;Samsung;", + "aff_campus_unique_index": "0;1;1+0;1;0", + "aff_campus_unique": "Seongnam;Seoul;", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "Korea" + }, + { + "id": "article-26582", + "title": "BERT-ERC: Fine-Tuning BERT Is Enough for Emotion Recognition in Conversation", + "track": "main", + "status": "Technical", + "abstract": "Previous works on emotion recognition in conversation (ERC) follow a two-step paradigm, which can be summarized as first producing context-independent features via fine-tuning pretrained language models (PLMs) and then analyzing contextual information and dialogue structure information among the extracted features. However, we discover that this paradigm has several limitations. Accordingly, we propose a novel paradigm, i.e., exploring contextual information and dialogue structure information in the fine-tuning step, and adapting the PLM to the ERC task in terms of input text, classification structure, and training strategy. Furthermore, we develop our model BERT-ERC according to the proposed paradigm, which improves ERC performance in three aspects, namely suggestive text, fine-grained classification module, and two-stage training. Compared to existing methods, BERT-ERC achieves substantial improvement on four datasets, indicating its effectiveness and generalization capability. Besides, we also set up the limited resources scenario and the online prediction scenario to approximate real-world scenarios. 
Extensive experiments demonstrate that the proposed paradigm significantly outperforms the previous one and can be adapted to various scenes.", + "primary_area": "speech natural language processing", + "author": "Xiangyu Qin; Zhiyu Wu; Tingting Zhang; Yanran Li; Jian Luan; Bin Wang; Li Wang; Jinshi Cui", + "authorids": "", + "aff": "School of Intelligence Science and Technology, Peking University+Xiaomi AI Lab; School of Intelligence Science and Technology, Peking University+Xiaomi AI Lab; School of Intelligence Science and Technology, Peking University; Xiaomi AI Lab; Xiaomi AI Lab; Xiaomi AI Lab; School of Psychological and Cognitive Sciences and Beijing Key Laboratory of Behavior and Mental Health, Peking University; School of Intelligence Science and Technology, Peking University", + "bibtex": "@article{Qin_Wu_Zhang_Li_Luan_Wang_Wang_Cui_2023, title={BERT-ERC: Fine-Tuning BERT Is Enough for Emotion Recognition in Conversation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26582}, DOI={10.1609/aaai.v37i11.26582}, abstractNote={Previous works on emotion recognition in conversation (ERC) follow a two-step paradigm, which can be summarized as first producing context-independent features via fine-tuning pretrained language models (PLMs) and then analyzing contextual information and dialogue structure information among the extracted features. However, we discover that this paradigm has several limitations. Accordingly, we propose a novel paradigm, i.e., exploring contextual information and dialogue structure information in the fine-tuning step, and adapting the PLM to the ERC task in terms of input text, classification structure, and training strategy. Furthermore, we develop our model BERT-ERC according to the proposed paradigm, which improves ERC performance in three aspects, namely suggestive text, fine-grained classification module, and two-stage training. 
Compared to existing methods, BERT-ERC achieves substantial improvement on four datasets, indicating its effectiveness and generalization capability. Besides, we also set up the limited resources scenario and the online prediction scenario to approximate real-world scenarios. Extensive experiments demonstrate that the proposed paradigm significantly outperforms the previous one and can be adapted to various scenes.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Xiangyu and Wu, Zhiyu and Zhang, Tingting and Li, Yanran and Luan, Jian and Wang, Bin and Wang, Li and Cui, Jinshi}, year={2023}, month={Jun.}, pages={13492-13500} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26582/26354", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26582", + "pdf_size": 4688578, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4751045641495860372&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;gmail.com;gmail.com;hotmail.com;xiaomi.com;pku.edu.cn;cis.pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn;gmail.com;gmail.com;hotmail.com;xiaomi.com;pku.edu.cn;cis.pku.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0;1;1;1;0;0", + "aff_unique_norm": "Peking University;Xiaomi Corporation", + "aff_unique_dep": "School of Intelligence Science and Technology;Xiaomi AI Lab", + "aff_unique_url": "http://www.pku.edu.cn;https://www.xiaomi.com", + "aff_unique_abbr": "PKU;Xiaomi", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25470", + "title": "BEST: BERT Pre-training for Sign Language Recognition with Coupling Tokenization", + "track": "main", + "status": "Technical", + "abstract": "In this work, we are dedicated to leveraging the BERT pre-training success and 
modeling the domain-specific statistics to fertilize the sign language recognition~(SLR) model. Considering the dominance of hand and body in sign language expression, we organize them as pose triplet units and feed them into the Transformer backbone in a frame-wise manner. Pre-training is performed via reconstructing the masked triplet unit from the corrupted input sequence, which learns the hierarchical correlation context cues among internal and external triplet units. Notably, different from the highly semantic word token in BERT, the pose unit is a low-level signal originally locating in continuous space, which prevents the direct adoption of the BERT cross entropy objective. To this end, we bridge this semantic gap via coupling tokenization of the triplet unit. It adaptively extracts the discrete pseudo label from the pose triplet unit, which represents the semantic gesture / body state. After pre-training, we fine-tune the pre-trained encoder on the downstream SLR task, jointly with the newly added task-specific layer. 
Extensive experiments are conducted to validate the effectiveness of our proposed method, achieving new state-of-the-art performance on all four benchmarks with a notable gain.", + "primary_area": "computer vision iii", + "author": "Weichao Zhao; Hezhen Hu; Wengang Zhou; Jiaxin Shi; Houqiang Li", + "authorids": "", + "aff": "CAS Key Laboratory of GIPAS, EEIS Department, University of Science and Technology of China (USTC); CAS Key Laboratory of GIPAS, EEIS Department, University of Science and Technology of China (USTC); CAS Key Laboratory of GIPAS, EEIS Department, University of Science and Technology of China (USTC)+Institute of Artificial Intelligence, Hefei Comprehensive National Science Center; Huawei Inc.; CAS Key Laboratory of GIPAS, EEIS Department, University of Science and Technology of China (USTC)+Institute of Artificial Intelligence, Hefei Comprehensive National Science Center", + "bibtex": "@article{Zhao_Hu_Zhou_Shi_Li_2023, title={BEST: BERT Pre-training for Sign Language Recognition with Coupling Tokenization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25470}, DOI={10.1609/aaai.v37i3.25470}, abstractNote={In this work, we are dedicated to leveraging the BERT pre-training success and modeling the domain-specific statistics to fertilize the sign language recognition~(SLR) model. Considering the dominance of hand and body in sign language expression, we organize them as pose triplet units and feed them into the Transformer backbone in a frame-wise manner. Pre-training is performed via reconstructing the masked triplet unit from the corrupted input sequence, which learns the hierarchical correlation context cues among internal and external triplet units. Notably, different from the highly semantic word token in BERT, the pose unit is a low-level signal originally locating in continuous space, which prevents the direct adoption of the BERT cross entropy objective. 
To this end, we bridge this semantic gap via coupling tokenization of the triplet unit. It adaptively extracts the discrete pseudo label from the pose triplet unit, which represents the semantic gesture / body state. After pre-training, we fine-tune the pre-trained encoder on the downstream SLR task, jointly with the newly added task-specific layer. Extensive experiments are conducted to validate the effectiveness of our proposed method, achieving new state-of-the-art performance on all four benchmarks with a notable gain.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Weichao and Hu, Hezhen and Zhou, Wengang and Shi, Jiaxin and Li, Houqiang}, year={2023}, month={Jun.}, pages={3597-3605} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25470/25242", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25470", + "pdf_size": 613797, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5278309598269839972&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;huawei.com;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;huawei.com;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;2;0+1", + "aff_unique_norm": "University of Science and Technology of China;Hefei Comprehensive National Science Center;Huawei", + "aff_unique_dep": "EEIS Department;Institute of Artificial Intelligence;", + "aff_unique_url": "http://www.ustc.edu.cn;http://www.hfcn.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "USTC;;Huawei", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Hefei", + "aff_country_unique_index": "0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25629", + "title": "BETA-CD: A Bayesian Meta-Learned Cognitive Diagnosis Framework for Personalized Learning", + "track": "main", + "status": "Technical", + 
"abstract": "Personalized learning is a promising educational approach that aims to provide high-quality personalized services for each student with minimum demands for practice data. The key to achieving that lies in the cognitive diagnosis task, which estimates the cognitive state of the student through his/her logged data of doing practice quizzes. Nevertheless, in the personalized learning scenario, existing cognitive diagnosis models suffer from the inability to (1) quickly adapt to new students using a small amount of data, and (2) measure the reliability of the diagnosis result to avoid improper services that mismatch the student's actual state. In this paper, we propose a general Bayesian mETA-learned Cognitive Diagnosis framework (BETA-CD), which addresses the two challenges by prior knowledge exploitation and model uncertainty quantification, respectively. Specifically, we firstly introduce Bayesian hierarchical modeling to associate each student's cognitive state with a shared prior distribution encoding prior knowledge and a personal posterior distribution indicating model uncertainty. Furthermore, we formulate a meta-learning objective to automatically exploit prior knowledge from historical students, and efficiently solve it with a gradient-based variational inference method. 
The code will be publicly available at https://github.com/AyiStar/pyat.", + "primary_area": "domain s of application", + "author": "Haoyang Bi; Enhong Chen; Weidong He; Han Wu; Weihao Zhao; Shijin Wang; Jinze Wu", + "authorids": "", + "aff": "Anhui Province Key Laboratory of Big Data Analysis and Application, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; State Key Laboratory of Cognitive Intelligence+iFLYTEK AI Research, iFLYTEK CO., LTD.; iFLYTEK AI Research, iFLYTEK CO., LTD.", + "bibtex": "@article{Bi_Chen_He_Wu_Zhao_Wang_Wu_2023, title={BETA-CD: A Bayesian Meta-Learned Cognitive Diagnosis Framework for Personalized Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25629}, DOI={10.1609/aaai.v37i4.25629}, abstractNote={Personalized learning is a promising educational approach that aims to provide high-quality personalized services for each student with minimum demands for practice data. The key to achieving that lies in the cognitive diagnosis task, which estimates the cognitive state of the student through his/her logged data of doing practice quizzes. 
Nevertheless, in the personalized learning scenario, existing cognitive diagnosis models suffer from the inability to (1) quickly adapt to new students using a small amount of data, and (2) measure the reliability of the diagnosis result to avoid improper services that mismatch the student\u2019s actual state. In this paper, we propose a general Bayesian mETA-learned Cognitive Diagnosis framework (BETA-CD), which addresses the two challenges by prior knowledge exploitation and model uncertainty quantification, respectively. Specifically, we firstly introduce Bayesian hierarchical modeling to associate each student\u2019s cognitive state with a shared prior distribution encoding prior knowledge and a personal posterior distribution indicating model uncertainty. Furthermore, we formulate a meta-learning objective to automatically exploit prior knowledge from historical students, and efficiently solve it with a gradient-based variational inference method. The code will be publicly available at https://github.com/AyiStar/pyat.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bi, Haoyang and Chen, Enhong and He, Weidong and Wu, Han and Zhao, Weihao and Wang, Shijin and Wu, Jinze}, year={2023}, month={Jun.}, pages={5018-5026} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25629/25401", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25629", + "pdf_size": 245590, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5924906368467179752&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn; ; wuhanhan;mail.ustc.edu.cn; ;iflytek.com", + "email": "mail.ustc.edu.cn;ustc.edu.cn; ; wuhanhan;mail.ustc.edu.cn; ;iflytek.com", + "github": "https://github.com/AyiStar/pyat", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;1+2;2", + "aff_unique_norm": "University of Science and Technology of 
China;State Key Laboratory of Cognitive Intelligence;iFLYTEK CO., LTD.", + "aff_unique_dep": "Anhui Province Key Laboratory of Big Data Analysis and Application;;iFLYTEK AI Research", + "aff_unique_url": "http://www.ustc.edu.cn/;;https://www.iflytek.com", + "aff_unique_abbr": "USTC;;iFLYTEK", + "aff_campus_unique_index": ";;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25233", + "title": "BEVDepth: Acquisition of Reliable Depth for Multi-View 3D Object Detection", + "track": "main", + "status": "Technical", + "abstract": "In this research, we propose a new 3D object detector with a trustworthy depth estimation, dubbed BEVDepth, for camera-based Bird's-Eye-View~(BEV) 3D object detection. Our work is based on a key observation -- depth estimation in recent approaches is surprisingly inadequate given the fact that depth is essential to camera 3D detection. Our BEVDepth resolves this by leveraging explicit depth supervision. A camera-awareness depth estimation module is also introduced to facilitate the depth predicting capability. Besides, we design a novel Depth Refinement Module to counter the side effects carried by imprecise feature unprojection. Aided by customized Efficient Voxel Pooling and multi-frame mechanism, BEVDepth achieves the new state-of-the-art 60.9% NDS on the challenging nuScenes test set while maintaining high efficiency. For the first time, the NDS score of a camera model reaches 60%. 
Codes have been released.", + "primary_area": "computer vision ii", + "author": "Yinhao Li; Zheng Ge; Guanyi Yu; Jinrong Yang; Zengran Wang; Yukang Shi; Jianjian Sun; Zeming Li", + "authorids": "", + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, CAS+University of Chinese Academy of Sciences; MEGVII Technology; MEGVII Technology; Huazhong University of Science and Technology; MEGVII Technology; Xi\u2019an Jiaotong University; MEGVII Technology; MEGVII Technology", + "bibtex": "@article{Li_Ge_Yu_Yang_Wang_Shi_Sun_Li_2023, title={BEVDepth: Acquisition of Reliable Depth for Multi-View 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25233}, DOI={10.1609/aaai.v37i2.25233}, abstractNote={In this research, we propose a new 3D object detector with a trustworthy depth estimation, dubbed BEVDepth, for camera-based Bird\u2019s-Eye-View~(BEV) 3D object detection. Our work is based on a key observation -- depth estimation in recent approaches is surprisingly inadequate given the fact that depth is essential to camera 3D detection. Our BEVDepth resolves this by leveraging explicit depth supervision. A camera-awareness depth estimation module is also introduced to facilitate the depth predicting capability. Besides, we design a novel Depth Refinement Module to counter the side effects carried by imprecise feature unprojection. Aided by customized Efficient Voxel Pooling and multi-frame mechanism, BEVDepth achieves the new state-of-the-art 60.9% NDS on the challenging nuScenes test set while maintaining high efficiency. For the first time, the NDS score of a camera model reaches 60%. 
Codes have been released.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yinhao and Ge, Zheng and Yu, Guanyi and Yang, Jinrong and Wang, Zengran and Shi, Yukang and Sun, Jianjian and Li, Zeming}, year={2023}, month={Jun.}, pages={1477-1485} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25233/25005", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25233", + "pdf_size": 3017960, + "gs_citation": 714, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3152400530421441308&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.ucas.edu.cn;megvii.com;megvii.com;megvii.com;megvii.com;megvii.com;megvii.com;megvii.com", + "email": "mails.ucas.edu.cn;megvii.com;megvii.com;megvii.com;megvii.com;megvii.com;megvii.com;megvii.com", + "github": "https://github.com/Megvii-BaseDetection/BEVDepth", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;2;2;3;2;4;2;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;MEGVII Technology;Huazhong University of Science and Technology;Xi'an Jiaotong University", + "aff_unique_dep": "Institute of Computing Technology;;;;", + "aff_unique_url": "http://www.cas.cn/;http://www.ucas.ac.cn;https://www.megvii.com;http://www.hust.edu.cn;https://www.xjtu.edu.cn", + "aff_unique_abbr": "CAS;UCAS;;HUST;XJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25234", + "title": "BEVStereo: Enhancing Depth Estimation in Multi-View 3D Object Detection with Temporal Stereo", + "track": "main", + "status": "Technical", + "abstract": "Restricted by the ability of depth perception, all Multi-view 3D object detection methods fall into the bottleneck of depth accuracy. By constructing temporal stereo, depth estimation is quite reliable in indoor scenarios. 
However, there are two difficulties in directly integrating temporal stereo into outdoor multi-view 3D object detectors: 1) The construction of temporal stereos for all views results in high computing costs. 2) Unable to adapt to challenging outdoor scenarios. In this study, we propose an effective method for creating temporal stereo by dynamically determining the center and range of the temporal stereo. The most confident center is found using the EM algorithm. Numerous experiments on nuScenes have shown the BEVStereo's ability to deal with complex outdoor scenarios that other stereo-based methods are unable to handle. For the first time, a stereo-based approach shows superiority in scenarios like a static ego vehicle and moving objects. BEVStereo achieves the new state-of-the-art in the camera-only track of nuScenes dataset while maintaining memory efficiency. Codes have been released.", + "primary_area": "computer vision ii", + "author": "Yinhao Li; Han Bao; Zheng Ge; Jinrong Yang; Jianjian Sun; Zeming Li", + "authorids": "", + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, CAS+University of Chinese Academy of Sciences; State Key Lab of Processors, Institute of Computing Technology, CAS+University of Chinese Academy of Sciences; MEGVII Technology; Huazhong University of Science and Technology; MEGVII Technology; MEGVII Technology", + "bibtex": "@article{Li_Bao_Ge_Yang_Sun_Li_2023, title={BEVStereo: Enhancing Depth Estimation in Multi-View 3D Object Detection with Temporal Stereo}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25234}, DOI={10.1609/aaai.v37i2.25234}, abstractNote={Restricted by the ability of depth perception, all Multi-view 3D object detection methods fall into the bottleneck of depth accuracy. By constructing temporal stereo, depth estimation is quite reliable in indoor scenarios. 
However, there are two difficulties in directly integrating temporal stereo into outdoor multi-view 3D object detectors: 1) The construction of temporal stereos for all views results in high computing costs. 2) Unable to adapt to challenging outdoor scenarios. In this study, we propose an effective method for creating temporal stereo by dynamically determining the center and range of the temporal stereo. The most confident center is found using the EM algorithm. Numerous experiments on nuScenes have shown the BEVStereo\u2019s ability to deal with complex outdoor scenarios that other stereo-based methods are unable to handle. For the first time, a stereo-based approach shows superiority in scenarios like a static ego vehicle and moving objects. BEVStereo achieves the new state-of-the-art in the camera-only track of nuScenes dataset while maintaining memory efficiency. Codes have been released.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yinhao and Bao, Han and Ge, Zheng and Yang, Jinrong and Sun, Jianjian and Li, Zeming}, year={2023}, month={Jun.}, pages={1486-1494} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25234/25006", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25234", + "pdf_size": 17097781, + "gs_citation": 246, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12403418769079788084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.ucas.edu.cn;ict.ac.cn;megvii.com;megvii.com;megvii.com;megvii.com", + "email": "mails.ucas.edu.cn;ict.ac.cn;megvii.com;megvii.com;megvii.com;megvii.com", + "github": "https://github.com/Megvii-BaseDetection/BEVStereo", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2+1;3;4;3;3", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Institute of Computing Technology;MEGVII Technology;Huazhong University of Science and Technology", + "aff_unique_dep": 
"Institute of Computing Technology;;State Key Lab of Processors;;", + "aff_unique_url": "http://www.cas.cn/;http://www.ucas.ac.cn;http://www.ict.ac.cn;https://www.megvii.com;http://www.hust.edu.cn", + "aff_unique_abbr": "CAS;UCAS;ICT;;HUST", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26784", + "title": "BIFRNet: A Brain-Inspired Feature Restoration DNN for Partially Occluded Image Recognition", + "track": "aaai special track", + "status": "Technical", + "abstract": "The partially occluded image recognition (POIR) problem has been a challenge for artificial intelligence for a long time. A common strategy to handle the POIR problem is using the non-occluded features for classification. Unfortunately, this strategy will lose effectiveness when the image is severely occluded, since the visible parts can only provide limited information. Several studies in neuroscience reveal that feature restoration which fills in the occluded information and is called amodal completion is essential for human brains to recognize partially occluded images. However, feature restoration is commonly ignored by CNNs, which may be the reason why CNNs are ineffective for the POIR problem. Inspired by this, we propose a novel brain-inspired feature restoration network (BIFRNet) to solve the POIR problem. It mimics a ventral visual pathway to extract image features and a dorsal visual pathway to distinguish occluded and visible image regions. In addition, it also uses a knowledge module to store classification prior knowledge and uses a completion module to restore occluded features based on visible features and prior knowledge. Thorough experiments on synthetic and real-world occluded image datasets show that BIFRNet outperforms the existing methods in solving the POIR problem. 
Especially for severely occluded images, BIRFRNet surpasses other methods by a large margin and is close to the human brain performance. Furthermore, the brain-inspired design makes BIFRNet more interpretable.", + "primary_area": "safe and robust ai", + "author": "Jiahong Zhang; Lihong Cao; Qiuxia Lai; Bingyao Li; Yunxiao Qin", + "authorids": "", + "aff": "State Key Laboratory of Media Convergence and Communication, Communication University of China, Beijing, China+Neuroscience and Intelligent Media Institute, Communication University of China, Beijing, China; State Key Laboratory of Media Convergence and Communication, Communication University of China, Beijing, China+Neuroscience and Intelligent Media Institute, Communication University of China, Beijing, China; State Key Laboratory of Media Convergence and Communication, Communication University of China, Beijing, China+Neuroscience and Intelligent Media Institute, Communication University of China, Beijing, China; State Key Laboratory of Media Convergence and Communication, Communication University of China, Beijing, China+Neuroscience and Intelligent Media Institute, Communication University of China, Beijing, China; Neuroscience and Intelligent Media Institute, Communication University of China, Beijing, China", + "bibtex": "@article{Zhang_Cao_Lai_Li_Qin_2023, title={BIFRNet: A Brain-Inspired Feature Restoration DNN for Partially Occluded Image Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26784}, DOI={10.1609/aaai.v37i12.26784}, abstractNote={The partially occluded image recognition (POIR) problem has been a challenge for artificial intelligence for a long time. A common strategy to handle the POIR problem is using the non-occluded features for classification. Unfortunately, this strategy will lose effectiveness when the image is severely occluded, since the visible parts can only provide limited information. 
Several studies in neuroscience reveal that feature restoration which fills in the occluded information and is called amodal completion is essential for human brains to recognize partially occluded images. However, feature restoration is commonly ignored by CNNs, which may be the reason why CNNs are ineffective for the POIR problem. Inspired by this, we propose a novel brain-inspired feature restoration network (BIFRNet) to solve the POIR problem. It mimics a ventral visual pathway to extract image features and a dorsal visual pathway to distinguish occluded and visible image regions. In addition, it also uses a knowledge module to store classification prior knowledge and uses a completion module to restore occluded features based on visible features and prior knowledge. Thorough experiments on synthetic and real-world occluded image datasets show that BIFRNet outperforms the existing methods in solving the POIR problem. Especially for severely occluded images, BIRFRNet surpasses other methods by a large margin and is close to the human brain performance. 
Furthermore, the brain-inspired design makes BIFRNet more interpretable.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jiahong and Cao, Lihong and Lai, Qiuxia and Li, Bingyao and Qin, Yunxiao}, year={2023}, month={Jun.}, pages={15296-15304} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26784/26556", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26784", + "pdf_size": 1341852, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16808353467798075917&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "cuc.edu.cn;cuc.edu.cn;cuc.edu.cn;cuc.edu.cn;163.com", + "email": "cuc.edu.cn;cuc.edu.cn;cuc.edu.cn;cuc.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;0+0;0", + "aff_unique_norm": "Communication University of China", + "aff_unique_dep": "State Key Laboratory of Media Convergence and Communication", + "aff_unique_url": "http://www.cuc.edu.cn/", + "aff_unique_abbr": "CUC", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25070", + "title": "Back to the Future: Toward a Hybrid Architecture for Ad Hoc Teamwork", + "track": "main", + "status": "Technical", + "abstract": "State of the art methods for ad hoc teamwork, i.e., for collaboration without prior coordination, often use a long history of prior observations to model the behavior of other agents (or agent types) and to determine the ad hoc agent's behavior. In many practical domains, it is difficult to obtain large training datasets, and necessary to quickly revise the existing models to account for changes in team composition or domain attributes. 
Our architecture builds on the principles of step-wise refinement and ecological rationality to enable an ad hoc agent to perform non-monotonic logical reasoning with prior commonsense domain knowledge and models learned rapidly from limited examples to predict the behavior of other agents. In the simulated multiagent collaboration domain Fort Attack, we experimentally demonstrate that our architecture enables an ad hoc agent to adapt to changes in the behavior of other agents, and provides enhanced transparency and better performance than a state of the art data-driven baseline.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Hasra Dodampegama; Mohan Sridharan", + "authorids": "", + "aff": "Intelligent Robotics Lab, School of Computer Science, University of Birmingham, UK; Intelligent Robotics Lab, School of Computer Science, University of Birmingham, UK", + "bibtex": "@article{Dodampegama_Sridharan_2023, title={Back to the Future: Toward a Hybrid Architecture for Ad Hoc Teamwork}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25070}, DOI={10.1609/aaai.v37i1.25070}, abstractNote={State of the art methods for ad hoc teamwork, i.e., for collaboration without prior coordination, often use a long history of prior observations to model the behavior of other agents (or agent types) and to determine the ad hoc agent\u2019s behavior. In many practical domains, it is difficult to obtain large training datasets, and necessary to quickly revise the existing models to account for changes in team composition or domain attributes. Our architecture builds on the principles of step-wise refinement and ecological rationality to enable an ad hoc agent to perform non-monotonic logical reasoning with prior commonsense domain knowledge and models learned rapidly from limited examples to predict the behavior of other agents. 
In the simulated multiagent collaboration domain Fort Attack, we experimentally demonstrate that our architecture enables an ad hoc agent to adapt to changes in the behavior of other agents, and provides enhanced transparency and better performance than a state of the art data-driven baseline.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dodampegama, Hasra and Sridharan, Mohan}, year={2023}, month={Jun.}, pages={3-10} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25070/24842", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25070", + "pdf_size": 397114, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13184168884471886427&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "student.bham.ac.uk;bham.ac.uk", + "email": "student.bham.ac.uk;bham.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Birmingham", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.birmingham.ac.uk", + "aff_unique_abbr": "UoB", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Birmingham", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-27029", + "title": "Backforward Propagation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this paper we introduce Backforward Propagation, a method of completely eliminating Internal Covariate Shift (ICS). Unlike previous methods, which only indirectly reduce the impact of ICS while introducing other biases, we are able to have a surgical view at the effects ICS has on training neural networks. 
Our experiments show that ICS has a weight regularizing effect on models, and completely removing it enables for faster convergence of the neural network.", + "primary_area": "", + "author": "George Stoica; Cristian Simionescu", + "authorids": "", + "aff": "\u201cAlexandru Ioan Cuza\u201d University; \u201cAlexandru Ioan Cuza\u201d University", + "bibtex": "@article{Stoica_Simionescu_2024, title={Backforward Propagation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27029}, DOI={10.1609/aaai.v37i13.27029}, abstractNote={In this paper we introduce Backforward Propagation, a method of completely eliminating Internal Covariate Shift (ICS). Unlike previous methods, which only indirectly reduce the impact of ICS while introducing other biases, we are able to have a surgical view at the effects ICS has on training neural networks. Our experiments show that ICS has a weight regularizing effect on models, and completely removing it enables for faster convergence of the neural network.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Stoica, George and Simionescu, Cristian}, year={2024}, month={Jul.}, pages={16338-16339} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27029/26801", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27029", + "pdf_size": 143631, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:uuzgHpVxsIgJ:scholar.google.com/&scioq=Backforward+Propagation+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "gmail.com;nexusmedia.ro", + "email": "gmail.com;nexusmedia.ro", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Alexandru Ioan Cuza University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uaic.ro", + "aff_unique_abbr": "UAIC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0", + "aff_country_unique": "Romania" + }, + { + "id": "article-25958", + "title": "Background-Mixed Augmentation for Weakly Supervised Change Detection", + "track": "main", + "status": "Technical", + "abstract": "Change detection (CD) is to decouple object changes (i.e., object missing or appearing) from background changes (i.e., environment variations) like light and season variations in two images captured in the same scene over a long time span, presenting critical applications in disaster management, urban development, etc. In particular, the endless patterns of background changes require detectors to have a high generalization against unseen environment variations, making this task significantly challenging. Recent deep learning-based methods develop novel network architectures or optimization strategies with paired-training examples, which do not handle the generalization issue explicitly and require huge manual pixel-level annotation efforts. In this work, for the first attempt in the CD community, we study the generalization issue of CD from the perspective of data augmentation and develop a novel weakly supervised training algorithm that only needs image-level labels. Different from general augmentation techniques for classification, we propose the background-mixed augmentation that is specifically designed for change detection by augmenting examples under the guidance of a set of background changing images and letting deep CD models see diverse environment variations. Moreover, we propose the augmented & real data consistency loss that encourages the generalization increase significantly. Our method as a general framework can enhance a wide range of existing deep learning-based detectors. We conduct extensive experiments in two public datasets and enhance four state-of-the-art methods, demonstrating the advantages of our method. 
We release the code at https://github.com/tsingqguo/bgmix.", + "primary_area": "machine learning ii", + "author": "Rui Huang; Ruofei Wang; Qing Guo; Jieda Wei; Yuxiang Zhang; Wei Fan; Yang Liu", + "authorids": "", + "aff": "School of Computer Science and Technology, Civil Aviation University of China, China; School of Computer Science and Technology, Civil Aviation University of China, China; Center for Frontier AI Research (CFAR), A*STAR, Singapore; School of Computer Science and Technology, Civil Aviation University of China, China; School of Computer Science and Technology, Civil Aviation University of China, China; School of Computer Science and Technology, Civil Aviation University of China, China; Zhejiang Sci-Tech University, China+ Nanyang Technological University, Singapore", + "bibtex": "@article{Huang_Wang_Guo_Wei_Zhang_Fan_Liu_2023, title={Background-Mixed Augmentation for Weakly Supervised Change Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25958}, DOI={10.1609/aaai.v37i7.25958}, abstractNote={Change detection (CD) is to decouple object changes (i.e., object missing or appearing) from background changes (i.e., environment variations) like light and season variations in two images captured in the same scene over a long time span, presenting critical applications in disaster management, urban development, etc. In particular, the endless patterns of background changes require detectors to have a high generalization against unseen environment variations, making this task significantly challenging. Recent deep learning-based methods develop novel network architectures or optimization strategies with paired-training examples, which do not handle the generalization issue explicitly and require huge manual pixel-level annotation efforts. 
In this work, for the first attempt in the CD community, we study the generalization issue of CD from the perspective of data augmentation and develop a novel weakly supervised training algorithm that only needs image-level labels. Different from general augmentation techniques for classification, we propose the background-mixed augmentation that is specifically designed for change detection by augmenting examples under the guidance of a set of background changing images and letting deep CD models see diverse environment variations. Moreover, we propose the augmented & real data consistency loss that encourages the generalization increase significantly. Our method as a general framework can enhance a wide range of existing deep learning-based detectors. We conduct extensive experiments in two public datasets and enhance four state-of-the-art methods, demonstrating the advantages of our method. We release the code at https://github.com/tsingqguo/bgmix.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Rui and Wang, Ruofei and Guo, Qing and Wei, Jieda and Zhang, Yuxiang and Fan, Wei and Liu, Yang}, year={2023}, month={Jun.}, pages={7919-7927} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25958/25730", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25958", + "pdf_size": 2574520, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10562415242261358226&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "cauc.edu.cn;yeah.net;ieee.org;yeah.net;cauc.edu.cn;cauc.edu.cn;ntu.edu.sg", + "email": "cauc.edu.cn;yeah.net;ieee.org;yeah.net;cauc.edu.cn;cauc.edu.cn;ntu.edu.sg", + "github": "https://github.com/tsingqguo/bgmix", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;2+3", + "aff_unique_norm": "Civil Aviation University of China;A*STAR;Zhejiang Sci-Tech University;Nanyang Technological University", + "aff_unique_dep": 
"School of Computer Science and Technology;Center for Frontier AI Research (CFAR);;", + "aff_unique_url": ";https://www.a-star.edu.sg;http://www.zstu.edu.cn;https://www.ntu.edu.sg", + "aff_unique_abbr": ";A*STAR;;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0;0+1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26118", + "title": "Backpropagation-Free Deep Learning with Recursive Local Representation Alignment", + "track": "main", + "status": "Technical", + "abstract": "Training deep neural networks on large-scale datasets requires significant hardware resources whose costs (even on cloud platforms) put them out of reach of smaller organizations, groups, and individuals. Backpropagation (backprop), the workhorse for training these networks, is an inherently sequential process that is difficult to parallelize. Furthermore, researchers must continually develop various specialized techniques, such as particular weight initializations and enhanced activation functions, to ensure stable parameter optimization. Our goal is to seek an effective, neuro-biologically plausible alternative to backprop that can be used to train deep networks. In this paper, we propose a backprop-free procedure, recursive local representation alignment, for training large-scale architectures. Experiments with residual networks on CIFAR-10 and the large benchmark, ImageNet, show that our algorithm generalizes as well as backprop while converging sooner due to weight updates that are parallelizable and computationally less demanding. This is empirical evidence that a backprop-free algorithm can scale up to larger datasets.", + "primary_area": "machine learning iii", + "author": "Alexander G. Ororbia; Ankur Mali; Daniel Kifer; C. 
Lee Giles", + "authorids": "", + "aff": "Rochester Institute of Technology; University of South Florida; The Pennsylvania State University; The Pennsylvania State University", + "bibtex": "@article{Ororbia_Mali_Kifer_Giles_2023, title={Backpropagation-Free Deep Learning with Recursive Local Representation Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26118}, DOI={10.1609/aaai.v37i8.26118}, abstractNote={Training deep neural networks on large-scale datasets requires significant hardware resources whose costs (even on cloud platforms) put them out of reach of smaller organizations, groups, and individuals. Backpropagation (backprop), the workhorse for training these networks, is an inherently sequential process that is difficult to parallelize. Furthermore, researchers must continually develop various specialized techniques, such as particular weight initializations and enhanced activation functions, to ensure stable parameter optimization. Our goal is to seek an effective, neuro-biologically plausible alternative to backprop that can be used to train deep networks. In this paper, we propose a backprop-free procedure, recursive local representation alignment, for training large-scale architectures. Experiments with residual networks on CIFAR-10 and the large benchmark, ImageNet, show that our algorithm generalizes as well as backprop while converging sooner due to weight updates that are parallelizable and computationally less demanding. This is empirical evidence that a backprop-free algorithm can scale up to larger datasets.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ororbia, Alexander G. and Mali, Ankur and Kifer, Daniel and Giles, C. 
Lee}, year={2023}, month={Jun.}, pages={9327-9335} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26118/25890", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26118", + "pdf_size": 569780, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4185703538134171799&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.rit.edu;usf.edu;psu.edu;psu.edu", + "email": "cs.rit.edu;usf.edu;psu.edu;psu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;2", + "aff_unique_norm": "Rochester Institute of Technology;University of South Florida;The Pennsylvania State University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.rit.edu;https://www.usf.edu;https://www.psu.edu", + "aff_unique_abbr": "RIT;USF;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26126", + "title": "Balanced Column-Wise Block Pruning for Maximizing GPU Parallelism", + "track": "main", + "status": "Technical", + "abstract": "Pruning has been an effective solution to reduce the number of computations and the memory requirement in deep learning.\nThe pruning unit plays an important role in exploiting the GPU resources efficiently. \nThe filter is proposed as a simple pruning unit of structured pruning.\nHowever, since the filter is quite large as pruning unit, the accuracy drop is considerable with a high pruning ratio.\nGPU rearranges the weight and input tensors into tiles (blocks) for efficient computation. \nTo fully utilize GPU resources, this tile structure should be considered, which is the goal of block pruning. \nHowever, previous block pruning prunes both row vectors and column vectors. 
\nPruning of row vectors in a tile corresponds to filter pruning, and it also interferes with column-wise block pruning of the following layer.\nIn contrast, column vectors are much smaller than row vectors and can achieve lower accuracy drop.\nAdditionally, if the pruning ratio for each tile is different,\nGPU utilization can be limited by imbalanced workloads by irregular-sized blocks.\nThe same pruning ratio for the weight tiles processed in parallel enables the actual inference process to fully utilize the resources without idle time.\nThis paper proposes balanced column-wise block pruning, named BCBP, to satisfy two conditions: the column-wise minimal size of the pruning unit and balanced workloads. \nWe demonstrate that BCBP is superior to previous pruning methods through comprehensive experiments.", + "primary_area": "machine learning iii", + "author": "Cheonjun Park; Mincheol Park; Hyun Jae Oh; Minkyu Kim; Myung Kuk Yoon; Suhyun Kim; Won Woo Ro", + "authorids": "", + "aff": "Yonsei University; Yonsei University+Korea Institute of Science and Technology; Samsung Electronics; Yonsei University; Ewha Womans University; Korea Institute of Science and Technology; Yonsei University", + "bibtex": "@article{Park_Park_Oh_Kim_Yoon_Kim_Ro_2023, title={Balanced Column-Wise Block Pruning for Maximizing GPU Parallelism}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26126}, DOI={10.1609/aaai.v37i8.26126}, abstractNote={Pruning has been an effective solution to reduce the number of computations and the memory requirement in deep learning.\nThe pruning unit plays an important role in exploiting the GPU resources efficiently. The filter is proposed as a simple pruning unit of structured pruning.\nHowever, since the filter is quite large as pruning unit, the accuracy drop is considerable with a high pruning ratio.\nGPU rearranges the weight and input tensors into tiles (blocks) for efficient computation. 
To fully utilize GPU resources, this tile structure should be considered, which is the goal of block pruning. However, previous block pruning prunes both row vectors and column vectors. Pruning of row vectors in a tile corresponds to filter pruning, and it also interferes with column-wise block pruning of the following layer.\nIn contrast, column vectors are much smaller than row vectors and can achieve lower accuracy drop.\nAdditionally, if the pruning ratio for each tile is different,\nGPU utilization can be limited by imbalanced workloads by irregular-sized blocks.\nThe same pruning ratio for the weight tiles processed in parallel enables the actual inference process to fully utilize the resources without idle time.\nThis paper proposes balanced column-wise block pruning, named BCBP, to satisfy two conditions: the column-wise minimal size of the pruning unit and balanced workloads. We demonstrate that BCBP is superior to previous pruning methods through comprehensive experiments.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Park, Cheonjun and Park, Mincheol and Oh, Hyun Jae and Kim, Minkyu and Yoon, Myung Kuk and Kim, Suhyun and Ro, Won Woo}, year={2023}, month={Jun.}, pages={9398-9407} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26126/25898", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26126", + "pdf_size": 4921202, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16556808108309452440&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "yonsei.ac.kr;yonsei.ac.kr;samsung.com;yonsei.ac.kr;ewha.ac.kr;gmail.com;yonsei.ac.kr", + "email": "yonsei.ac.kr;yonsei.ac.kr;samsung.com;yonsei.ac.kr;ewha.ac.kr;gmail.com;yonsei.ac.kr", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0+1;2;0;3;1;0", + "aff_unique_norm": "Yonsei University;Korea Institute of Science and Technology;Samsung Electronics;Ewha Womans 
University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.yonsei.ac.kr;https://www.kist.re.kr;https://www.samsung.com;http://www.ewha.ac.kr", + "aff_unique_abbr": "Yonsei;KIST;Samsung;Ewha", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26621", + "title": "Balanced Meta Learning and Diverse Sampling for Lifelong Task-Oriented Dialogue Systems", + "track": "main", + "status": "Technical", + "abstract": "In real-world scenarios, it is crucial to build a lifelong task-oriented dialogue system (TDS) that continually adapts to new knowledge without forgetting previously acquired experiences. Existing approaches mainly focus on mitigating the catastrophic forgetting in lifelong TDS. However, the transfer ability to generalize the accumulated old knowledge to new tasks is underexplored. In this paper, we propose a two-stage lifelong task-oriented dialogue generation method to mitigate catastrophic forgetting and encourage knowledge transfer simultaneously, inspired by the learning process. In the first stage, we learn task-specific masks which adaptively preserve the knowledge of each visited task so as to mitigate catastrophic forgetting. In this stage, we are expected to learn the task-specific knowledge which is tailored for each task. In the second stage, we bring the knowledge from the encountered tasks together and understand thoroughly. To this end, we devise a balanced meta learning strategy for both forward and backward knowledge transfer in the lifelong learning process. In particular, we perform meta-update with a meta-test set sampled from the current training data for forward knowledge transfer. 
In addition, we employ an uncertainty-based sampling strategy to select and store representative dialogue samples into episodic memory and perform meta-update with a meta-test set sampled from the memory for backward knowledge transfer. With extensive experiments on 29 tasks, we show that MetaLTDS outperforms the strong baselines in terms of both effectiveness and efficiency. For reproducibility, we submit our code at: https://github.com/travis-xu/MetaLTDS.", + "primary_area": "speech natural language processing", + "author": "Qiancheng Xu; Min Yang; Ruifeng Xu", + "authorids": "", + "aff": "Georgia Institute of Technology; Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences; Harbin Institute of Technology (Shenzhen)", + "bibtex": "@article{Xu_Yang_Xu_2023, title={Balanced Meta Learning and Diverse Sampling for Lifelong Task-Oriented Dialogue Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26621}, DOI={10.1609/aaai.v37i11.26621}, abstractNote={In real-world scenarios, it is crucial to build a lifelong task-oriented dialogue system (TDS) that continually adapts to new knowledge without forgetting previously acquired experiences. Existing approaches mainly focus on mitigating the catastrophic forgetting in lifelong TDS. However, the transfer ability to generalize the accumulated old knowledge to new tasks is underexplored. In this paper, we propose a two-stage lifelong task-oriented dialogue generation method to mitigate catastrophic forgetting and encourage knowledge transfer simultaneously, inspired by the learning process. In the first stage, we learn task-specific masks which adaptively preserve the knowledge of each visited task so as to mitigate catastrophic forgetting. In this stage, we are expected to learn the task-specific knowledge which is tailored for each task. In the second stage, we bring the knowledge from the encountered tasks together and understand thoroughly. 
To this end, we devise a balanced meta learning strategy for both forward and backward knowledge transfer in the lifelong learning process. In particular, we perform meta-update with a meta-test set sampled from the current training data for forward knowledge transfer. In addition, we employ an uncertainty-based sampling strategy to select and store representative dialogue samples into episodic memory and perform meta-update with a meta-test set sampled from the memory for backward knowledge transfer. With extensive experiments on 29 tasks, we show that MetaLTDS outperforms the strong baselines in terms of both effectiveness and efficiency. For reproducibility, we submit our code at: https://github.com/travis-xu/MetaLTDS.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Qiancheng and Yang, Min and Xu, Ruifeng}, year={2023}, month={Jun.}, pages={13843-13852} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26621/26393", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26621", + "pdf_size": 884050, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14059155929113114659&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gatech.edu;siat.ac.cn;hit.edu.cn", + "email": "gatech.edu;siat.ac.cn;hit.edu.cn", + "github": "https://github.com/travis-xu/MetaLTDS", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Georgia Institute of Technology;Chinese Academy of Sciences;Harbin Institute of Technology", + "aff_unique_dep": ";Shenzhen Institutes of Advanced Technology;", + "aff_unique_url": "https://www.gatech.edu;http://www.siat.cas.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "Georgia Tech;SIAT;HIT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-25724", + "title": 
"Ballot Length in Instant Runoff Voting", + "track": "main", + "status": "Technical", + "abstract": "Instant runoff voting (IRV) is an increasingly-popular alternative to traditional plurality voting in which voters submit rankings over the candidates rather than single votes. In practice, elections using IRV often restrict the ballot length, the number of candidates a voter is allowed to rank on their ballot. We theoretically and empirically analyze how ballot length can influence the outcome of an election, given fixed voter preferences. We show that there exist preference profiles over k candidates such that up to k-1 different candidates win at different ballot lengths. We derive exact lower bounds on the number of voters required for such profiles and provide a construction matching the lower bound for unrestricted voter preferences. Additionally, we characterize which sequences of winners are possible over ballot lengths and provide explicit profile constructions achieving any feasible winner sequence. We also examine how classic preference restrictions influence our results\u2014for instance, single-peakedness makes k-1 different winners impossible but still allows at least \u03a9(\u221ak). Finally, we analyze a collection of 168 real-world elections, where we truncate rankings to simulate shorter ballots. We find that shorter ballots could have changed the outcome in one quarter of these elections. 
Our results highlight ballot length as a consequential degree of freedom in the design of IRV elections.", + "primary_area": "game theory and economic paradigms", + "author": "Kiran Tomlinson; Johan Ugander; Jon Kleinberg", + "authorids": "", + "aff": "Cornell University; Stanford University; Cornell University", + "bibtex": "@article{Tomlinson_Ugander_Kleinberg_2023, title={Ballot Length in Instant Runoff Voting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25724}, DOI={10.1609/aaai.v37i5.25724}, abstractNote={Instant runoff voting (IRV) is an increasingly-popular alternative to traditional plurality voting in which voters submit rankings over the candidates rather than single votes. In practice, elections using IRV often restrict the ballot length, the number of candidates a voter is allowed to rank on their ballot. We theoretically and empirically analyze how ballot length can influence the outcome of an election, given fixed voter preferences. We show that there exist preference profiles over k candidates such that up to k-1 different candidates win at different ballot lengths. We derive exact lower bounds on the number of voters required for such profiles and provide a construction matching the lower bound for unrestricted voter preferences. Additionally, we characterize which sequences of winners are possible over ballot lengths and provide explicit profile constructions achieving any feasible winner sequence. We also examine how classic preference restrictions influence our results\u2014for instance, single-peakedness makes k-1 different winners impossible but still allows at least \u03a9(\u221ak). Finally, we analyze a collection of 168 real-world elections, where we truncate rankings to simulate shorter ballots. We find that shorter ballots could have changed the outcome in one quarter of these elections. 
Our results highlight ballot length as a consequential degree of freedom in the design of IRV elections.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tomlinson, Kiran and Ugander, Johan and Kleinberg, Jon}, year={2023}, month={Jun.}, pages={5841-5849} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25724/25496", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25724", + "pdf_size": 186676, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14730561634225077349&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "cs.cornell.edu;stanford.edu;cornell.edu", + "email": "cs.cornell.edu;stanford.edu;cornell.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Cornell University;Stanford University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cornell.edu;https://www.stanford.edu", + "aff_unique_abbr": "Cornell;Stanford", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26355", + "title": "Bayesian Cross-Modal Alignment Learning for Few-Shot Out-of-Distribution Generalization", + "track": "main", + "status": "Technical", + "abstract": "Recent advances in large pre-trained models showed promising results in few-shot learning. However, their generalization ability on two-dimensional Out-of-Distribution (OoD) data, i.e., correlation shift and diversity shift, has not been thoroughly investigated. Researches have shown that even with a significant amount of training data, few methods can achieve better performance than the standard empirical risk minimization method (ERM) in OoD generalization. 
This few-shot OoD generalization dilemma emerges as a challenging direction in deep neural network generalization research, where the performance suffers from overfitting on few-shot examples and OoD generalization errors. In this paper, leveraging a broader supervision source, we explore a novel Bayesian cross-modal image-text alignment learning method (Bayes-CAL) to address this issue. Specifically, the model is designed as only text representations are fine-tuned via a Bayesian modelling approach with gradient orthogonalization loss and invariant risk minimization (IRM) loss. The Bayesian approach is essentially introduced to avoid overfitting the base classes observed during training and improve generalization to broader unseen classes. The dedicated loss is introduced to achieve better image-text alignment by disentangling the causal and non-casual parts of image features. Numerical experiments demonstrate that Bayes-CAL achieved state-of-the-art OoD generalization performances on two-dimensional distribution shifts. Moreover, compared with CLIP-like models, Bayes-CAL yields more stable generalization performances on unseen classes. Our code is available at https://github.com/LinLLLL/BayesCAL.", + "primary_area": "machine learning iv", + "author": "Lin Zhu; Xinbing Wang; Chenghu Zhou; Nanyang Ye", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Zhu_Wang_Zhou_Ye_2023, title={Bayesian Cross-Modal Alignment Learning for Few-Shot Out-of-Distribution Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26355}, DOI={10.1609/aaai.v37i9.26355}, abstractNote={Recent advances in large pre-trained models showed promising results in few-shot learning. 
However, their generalization ability on two-dimensional Out-of-Distribution (OoD) data, i.e., correlation shift and diversity shift, has not been thoroughly investigated. Researches have shown that even with a significant amount of training data, few methods can achieve better performance than the standard empirical risk minimization method (ERM) in OoD generalization. This few-shot OoD generalization dilemma emerges as a challenging direction in deep neural network generalization research, where the performance suffers from overfitting on few-shot examples and OoD generalization errors. In this paper, leveraging a broader supervision source, we explore a novel Bayesian cross-modal image-text alignment learning method (Bayes-CAL) to address this issue. Specifically, the model is designed as only text representations are fine-tuned via a Bayesian modelling approach with gradient orthogonalization loss and invariant risk minimization (IRM) loss. The Bayesian approach is essentially introduced to avoid overfitting the base classes observed during training and improve generalization to broader unseen classes. The dedicated loss is introduced to achieve better image-text alignment by disentangling the causal and non-casual parts of image features. Numerical experiments demonstrate that Bayes-CAL achieved state-of-the-art OoD generalization performances on two-dimensional distribution shifts. Moreover, compared with CLIP-like models, Bayes-CAL yields more stable generalization performances on unseen classes. 
Our code is available at https://github.com/LinLLLL/BayesCAL.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Lin and Wang, Xinbing and Zhou, Chenghu and Ye, Nanyang}, year={2023}, month={Jun.}, pages={11461-11469} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26355/26127", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26355", + "pdf_size": 1280296, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16584164003240146782&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;gmail.com;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;gmail.com;sjtu.edu.cn", + "github": "https://github.com/LinLLLL/BayesCAL", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26245", + "title": "Bayesian Federated Neural Matching That Completes Full Information", + "track": "main", + "status": "Technical", + "abstract": "Federated learning is a contemporary machine learning paradigm where locally trained models are distilled into a global model. Due to the intrinsic permutation invariance of neural networks, Probabilistic Federated Neural Matching (PFNM) employs a Bayesian nonparametric framework in the generation process of local neurons, and then creates a linear sum assignment formulation in each alternative optimization iteration. But according to our theoretical analysis, the optimization iteration in PFNM omits global information from existing. 
In this study, we propose a novel approach that overcomes this flaw by introducing a Kullback-Leibler divergence penalty at each iteration.\nThe effectiveness of our approach is demonstrated by experiments on both image classification and semantic segmentation tasks.", + "primary_area": "machine learning iv", + "author": "Peng Xiao; Samuel Cheng", + "authorids": "", + "aff": "Department of Computer Science and Technology, Tongji University; School of Electrical and Computer Engineering, University of Oklahoma", + "bibtex": "@article{Xiao_Cheng_2023, title={Bayesian Federated Neural Matching That Completes Full Information}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26245}, DOI={10.1609/aaai.v37i9.26245}, abstractNote={Federated learning is a contemporary machine learning paradigm where locally trained models are distilled into a global model. Due to the intrinsic permutation invariance of neural networks, Probabilistic Federated Neural Matching (PFNM) employs a Bayesian nonparametric framework in the generation process of local neurons, and then creates a linear sum assignment formulation in each alternative optimization iteration. But according to our theoretical analysis, the optimization iteration in PFNM omits global information from existing. 
In this study, we propose a novel approach that overcomes this flaw by introducing a Kullback-Leibler divergence penalty at each iteration.\nThe effectiveness of our approach is demonstrated by experiments on both image classification and semantic segmentation tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiao, Peng and Cheng, Samuel}, year={2023}, month={Jun.}, pages={10473-10480} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26245/26017", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26245", + "pdf_size": 953887, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2840257315802816&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com; ", + "email": "gmail.com; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Tongji University;University of Oklahoma", + "aff_unique_dep": "Department of Computer Science and Technology;School of Electrical and Computer Engineering", + "aff_unique_url": "https://www.tongji.edu.cn;https://www.ou.edu", + "aff_unique_abbr": "Tongji;OU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-27022", + "title": "Bayesian Models for Targeted Cyber Deception Strategies (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We propose a model-driven decision support system (DSS) based on a Bayesian belief network (BBN) to support cyber deception based on a detailed model of attacker beliefs. We discuss this approach using a case study based on passively observed operating system (OS) fingerprinting data. In passive reconnaissance attackers can remain undetected while collecting information to identify systems and plan attacks. 
Our DSS is intended to support preventative measures to protect the network from successful reconnaissance, such as by modifying features using deception. We validate the prediction accuracy of the model in comparison with a sequential artificial neural network (ANN). We then introduce a deceptive algorithm to select a minimal set of features for OS obfuscation. We show the effectiveness of feature-modification strategies based on our methods using passively collected data to decide what features from a real operating system (OS) to modify to appear as a fake [different] OS.", + "primary_area": "", + "author": "Nazia Sharmin", + "authorids": "", + "aff": "University of Texas, El Paso", + "bibtex": "@article{Sharmin_2024, title={Bayesian Models for Targeted Cyber Deception Strategies (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27022}, DOI={10.1609/aaai.v37i13.27022}, abstractNote={We propose a model-driven decision support system (DSS) based on a Bayesian belief network (BBN) to support cyber deception based on a detailed model of attacker beliefs. We discuss this approach using a case study based on passively observed operating system (OS) fingerprinting data. In passive reconnaissance attackers can remain undetected while collecting information to identify systems and plan attacks. Our DSS is intended to support preventative measures to protect the network from successful reconnaissance, such as by modifying features using deception. We validate the prediction accuracy of the model in comparison with a sequential artificial neural network (ANN). We then introduce a deceptive algorithm to select a minimal set of features for OS obfuscation. 
We show the effectiveness of feature-modification strategies based on our methods using passively collected data to decide what features from a real operating system (OS) to modify to appear as a fake [different] OS.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sharmin, Nazia}, year={2024}, month={Jul.}, pages={16324-16325} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27022/26794", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27022", + "pdf_size": 162929, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8782617104619722051&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "miners.utep.edu", + "email": "miners.utep.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Texas at El Paso", + "aff_unique_dep": "", + "aff_unique_url": "https://www.utep.edu", + "aff_unique_abbr": "UTEP", + "aff_campus_unique_index": "0", + "aff_campus_unique": "El Paso", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25726", + "title": "Bayesian Optimization-Based Combinatorial Assignment", + "track": "main", + "status": "Technical", + "abstract": "We study the combinatorial assignment domain, which includes combinatorial auctions and course allocation. The main challenge in this domain is that the bundle space grows exponentially in the number of items. To address this, several papers have recently proposed machine learning-based preference elicitation algorithms that aim to elicit only the most important information from agents. However, the main shortcoming of this prior work is that it does not model a mechanism's uncertainty over values for not yet elicited bundles. In this paper, we address this shortcoming by presenting a Bayesian optimization-based combinatorial assignment (BOCA) mechanism. 
Our key technical contribution is to integrate a method for capturing model uncertainty into an iterative combinatorial auction mechanism. Concretely, we design a new method for estimating an upper uncertainty bound that can be used to define an acquisition function to determine the next query to the agents. This enables the mechanism to properly explore (and not just exploit) the bundle space during its preference elicitation phase. We run computational experiments in several spectrum auction domains to evaluate BOCA's performance. Our results show that BOCA achieves higher allocative efficiency than state-of-the-art approaches.", + "primary_area": "game theory and economic paradigms", + "author": "Jakob Weissteiner; Jakob Heiss; Julien Siems; Sven Seuken", + "authorids": "", + "aff": "University of Zurich + ETH AI Center; ETH Zurich + ETH AI Center; University of Zurich; University of Zurich + ETH AI Center", + "bibtex": "@article{Weissteiner_Heiss_Siems_Seuken_2023, title={Bayesian Optimization-Based Combinatorial Assignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25726}, DOI={10.1609/aaai.v37i5.25726}, abstractNote={We study the combinatorial assignment domain, which includes combinatorial auctions and course allocation. The main challenge in this domain is that the bundle space grows exponentially in the number of items. To address this, several papers have recently proposed machine learning-based preference elicitation algorithms that aim to elicit only the most important information from agents. However, the main shortcoming of this prior work is that it does not model a mechanism\u2019s uncertainty over values for not yet elicited bundles. In this paper, we address this shortcoming by presenting a Bayesian optimization-based combinatorial assignment (BOCA) mechanism. Our key technical contribution is to integrate a method for capturing model uncertainty into an iterative combinatorial auction mechanism. 
Concretely, we design a new method for estimating an upper uncertainty bound that can be used to define an acquisition function to determine the next query to the agents. This enables the mechanism to properly explore (and not just exploit) the bundle space during its preference elicitation phase. We run computational experiments in several spectrum auction domains to evaluate BOCA\u2019s performance. Our results show that BOCA achieves higher allocative efficiency than state-of-the-art approaches.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Weissteiner, Jakob and Heiss, Jakob and Siems, Julien and Seuken, Sven}, year={2023}, month={Jun.}, pages={5858-5866} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25726/25498", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25726", + "pdf_size": 217653, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7528775900918037386&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "ifi.uzh.ch;math.ethz.ch;gmail.com;ifi.uzh.ch", + "email": "ifi.uzh.ch;math.ethz.ch;gmail.com;ifi.uzh.ch", + "github": "", + "project": "https://arxiv.org/abs/2208.14698", + "author_num": 4, + "aff_unique_index": "0+1;1+1;0;0+1", + "aff_unique_norm": "University of Zurich;ETH Zurich", + "aff_unique_dep": ";AI Center", + "aff_unique_url": "https://www.unizh.ch;https://www.ethz.ch", + "aff_unique_abbr": "UZH;ETH", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Zurich", + "aff_country_unique_index": "0+0;0+0;0;0+0", + "aff_country_unique": "Switzerland" + }, + { + "id": "article-25751", + "title": "Beam Search Optimized Batch Bayesian Active Learning", + "track": "main", + "status": "Technical", + "abstract": "Active Learning is an essential method for label-efficient deep learning. 
As a Bayesian active learning method, Bayesian Active Learning by Disagreement (BALD) successfully selects the most representative samples by maximizing the mutual information between the model prediction and model parameters. However, when applied to a batch acquisition mode, like batch construction with greedy search, BALD suffers from poor performance, especially with noises of near-duplicate data. To address this shortcoming, we propose a diverse beam search optimized batch active learning method, which explores a graph for every batch construction by expanding the highest-scored samples of a predetermined number. To avoid near duplicate beam branches (very similar beams generated from the same root and similar samples), which is undesirable for lacking diverse representations in the feature space, we design a self-adapted constraint within candidate beams. The proposed method is able to acquire data that can better represent the distribution of the unlabeled pool, and at the same time, be significantly different from existing beams. We observe that the proposed method achieves higher batch performance than the baseline methods on three benchmark datasets.", + "primary_area": "humans and ai", + "author": "Jingyu Sun; Hongjie Zhai; Osamu Saisho; Susumu Takeuchi", + "authorids": "", + "aff": "NTT Computer and Data Science Laboratories; NTT Software Innovation Center; NTT Social Informatics Laboratories; NTT Computer and Data Science Laboratories", + "bibtex": "@article{Sun_Zhai_Saisho_Takeuchi_2023, title={Beam Search Optimized Batch Bayesian Active Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25751}, DOI={10.1609/aaai.v37i5.25751}, abstractNote={Active Learning is an essential method for label-efficient deep learning. 
As a Bayesian active learning method, Bayesian Active Learning by Disagreement (BALD) successfully selects the most representative samples by maximizing the mutual information between the model prediction and model parameters. However, when applied to a batch acquisition mode, like batch construction with greedy search, BALD suffers from poor performance, especially with noises of near-duplicate data. To address this shortcoming, we propose a diverse beam search optimized batch active learning method, which explores a graph for every batch construction by expanding the highest-scored samples of a predetermined number. To avoid near duplicate beam branches (very similar beams generated from the same root and similar samples), which is undesirable for lacking diverse representations in the feature space, we design a self-adapted constraint within candidate beams. The proposed method is able to acquire data that can better represent the distribution of the unlabeled pool, and at the same time, be significantly different from existing beams. 
We observe that the proposed method achieves higher batch performance than the baseline methods on three benchmark datasets.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Jingyu and Zhai, Hongjie and Saisho, Osamu and Takeuchi, Susumu}, year={2023}, month={Jun.}, pages={6084-6091} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25751/25523", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25751", + "pdf_size": 1220429, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5237214195839492130&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp", + "email": "hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "NTT Data Science Laboratories;NTT Software Innovation Center;NTT Data", + "aff_unique_dep": "Computer and Data Science;;Social Informatics Laboratories", + "aff_unique_url": "https://www.ntt.co.jp;https://www.ntt-sic.com/;https://www.nttdata.com", + "aff_unique_abbr": "NTT;;NTT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26326", + "title": "Behavior Estimation from Multi-Source Data for Offline Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Offline reinforcement learning (RL) have received rising interest due to its appealing data efficiency. The present study addresses behavior estimation, a task that aims at estimating the data-generating policy. In particular, this work considers a scenario where data are collected from multiple sources. Neglecting data heterogeneity, existing approaches cannot provide good estimates and impede policy learning. 
To overcome this drawback, the present study proposes a latent variable model and a model-learning algorithm to infer a set of policies from data, which allows an agent to use as behavior policy the policy that best describes a particular trajectory. To illustrate the benefit of such a fine-grained characterization for multi-source data, this work showcases how the proposed model can be incorporated into an existing offline RL algorithm. Lastly, with extensive empirical evaluation this work confirms the risks of neglecting data heterogeneity and the efficacy of the proposed model.", + "primary_area": "machine learning iv", + "author": "Guoxi Zhang; Hisashi Kashima", + "authorids": "", + "aff": "Graduate School of Informatics, Kyoto University; RIKEN Guardian Robot Project", + "bibtex": "@article{Zhang_Kashima_2023, title={Behavior Estimation from Multi-Source Data for Offline Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26326}, DOI={10.1609/aaai.v37i9.26326}, abstractNote={Offline reinforcement learning (RL) have received rising interest due to its appealing data efficiency. The present study addresses behavior estimation, a task that aims at estimating the data-generating policy. In particular, this work considers a scenario where data are collected from multiple sources. Neglecting data heterogeneity, existing approaches cannot provide good estimates and impede policy learning. To overcome this drawback, the present study proposes a latent variable model and a model-learning algorithm to infer a set of policies from data, which allows an agent to use as behavior policy the policy that best describes a particular trajectory. To illustrate the benefit of such a fine-grained characterization for multi-source data, this work showcases how the proposed model can be incorporated into an existing offline RL algorithm. 
Lastly, with extensive empirical evaluation this work confirms the risks of neglecting data heterogeneity and the efficacy of the proposed model.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Guoxi and Kashima, Hisashi}, year={2023}, month={Jun.}, pages={11201-11209} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26326/26098", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26326", + "pdf_size": 4468612, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16055912369391193700&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ml.ist.i.kyoto-u.ac.jp;i.kyoto-u.ac.jp", + "email": "ml.ist.i.kyoto-u.ac.jp;i.kyoto-u.ac.jp", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Kyoto University;RIKEN", + "aff_unique_dep": "Graduate School of Informatics;Guardian Robot Project", + "aff_unique_url": "https://www.kyoto-u.ac.jp;https://www.riken.jp", + "aff_unique_abbr": "Kyoto U;RIKEN", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Kyoto;", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26115", + "title": "Behavioral Learning in Security Games: Threat of Multi-Step Manipulative Attacks", + "track": "main", + "status": "Technical", + "abstract": "This paper studies the problem of multi-step manipulative attacks in Stackelberg security games, in which a clever attacker attempts to orchestrate its attacks over multiple time steps to mislead the defender's learning of the attacker's behavior. This attack manipulation eventually influences the defender's patrol strategy towards the attacker's benefit. Previous work along this line of research only focuses on one-shot games in which the defender learns the attacker's behavior and then designs a corresponding strategy only once. 
Our work, on the other hand, investigates the long-term impact of the attacker's manipulation in which current attack and defense choices of players determine the future learning and patrol planning of the defender. This paper has three key contributions. First, we introduce a new multi-step manipulative attack game model that captures the impact of sequential manipulative attacks carried out by the attacker over the entire time horizon. Second, we propose a new algorithm to compute an optimal manipulative attack plan for the attacker, which tackles the challenge of multiple connected optimization components involved in the computation across multiple time steps. Finally, we present extensive experimental results on the impact of such misleading attacks, showing a significant benefit for the attacker and loss for the defender.", + "primary_area": "machine learning iii", + "author": "Thanh H. Nguyen; Arunesh Sinha", + "authorids": "", + "aff": "University of Oregon; Rutgers University", + "bibtex": "@article{Nguyen_Sinha_2023, title={Behavioral Learning in Security Games: Threat of Multi-Step Manipulative Attacks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26115}, DOI={10.1609/aaai.v37i8.26115}, abstractNote={This paper studies the problem of multi-step manipulative attacks in Stackelberg security games, in which a clever attacker attempts to orchestrate its attacks over multiple time steps to mislead the defender\u2019s learning of the attacker\u2019s behavior. This attack manipulation eventually influences the defender\u2019s patrol strategy towards the attacker\u2019s benefit. Previous work along this line of research only focuses on one-shot games in which the defender learns the attacker\u2019s behavior and then designs a corresponding strategy only once. 
Our work, on the other hand, investigates the long-term impact of the attacker\u2019s manipulation in which current attack and defense choices of players determine the future learning and patrol planning of the defender. This paper has three key contributions. First, we introduce a new multi-step manipulative attack game model that captures the impact of sequential manipulative attacks carried out by the attacker over the entire time horizon. Second, we propose a new algorithm to compute an optimal manipulative attack plan for the attacker, which tackles the challenge of multiple connected optimization components involved in the computation across multiple time steps. Finally, we present extensive experimental results on the impact of such misleading attacks, showing a significant benefit for the attacker and loss for the defender.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Thanh H. and Sinha, Arunesh}, year={2023}, month={Jun.}, pages={9302-9309} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26115/25887", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26115", + "pdf_size": 209596, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:9uwLQXBXPqYJ:scholar.google.com/&scioq=Behavioral+Learning+in+Security+Games:+Threat+of+Multi-Step+Manipulative+Attacks&hl=en&as_sdt=0,33", + "gs_version_total": 8, + "aff_domain": "cs.uoregon.edu;rutgers.edu", + "email": "cs.uoregon.edu;rutgers.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Oregon;Rutgers University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uoregon.edu;https://www.rutgers.edu", + "aff_unique_abbr": "UO;Rutgers", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26142", + "title": "Bellman 
Meets Hawkes: Model-Based Reinforcement Learning via Temporal Point Processes", + "track": "main", + "status": "Technical", + "abstract": "We consider a sequential decision making problem where the agent faces the environment characterized by the stochastic discrete events and seeks an optimal intervention policy such that its long-term reward is maximized. This problem exists ubiquitously in social media, finance and health informatics but is rarely investigated by the conventional research in reinforcement learning. To this end, we present a novel framework of the model-based reinforcement learning where the agent's actions and observations are asynchronous stochastic discrete events occurring in continuous-time. We model the dynamics of the environment by Hawkes process with external intervention control term and develop an algorithm to embed such process in the Bellman equation which guides the direction of the value gradient. We demonstrate the superiority of our method in both synthetic simulator and real-data experiments.", + "primary_area": "machine learning iii", + "author": "Chao Qu; Xiaoyu Tan; Siqiao Xue; Xiaoming Shi; James Zhang; Hongyuan Mei", + "authorids": "", + "aff": "Ant Group, Hangzhou, China; Ant Group, Hangzhou, China + Toyota Technological Institute at Chicago, Chicago, IL, United States; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Toyota Technological Institute at Chicago, Chicago, IL, United States", + "bibtex": "@article{Qu_Tan_Xue_Shi_Zhang_Mei_2023, title={Bellman Meets Hawkes: Model-Based Reinforcement Learning via Temporal Point Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26142}, DOI={10.1609/aaai.v37i8.26142}, abstractNote={We consider a sequential decision making problem where the agent faces the environment characterized by the stochastic discrete events and seeks an optimal intervention policy such that its long-term reward is maximized. 
This problem exists ubiquitously in social media, finance and health informatics but is rarely investigated by the conventional research in reinforcement learning. To this end, we present a novel framework of the model-based reinforcement learning where the agent\u2019s actions and observations are asynchronous stochastic discrete events occurring in continuous-time. We model the dynamics of the environment by Hawkes process with external intervention control term and develop an algorithm to embed such process in the Bellman equation which guides the direction of the value gradient. We demonstrate the superiority of our method in both synthetic simulator and real-data experiments.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qu, Chao and Tan, Xiaoyu and Xue, Siqiao and Shi, Xiaoming and Zhang, James and Mei, Hongyuan}, year={2023}, month={Jun.}, pages={9543-9551} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26142/25914", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26142", + "pdf_size": 6453624, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18000344354812194676&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;outlook.com;antgroup.com;antgroup.com;antgroup.com;ttic.edu", + "email": "gmail.com;outlook.com;antgroup.com;antgroup.com;antgroup.com;ttic.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0;1", + "aff_unique_norm": "Ant Group;Toyota Technological Institute at Chicago", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.antgroup.com;https://www.tti-chicago.org", + "aff_unique_abbr": "Ant Group;TTI Chicago", + "aff_campus_unique_index": "0;0+1;0;0;0;1", + "aff_campus_unique": "Hangzhou;Chicago", + "aff_country_unique_index": "0;0+1;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26020", + "title": "Bespoke: A Block-Level 
Neural Network Optimization Framework for Low-Cost Deployment", + "track": "main", + "status": "Technical", + "abstract": "As deep learning models become popular, there is a lot of need for deploying them to diverse device environments. Because it is costly to develop and optimize a neural network for every single environment, there is a line of research to search neural networks for multiple target environments efficiently. However, existing works for such a situation still suffer from requiring many GPUs and expensive costs. Motivated by this, we propose a novel neural network optimization framework named Bespoke for low-cost deployment. Our framework searches for a lightweight model by replacing parts of an original model with randomly selected alternatives, each of which comes from a pretrained neural network or the original model. In the practical sense, Bespoke has two significant merits. One is that it requires near zero cost for designing the search space of neural networks. The other merit is that it exploits the sub-networks of public pretrained neural networks, so the total cost is minimal compared to the existing works. We conduct experiments exploring Bespoke's the merits, and the results show that it finds efficient models for multiple targets with meager cost.", + "primary_area": "machine learning ii", + "author": "Jong-Ryul Lee; Yong-Hyuk Moon", + "authorids": "", + "aff": "Electronics and Telecommunications Research Institute (ETRI), Daejeon, Korea; Electronics and Telecommunications Research Institute (ETRI), Daejeon, Korea + University of Science and Technology (UST), Daejeon, Korea", + "bibtex": "@article{Lee_Moon_2023, title={Bespoke: A Block-Level Neural Network Optimization Framework for Low-Cost Deployment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26020}, DOI={10.1609/aaai.v37i7.26020}, abstractNote={As deep learning models become popular, there is a lot of need for deploying them to diverse device environments. 
Because it is costly to develop and optimize a neural network for every single environment, there is a line of research to search neural networks for multiple target environments efficiently. However, existing works for such a situation still suffer from requiring many GPUs and expensive costs. Motivated by this, we propose a novel neural network optimization framework named Bespoke for low-cost deployment. Our framework searches for a lightweight model by replacing parts of an original model with randomly selected alternatives, each of which comes from a pretrained neural network or the original model. In the practical sense, Bespoke has two significant merits. One is that it requires near zero cost for designing the search space of neural networks. The other merit is that it exploits the sub-networks of public pretrained neural networks, so the total cost is minimal compared to the existing works. We conduct experiments exploring Bespoke\u2019s the merits, and the results show that it finds efficient models for multiple targets with meager cost.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Jong-Ryul and Moon, Yong-Hyuk}, year={2023}, month={Jun.}, pages={8465-8472} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26020/25792", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26020", + "pdf_size": 237630, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9824707848753773050&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "etri.re.kr;etri.re.kr", + "email": "etri.re.kr;etri.re.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Electronics and Telecommunications Research Institute;University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.etri.re.kr;https://www.ust.ac.kr", + "aff_unique_abbr": "ETRI;UST", + "aff_campus_unique_index": 
"0;0+0", + "aff_campus_unique": "Daejeon", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "Korea" + }, + { + "id": "article-25653", + "title": "Better Context Makes Better Code Language Models: A Case Study on Function Call Argument Completion", + "track": "main", + "status": "Technical", + "abstract": "Pretrained code language models have enabled great progress towards program synthesis. However, common approaches only consider in-file local context and thus miss information and constraints imposed by other parts of the codebase and its external dependencies. Existing code completion benchmarks also lack such context. To resolve these restrictions we curate a new dataset of permissively licensed Python packages that includes full projects and their dependencies and provide tools to extract non-local information with the help of program analyzers. We then focus on the task of function call argument completion which requires predicting the arguments to function calls. We show that existing code completion models do not yield good results on our completion task. To better solve this task, we query a program analyzer for information relevant to a given function call, and consider ways to provide the analyzer results to different code completion models during inference and training. Our experiments show that providing access to the function implementation and function usages greatly improves the argument completion performance. 
Our ablation study provides further insights on how different types of information available from the program analyzer and different ways of incorporating the information affect the model performance.", + "primary_area": "domain(s) of application", + "author": "Hengzhi Pei; Jinman Zhao; Leonard Lausen; Sheng Zha; George Karypis", + "authorids": "", + "aff": "University of Illinois Urbana-Champaign; Amazon Web Services; Amazon Web Services; Amazon Web Services; Amazon Web Services", + "bibtex": "@article{Pei_Zhao_Lausen_Zha_Karypis_2023, title={Better Context Makes Better Code Language Models: A Case Study on Function Call Argument Completion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25653}, DOI={10.1609/aaai.v37i4.25653}, abstractNote={Pretrained code language models have enabled great progress towards program synthesis. However, common approaches only consider in-file local context and thus miss information and constraints imposed by other parts of the codebase and its external dependencies. Existing code completion benchmarks also lack such context. To resolve these restrictions we curate a new dataset of permissively licensed Python packages that includes full projects and their dependencies and provide tools to extract non-local information with the help of program analyzers. We then focus on the task of function call argument completion which requires predicting the arguments to function calls. We show that existing code completion models do not yield good results on our completion task. To better solve this task, we query a program analyzer for information relevant to a given function call, and consider ways to provide the analyzer results to different code completion models during inference and training. Our experiments show that providing access to the function implementation and function usages greatly improves the argument completion performance. 
Our ablation study provides further insights on how different types of information available from the program analyzer and different ways of incorporating the information affect the model performance.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pei, Hengzhi and Zhao, Jinman and Lausen, Leonard and Zha, Sheng and Karypis, George}, year={2023}, month={Jun.}, pages={5230-5238} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25653/25425", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25653", + "pdf_size": 123689, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9549841186526247931&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "illinois.edu;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "illinois.edu;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Amazon Web Services", + "aff_unique_dep": ";", + "aff_unique_url": "https://illinois.edu;https://aws.amazon.com", + "aff_unique_abbr": "UIUC;AWS", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26809", + "title": "Better Environments for Better AI", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Most past research aimed at increasing the capabilities of AI methods has focused exclusively on the AI agent itself, i.e., given some input, what are the improvements to the agent\u2019s reasoning that will yield the best possible output. In my research, I take a novel approach to increasing the capabilities of AI agents via the design of the environments in which they are intended to act. 
My methods for automated design identify the inherent capabilities and limitations of AI agents with respect to their environment and find the best way to modify the environment to account for those limitations and maximize the agents\u2019 performance.\n\nThe future will bring an ever increasing set of interactions between people and automated agents, whether at home, at the workplace, on the road, or across many other everyday settings. Autonomous vehicles, robotic tools, medical devices, and smart\nhomes, all allow ample opportunity for human-robot and multi-agent interactions. In these settings, recognizing what agents are trying to achieve, providing relevant assistance, and supporting an effective collaboration are essential tasks, and tasks\nthat can all be enhanced via careful environment design. However, the increasing complexity of the systems we use and the environments in which we operate makes devising good design solutions extremely challenging. This stresses the importance\nof developing automated design tools to help determine the most effective ways to apply change and enable robust AI systems. My long-term goal is to provide theoretical foundations for designing AI systems that are capable of effective partnership in sustainable and efficient collaborations of automated agents as well as of automated agents and people.", + "primary_area": "", + "author": "Sarah Keren", + "authorids": "", + "aff": "Technion - Israel Institute of Technology, Taub Faculty of Computer Science", + "bibtex": "@article{Keren_2024, title={Better Environments for Better AI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26809}, DOI={10.1609/aaai.v37i13.26809}, abstractNote={Most past research aimed at increasing the capabilities of AI methods has focused exclusively on the AI agent itself, i.e., given some input, what are the improvements to the agent\u2019s reasoning that will yield the best possible output. 
In my research, I take a novel approach to increasing the capabilities of AI agents via the design of the environments in which they are intended to act. My methods for automated design identify the inherent capabilities and limitations of AI agents with respect to their environment and find the best way to modify the environment to account for those limitations and maximize the agents\u2019 performance. The future will bring an ever increasing set of interactions between people and automated agents, whether at home, at the workplace, on the road, or across many other everyday settings. Autonomous vehicles, robotic tools, medical devices, and smart\nhomes, all allow ample opportunity for human-robot and multi-agent interactions. In these settings, recognizing what agents are trying to achieve, providing relevant assistance, and supporting an effective collaboration are essential tasks, and tasks\nthat can all be enhanced via careful environment design. However, the increasing complexity of the systems we use and the environments in which we operate makes devising good design solutions extremely challenging. This stresses the importance\nof developing automated design tools to help determine the most effective ways to apply change and enable robust AI systems. 
My long-term goal is to provide theoretical foundations for designing AI systems that are capable of effective partnership in sustainable and efficient collaborations of automated agents as well as of automated agents and people.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Keren, Sarah}, year={2024}, month={Jul.}, pages={15442-15442} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26809/26581", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26809", + "pdf_size": 38866, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17973656015169460605&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "technion.ac.il", + "email": "technion.ac.il", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Technion - Israel Institute of Technology", + "aff_unique_dep": "Taub Faculty of Computer Science", + "aff_unique_url": "https://www.technion.ac.il", + "aff_unique_abbr": "Technion", + "aff_country_unique_index": "0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25999", + "title": "Better Generalized Few-Shot Learning Even without Base Data", + "track": "main", + "status": "Technical", + "abstract": "This paper introduces and studies zero-base generalized few-shot learning (zero-base GFSL), which is an extreme yet practical version of few-shot learning problem. Motivated by the cases where base data is not available due to privacy or ethical issues, the goal of zero-base GFSL is to newly incorporate the knowledge of few samples of novel classes into a pretrained model without any samples of base classes. According to our analysis, we discover the fact that both mean and variance of the weight distribution of novel classes are not properly established, compared to those of base classes. 
The existing GFSL methods attempt to make the weight norms balanced, which we find help only the variance part, but discard the importance of mean of weights particularly for novel classes, leading to the limited performance in the GFSL problem even with base data. In this paper, we overcome this limitation by proposing a simple yet effective normalization method that can effectively control both mean and variance of the weight distribution of novel classes without using any base samples and thereby achieve a satisfactory performance on both novel and base classes. Our experimental results somewhat surprisingly show that the proposed zero-base GFSL method that does not utilize any base samples even outperforms the existing GFSL methods that make the best use of base data. Our implementation is available at: https://github.com/bigdata-inha/Zero-Base-GFSL.", + "primary_area": "machine learning ii", + "author": "Seong-Woong Kim; Dong-Wan Choi", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Inha University, South Korea; Department of Computer Science and Engineering, Inha University, South Korea", + "bibtex": "@article{Kim_Choi_2023, title={Better Generalized Few-Shot Learning Even without Base Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25999}, DOI={10.1609/aaai.v37i7.25999}, abstractNote={This paper introduces and studies zero-base generalized few-shot learning (zero-base GFSL), which is an extreme yet practical version of few-shot learning problem. Motivated by the cases where base data is not available due to privacy or ethical issues, the goal of zero-base GFSL is to newly incorporate the knowledge of few samples of novel classes into a pretrained model without any samples of base classes. According to our analysis, we discover the fact that both mean and variance of the weight distribution of novel classes are not properly established, compared to those of base classes. 
The existing GFSL methods attempt to make the weight norms balanced, which we find help only the variance part, but discard the importance of mean of weights particularly for novel classes, leading to the limited performance in the GFSL problem even with base data. In this paper, we overcome this limitation by proposing a simple yet effective normalization method that can effectively control both mean and variance of the weight distribution of novel classes without using any base samples and thereby achieve a satisfactory performance on both novel and base classes. Our experimental results somewhat surprisingly show that the proposed zero-base GFSL method that does not utilize any base samples even outperforms the existing GFSL methods that make the best use of base data. Our implementation is available at: https://github.com/bigdata-inha/Zero-Base-GFSL.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Seong-Woong and Choi, Dong-Wan}, year={2023}, month={Jun.}, pages={8282-8290} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25999/25771", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25999", + "pdf_size": 1031220, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8559796284218964993&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;inha.ac.kr", + "email": "gmail.com;inha.ac.kr", + "github": "https://github.com/bigdata-inha/Zero-Base-GFSL", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Inha University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.inha.edu", + "aff_unique_abbr": "Inha", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25757", + "title": "Better Peer Grading through Bayesian Inference", + "track": 
"main", + "status": "Technical", + "abstract": "Peer grading systems aggregate noisy reports from multiple students to approximate a \"true\" grade as closely as possible. Most current systems either take the mean or median of reported grades; others aim to estimate students\u2019 grading accuracy under a probabilistic model. This paper extends the state of the art in the latter approach in three key ways: \n(1) recognizing that students can behave strategically (e.g., reporting grades close to the class average without doing the work); (2) appropriately handling censored data that arises from discrete-valued grading rubrics; and (3) using mixed integer programming to improve the interpretability of the grades assigned to students. We demonstrate how to make Bayesian inference practical in this model and evaluate our approach on both synthetic and real-world data obtained by using our implemented system in four large classes. These extensive experiments show that grade aggregation using our model accurately estimates true grades, students' likelihood of submitting uninformative grades, and the variation in their inherent grading error; we also characterize our models' robustness.", + "primary_area": "humans and ai", + "author": "Hedayat Zarkoob; Greg d'Eon; Lena Podina; Kevin Leyton-Brown", + "authorids": "", + "aff": "Department of Computer Science, University of British Columbia; Department of Computer Science, University of British Columbia; Department of Computer Science, University of British Columbia + Cheriton School of Computer Science, University of Waterloo; Department of Computer Science, University of British Columbia", + "bibtex": "@article{Zarkoob_d\u2019Eon_Podina_Leyton-Brown_2023, title={Better Peer Grading through Bayesian Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25757}, DOI={10.1609/aaai.v37i5.25757}, abstractNote={Peer grading systems aggregate noisy reports from multiple students to approximate a \"true\" 
grade as closely as possible. Most current systems either take the mean or median of reported grades; others aim to estimate students\u2019 grading accuracy under a probabilistic model. This paper extends the state of the art in the latter approach in three key ways: (1) recognizing that students can behave strategically (e.g., reporting grades close to the class average without doing the work); (2) appropriately handling censored data that arises from discrete-valued grading rubrics; and (3) using mixed integer programming to improve the interpretability of the grades assigned to students. We demonstrate how to make Bayesian inference practical in this model and evaluate our approach on both synthetic and real-world data obtained by using our implemented system in four large classes. These extensive experiments show that grade aggregation using our model accurately estimates true grades, students\u2019 likelihood of submitting uninformative grades, and the variation in their inherent grading error; we also characterize our models\u2019 robustness.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zarkoob, Hedayat and d\u2019Eon, Greg and Podina, Lena and Leyton-Brown, Kevin}, year={2023}, month={Jun.}, pages={6137-6144} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25757/25529", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25757", + "pdf_size": 193500, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12037281555117332963&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.ubc.ca;cs.ubc.ca;uwaterloo.ca;cs.ubc.ca", + "email": "cs.ubc.ca;cs.ubc.ca;uwaterloo.ca;cs.ubc.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "University of British Columbia;University of Waterloo", + "aff_unique_dep": "Department of Computer Science;Cheriton School of Computer Science", + 
"aff_unique_url": "https://www.ubc.ca;https://uwaterloo.ca", + "aff_unique_abbr": "UBC;UWaterloo", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Vancouver;", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25298", + "title": "Better and Faster: Adaptive Event Conversion for Event-Based Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Event cameras are a kind of bio-inspired imaging sensor, which asynchronously collect sparse event streams with many advantages. In this paper, we focus on building better and faster event-based object detectors. To this end, we first propose a computationally efficient event representation Hyper Histogram, which adequately preserves both the polarity and temporal information of events. Then we devise an Adaptive Event Conversion module, which converts events into Hyper Histograms according to event density via an adaptive queue. Moreover, we introduce a novel event-based augmentation method Shadow Mosaic, which significantly improves the event sample diversity and enhances the generalization ability of detection models. We equip our proposed modules on three representative object detection models: YOLOv5, Deformable-DETR, and RetinaNet. 
Experimental results on three event-based detection datasets (1Mpx, Gen1, and MVSEC-NIGHTL21) demonstrate that our proposed approach outperforms other state-of-the-art methods by a large margin, while achieving a much faster running speed (< 14 ms and < 4 ms for 50 ms event data on the 1Mpx and Gen1 datasets).", + "primary_area": "computer vision ii", + "author": "Yansong Peng; Yueyi Zhang; Peilin Xiao; Xiaoyan Sun; Feng Wu", + "authorids": "", + "aff": "University of Science and Technology of China, Hefei, China, 230026; University of Science and Technology of China, Hefei, China, 230026; University of Science and Technology of China, Hefei, China, 230026; University of Science and Technology of China, Hefei, China, 230026; University of Science and Technology of China, Hefei, China, 230026", + "bibtex": "@article{Peng_Zhang_Xiao_Sun_Wu_2023, title={Better and Faster: Adaptive Event Conversion for Event-Based Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25298}, DOI={10.1609/aaai.v37i2.25298}, abstractNote={Event cameras are a kind of bio-inspired imaging sensor, which asynchronously collect sparse event streams with many advantages. In this paper, we focus on building better and faster event-based object detectors. To this end, we first propose a computationally efficient event representation Hyper Histogram, which adequately preserves both the polarity and temporal information of events. Then we devise an Adaptive Event Conversion module, which converts events into Hyper Histograms according to event density via an adaptive queue. Moreover, we introduce a novel event-based augmentation method Shadow Mosaic, which significantly improves the event sample diversity and enhances the generalization ability of detection models. We equip our proposed modules on three representative object detection models: YOLOv5, Deformable-DETR, and RetinaNet. 
Experimental results on three event-based detection datasets (1Mpx, Gen1, and MVSEC-NIGHTL21) demonstrate that our proposed approach outperforms other state-of-the-art methods by a large margin, while achieving a much faster running speed (< 14 ms and < 4 ms for 50 ms event data on the 1Mpx and Gen1 datasets).}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Yansong and Zhang, Yueyi and Xiao, Peilin and Sun, Xiaoyan and Wu, Feng}, year={2023}, month={Jun.}, pages={2056-2064} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25298/25070", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25298", + "pdf_size": 3601299, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17399265523337303767&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Hefei", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26212", + "title": "Beyond ADMM: A Unified Client-Variance-Reduced Adaptive Federated Learning Framework", + "track": "main", + "status": "Technical", + "abstract": "As a novel distributed learning paradigm, federated learning (FL) faces serious challenges in dealing with massive clients with heterogeneous data distribution and computation and communication resources. Various client-variance-reduction schemes and client sampling strategies have been respectively introduced to improve the robustness of FL. 
Among others, primal-dual algorithms such as the alternating direction of method multipliers (ADMM) have been found being resilient to data distribution and outperform most of the primal-only FL algorithms. However, the reason behind remains a mystery still. In this paper, we firstly reveal the fact that the federated ADMM is essentially a client-variance-reduced algorithm. While this explains the inherent robustness of federated ADMM, the vanilla version of it lacks the ability to be adaptive to the degree of client heterogeneity. Besides, the global model at the server under client sampling is biased which slows down the practical convergence. To go beyond ADMM, we propose a novel primal-dual FL algorithm, termed FedVRA, that allows one to adaptively control the variance-reduction level and biasness of the global model. In addition, FedVRA unifies several representative FL algorithms in the sense that they are either special instances of FedVRA or are close to it. Extensions of FedVRA to semi/un-supervised learning are also presented. Experiments based on (semi-)supervised image classification tasks demonstrate superiority of FedVRA over the existing schemes in learning scenarios with massive heterogeneous clients and client sampling.", + "primary_area": "machine learning iii", + "author": "Shuai Wang; Yanqing Xu; Zhiguo Wang; Tsung-Hui Chang; Tony Q. S. 
Quek; Defeng Sun", + "authorids": "", + "aff": "Information Systems Technology and Design, Singapore University of Technology and Design, 487372 Singapore; School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen 518172, China; College of Mathematics, Sichuan University, Chengdu, Sichuan 610064, China; School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen 518172, China; Information Systems Technology and Design, Singapore University of Technology and Design, 487372 Singapore; Department of Applied Mathematics, The Hong Kong Polytechnic University, Hong Kong", + "bibtex": "@article{Wang_Xu_Wang_Chang_Quek_Sun_2023, title={Beyond ADMM: A Unified Client-Variance-Reduced Adaptive Federated Learning Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26212}, DOI={10.1609/aaai.v37i8.26212}, abstractNote={As a novel distributed learning paradigm, federated learning (FL) faces serious challenges in dealing with massive clients with heterogeneous data distribution and computation and communication resources. Various client-variance-reduction schemes and client sampling strategies have been respectively introduced to improve the robustness of FL. Among others, primal-dual algorithms such as the alternating direction of method multipliers (ADMM) have been found being resilient to data distribution and outperform most of the primal-only FL algorithms. However, the reason behind remains a mystery still. In this paper, we firstly reveal the fact that the federated ADMM is essentially a client-variance-reduced algorithm. While this explains the inherent robustness of federated ADMM, the vanilla version of it lacks the ability to be adaptive to the degree of client heterogeneity. Besides, the global model at the server under client sampling is biased which slows down the practical convergence. 
To go beyond ADMM, we propose a novel primal-dual FL algorithm, termed FedVRA, that allows one to adaptively control the variance-reduction level and biasness of the global model. In addition, FedVRA unifies several representative FL algorithms in the sense that they are either special instances of FedVRA or are close to it. Extensions of FedVRA to semi/un-supervised learning are also presented. Experiments based on (semi-)supervised image classification tasks demonstrate superiority of FedVRA over the existing schemes in learning scenarios with massive heterogeneous clients and client sampling.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Shuai and Xu, Yanqing and Wang, Zhiguo and Chang, Tsung-Hui and Quek, Tony Q. S. and Sun, Defeng}, year={2023}, month={Jun.}, pages={10175-10183} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26212/25984", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26212", + "pdf_size": 1047643, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8435739030185248512&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "link.cuhk.edu.cn;cuhk.edu.cn;scu.edu.cn;cuhk.edu.cn;sutd.edu.sg;polyu.edu.hk", + "email": "link.cuhk.edu.cn;cuhk.edu.cn;scu.edu.cn;cuhk.edu.cn;sutd.edu.sg;polyu.edu.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;1;0;3", + "aff_unique_norm": "Singapore University of Technology and Design;The Chinese University of Hong Kong;Sichuan University;The Hong Kong Polytechnic University", + "aff_unique_dep": "Information Systems Technology and Design;School of Science and Engineering;College of Mathematics;Department of Applied Mathematics", + "aff_unique_url": "https://www.sutd.edu.sg;https://www.cuhk.edu.hk;https://www.scu.edu.cn;https://www.polyu.edu.hk", + "aff_unique_abbr": "SUTD;CUHK;SCU;PolyU", + "aff_campus_unique_index": "1;2;1;3", + "aff_campus_unique": 
";Shenzhen;Chengdu;Hong Kong", + "aff_country_unique_index": "0;1;1;1;0;1", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-26898", + "title": "Beyond Black-Boxes: Teaching Complex Machine Learning Ideas through Scaffolded Interactive Activities", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "Existing approaches to teaching artificial intelligence and machine learning (ML) often focus on the use of pre-trained models or fine-tuning an existing black-box architecture. We believe ML techniques and core ML topics, such as optimization and adversarial examples, can be designed for high school age students given appropriate support. Our curricular approach focuses on teaching ML ideas by enabling students to develop deep intuition about these complex concepts by first making them accessible to novices through interactive tools, pre-programmed games, and carefully designed programming activities. Then, students are able to engage with the concepts via meaningful, hands-on experiences that span the entire ML process from data collection to model optimization and inspection. This paper describes our 'AI & Cybersecurity for Teens' suite of curricular activities aimed at high school students and teachers.", + "primary_area": "", + "author": "Brian Broll; Shuchi Grover", + "authorids": "", + "aff": "Vanderbilt University; Looking Glass Ventures", + "bibtex": "@article{Broll_Grover_2024, title={Beyond Black-Boxes: Teaching Complex Machine Learning Ideas through Scaffolded Interactive Activities}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26898}, DOI={10.1609/aaai.v37i13.26898}, abstractNote={Existing approaches to teaching artificial intelligence and machine learning (ML) often focus on the use of pre-trained models or fine-tuning an existing black-box architecture. 
We believe ML techniques and core ML topics, such as optimization and adversarial examples, can be designed for high school age students given appropriate support. Our curricular approach focuses on teaching ML ideas by enabling students to develop deep intuition about these complex concepts by first making them accessible to novices through interactive tools, pre-programmed games, and carefully designed programming activities. Then, students are able to engage with the concepts via meaningful, hands-on experiences that span the entire ML process from data collection to model optimization and inspection. This paper describes our \u2019AI & Cybersecurity for Teens\u2019 suite of curricular activities aimed at high school students and teachers.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Broll, Brian and Grover, Shuchi}, year={2024}, month={Jul.}, pages={15990-15998} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26898/26670", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26898", + "pdf_size": 752087, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10543757278572955685&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "vanderbilt.edu;cs.stanford.edu", + "email": "vanderbilt.edu;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Vanderbilt University;Looking Glass Ventures", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.vanderbilt.edu;https://www.lookingglassventures.com", + "aff_unique_abbr": "Vanderbilt;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25593", + "title": "Beyond Graph Convolutional Network: An Interpretable Regularizer-Centered Optimization Framework", + "track": "main", + "status": "Technical", + "abstract": "Graph 
convolutional networks (GCNs) have been attracting widespread attentions due to their encouraging performance and powerful generalizations. However, few work provide a general view to interpret various GCNs and guide GCNs' designs. In this paper, by revisiting the original GCN, we induce an interpretable regularizer-centerd optimization framework, in which by building appropriate regularizers we can interpret most GCNs, such as APPNP, JKNet, DAGNN, and GNN-LF/HF. Further, under the proposed framework, we devise a dual-regularizer graph convolutional network (dubbed tsGCN) to capture topological and semantic structures from graph data. Since the derived learning rule for tsGCN contains an inverse of a large matrix and thus is time-consuming, we leverage the Woodbury matrix identity and low-rank approximation tricks to successfully decrease the high computational complexity of computing infinite-order graph convolutions. Extensive experiments on eight public datasets demonstrate that tsGCN achieves superior performance against quite a few state-of-the-art competitors w.r.t. 
classification tasks.", + "primary_area": "data mining and knowledge management", + "author": "Shiping Wang; Zhihao Wu; Yuhong Chen; Yong Chen", + "authorids": "", + "aff": "1College of Computer and Data Science, Fuzhou University, China+2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, China; 1College of Computer and Data Science, Fuzhou University, China+2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, China; 1College of Computer and Data Science, Fuzhou University, China+2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, China; 3School of Computer Science, Beijing University of Posts and Telecommunications, China", + "bibtex": "@article{Wang_Wu_Chen_Chen_2023, title={Beyond Graph Convolutional Network: An Interpretable Regularizer-Centered Optimization Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25593}, DOI={10.1609/aaai.v37i4.25593}, abstractNote={Graph convolutional networks (GCNs) have been attracting widespread attentions due to their encouraging performance and powerful generalizations. However, few work provide a general view to interpret various GCNs and guide GCNs\u2019 designs. In this paper, by revisiting the original GCN, we induce an interpretable regularizer-centerd optimization framework, in which by building appropriate regularizers we can interpret most GCNs, such as APPNP, JKNet, DAGNN, and GNN-LF/HF. Further, under the proposed framework, we devise a dual-regularizer graph convolutional network (dubbed tsGCN) to capture topological and semantic structures from graph data. 
Since the derived learning rule for tsGCN contains an inverse of a large matrix and thus is time-consuming, we leverage the Woodbury matrix identity and low-rank approximation tricks to successfully decrease the high computational complexity of computing infinite-order graph convolutions. Extensive experiments on eight public datasets demonstrate that tsGCN achieves superior performance against quite a few state-of-the-art competitors w.r.t. classification tasks.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Shiping and Wu, Zhihao and Chen, Yuhong and Chen, Yong}, year={2023}, month={Jun.}, pages={4693-4701} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25593/25365", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25593", + "pdf_size": 4348442, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3945472843616421259&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "163.com;gmail.com;163.com;gmail.com", + "email": "163.com;gmail.com;163.com;gmail.com", + "github": "https://github.com/ZhihaoWu99/tsGCN", + "project": "https://arxiv.org/abs/2301.04318", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;1", + "aff_unique_norm": "Fuzhou University;Beijing University of Posts and Telecommunications", + "aff_unique_dep": "College of Computer and Data Science;School of Computer Science", + "aff_unique_url": ";http://www.bupt.edu.cn/", + "aff_unique_abbr": ";BUPT", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Fuzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26778", + "title": "Beyond NaN: Resiliency of Optimization Layers in the Face of Infeasibility", + "track": "aaai special track", + "status": "Technical", + "abstract": "Prior work has successfully incorporated optimization layers as the last layer in neural networks for various problems, thereby 
allowing joint learning and planning in one neural network forward pass. In this work, we identify a weakness in such a set-up where inputs to the optimization layer lead to undefined output of the neural network. Such undefined decision outputs can lead to possible catastrophic outcomes in critical real time applications. We show that an adversary can cause such failures by forcing rank deficiency on the matrix fed to the optimization layer which results in the optimization failing to produce a solution. We provide a defense for the failure cases by controlling the condition number of the input matrix. We study the problem in the settings of synthetic data, Jigsaw Sudoku, and in speed planning for autonomous driving. We show that our proposed defense effectively prevents the framework from failing with undefined output. Finally, we surface a number of edge cases which lead to serious bugs in popular optimization solvers which can be abused as well.", + "primary_area": "safe and robust ai", + "author": "Wai Tuck Wong; Sarah Kinsey; Ramesha Karunasena; Thanh H. Nguyen; Arunesh Sinha", + "authorids": "", + "aff": "Singapore Management University; University of Oregon; Singapore Management University; University of Oregon; Rutgers University", + "bibtex": "@article{Wong_Kinsey_Karunasena_Nguyen_Sinha_2023, title={Beyond NaN: Resiliency of Optimization Layers in the Face of Infeasibility}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26778}, DOI={10.1609/aaai.v37i12.26778}, abstractNote={Prior work has successfully incorporated optimization layers as the last layer in neural networks for various problems, thereby allowing joint learning and planning in one neural network forward pass. In this work, we identify a weakness in such a set-up where inputs to the optimization layer lead to undefined output of the neural network. Such undefined decision outputs can lead to possible catastrophic outcomes in critical real time applications. 
We show that an adversary can cause such failures by forcing rank deficiency on the matrix fed to the optimization layer which results in the optimization failing to produce a solution. We provide a defense for the failure cases by controlling the condition number of the input matrix. We study the problem in the settings of synthetic data, Jigsaw Sudoku, and in speed planning for autonomous driving. We show that our proposed defense effectively prevents the framework from failing with undefined output. Finally, we surface a number of edge cases which lead to serious bugs in popular optimization solvers which can be abused as well.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wong, Wai Tuck and Kinsey, Sarah and Karunasena, Ramesha and Nguyen, Thanh H. and Sinha, Arunesh}, year={2023}, month={Jun.}, pages={15242-15250} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26778/26550", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26778", + "pdf_size": 1161840, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:j7nqv3BspbYJ:scholar.google.com/&scioq=Beyond+NaN:+Resiliency+of+Optimization+Layers+in+the+Face+of+Infeasibility&hl=en&as_sdt=0,5", + "gs_version_total": 9, + "aff_domain": "msc.smu.edu.sg;gmail.com;smu.edu.sg;cs.uoregon.edu;rutgers.edu", + "email": "msc.smu.edu.sg;gmail.com;smu.edu.sg;cs.uoregon.edu;rutgers.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;2", + "aff_unique_norm": "Singapore Management University;University of Oregon;Rutgers University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.smu.edu.sg;https://www.uoregon.edu;https://www.rutgers.edu", + "aff_unique_abbr": "SMU;UO;Rutgers", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1;1", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "article-25573", + "title": 
"Beyond Smoothing: Unsupervised Graph Representation Learning with Edge Heterophily Discriminating", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised graph representation learning (UGRL) has drawn increasing research attention and achieved promising results in several graph analytic tasks. Relying on the homophily assumption, existing UGRL methods tend to smooth the learned node representations along all edges, ignoring the existence of heterophilic edges that connect nodes with distinct attributes. As a result, current methods are hard to generalize to heterophilic graphs where dissimilar nodes are widely connected, and also vulnerable to adversarial attacks. To address this issue, we propose a novel unsupervised Graph Representation learning method with Edge hEterophily discriminaTing (GREET) which learns representations by discriminating and leveraging homophilic edges and heterophilic edges. To distinguish two types of edges, we build an edge discriminator that infers edge homophily/heterophily from feature and structure information. We train the edge discriminator in an unsupervised way through minimizing the crafted pivot-anchored ranking loss, with randomly sampled node pairs acting as pivots. Node representations are learned through contrasting the dual-channel encodings obtained from the discriminated homophilic and heterophilic edges. With an effective interplaying scheme, edge discriminating and representation learning can mutually boost each other during the training phase. 
We conducted extensive experiments on 14 benchmark datasets and multiple learning scenarios to demonstrate the superiority of GREET.", + "primary_area": "data mining and knowledge management", + "author": "Yixin Liu; Yizhen Zheng; Daokun Zhang; Vincent CS Lee; Shirui Pan", + "authorids": "", + "aff": "Monash University, Australia; Monash University, Australia; Monash University, Australia; Monash University, Australia; Griffith University, Australia", + "bibtex": "@article{Liu_Zheng_Zhang_Lee_Pan_2023, title={Beyond Smoothing: Unsupervised Graph Representation Learning with Edge Heterophily Discriminating}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25573}, DOI={10.1609/aaai.v37i4.25573}, abstractNote={Unsupervised graph representation learning (UGRL) has drawn increasing research attention and achieved promising results in several graph analytic tasks. Relying on the homophily assumption, existing UGRL methods tend to smooth the learned node representations along all edges, ignoring the existence of heterophilic edges that connect nodes with distinct attributes. As a result, current methods are hard to generalize to heterophilic graphs where dissimilar nodes are widely connected, and also vulnerable to adversarial attacks. To address this issue, we propose a novel unsupervised Graph Representation learning method with Edge hEterophily discriminaTing (GREET) which learns representations by discriminating and leveraging homophilic edges and heterophilic edges. To distinguish two types of edges, we build an edge discriminator that infers edge homophily/heterophily from feature and structure information. We train the edge discriminator in an unsupervised way through minimizing the crafted pivot-anchored ranking loss, with randomly sampled node pairs acting as pivots. Node representations are learned through contrasting the dual-channel encodings obtained from the discriminated homophilic and heterophilic edges. 
With an effective interplaying scheme, edge discriminating and representation learning can mutually boost each other during the training phase. We conducted extensive experiments on 14 benchmark datasets and multiple learning scenarios to demonstrate the superiority of GREET.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yixin and Zheng, Yizhen and Zhang, Daokun and Lee, Vincent CS and Pan, Shirui}, year={2023}, month={Jun.}, pages={4516-4524} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25573/25345", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25573", + "pdf_size": 5126579, + "gs_citation": 96, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7316053768766123751&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "monash.edu;monash.edu;monash.edu;monash.edu;griffith.edu.au", + "email": "monash.edu;monash.edu;monash.edu;monash.edu;griffith.edu.au", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Monash University;Griffith University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.monash.edu;https://www.griffith.edu.au", + "aff_unique_abbr": "Monash;Griffith", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25383", + "title": "Bi-directional Feature Reconstruction Network for Fine-Grained Few-Shot Image Classification", + "track": "main", + "status": "Technical", + "abstract": "The main challenge for fine-grained few-shot image classification is to learn feature representations with higher inter-class and lower intra-class variations, with a mere few labelled samples. 
Conventional few-shot learning methods however cannot be naively adopted for this fine-grained setting -- a quick pilot study reveals that they in fact push for the opposite (i.e., lower inter-class variations and higher intra-class variations). To alleviate this problem, prior works predominately use a support set to reconstruct the query image and then utilize metric learning to determine its category. Upon careful inspection, we further reveal that such unidirectional reconstruction methods only help to increase inter-class variations and are not effective in tackling intra-class variations. In this paper, we for the first time introduce a bi-reconstruction mechanism that can simultaneously accommodate for inter-class and intra-class variations. In addition to using the support set to reconstruct the query set for increasing inter-class variations, we further use the query set to reconstruct the support set for reducing intra-class variations. This design effectively helps the model to explore more subtle and discriminative features which is key for the fine-grained problem in hand. Furthermore, we also construct a self-reconstruction module to work alongside the bi-directional module to make the features even more discriminative. Experimental results on three widely used fine-grained image classification datasets consistently show considerable improvements compared with other methods. 
Codes are available at: https://github.com/PRIS-CV/Bi-FRN.", + "primary_area": "computer vision iii", + "author": "Jijie Wu; Dongliang Chang; Aneeshan Sain; Xiaoxu Li; Zhanyu Ma; Jie Cao; Jun Guo; Yi-Zhe Song", + "authorids": "", + "aff": "School of Computer and Communications, Lanzhou University of Technology, Lanzhou, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China; SketchX, CVSSP, University of Surrey, United Kingdom; School of Computer and Communications, Lanzhou University of Technology, Lanzhou, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer and Communications, Lanzhou University of Technology, Lanzhou, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China; SketchX, CVSSP, University of Surrey, United Kingdom", + "bibtex": "@article{Wu_Chang_Sain_Li_Ma_Cao_Guo_Song_2023, title={Bi-directional Feature Reconstruction Network for Fine-Grained Few-Shot Image Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25383}, DOI={10.1609/aaai.v37i3.25383}, abstractNote={The main challenge for fine-grained few-shot image classification is to learn feature representations with higher inter-class and lower intra-class variations, with a mere few labelled samples. Conventional few-shot learning methods however cannot be naively adopted for this fine-grained setting -- a quick pilot study reveals that they in fact push for the opposite (i.e., lower inter-class variations and higher intra-class variations). To alleviate this problem, prior works predominately use a support set to reconstruct the query image and then utilize metric learning to determine its category. 
Upon careful inspection, we further reveal that such unidirectional reconstruction methods only help to increase inter-class variations and are not effective in tackling intra-class variations. In this paper, we for the first time introduce a bi-reconstruction mechanism that can simultaneously accommodate for inter-class and intra-class variations. In addition to using the support set to reconstruct the query set for increasing inter-class variations, we further use the query set to reconstruct the support set for reducing intra-class variations. This design effectively helps the model to explore more subtle and discriminative features which is key for the fine-grained problem in hand. Furthermore, we also construct a self-reconstruction module to work alongside the bi-directional module to make the features even more discriminative. Experimental results on three widely used fine-grained image classification datasets consistently show considerable improvements compared with other methods. 
Codes are available at: https://github.com/PRIS-CV/Bi-FRN.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Jijie and Chang, Dongliang and Sain, Aneeshan and Li, Xiaoxu and Ma, Zhanyu and Cao, Jie and Guo, Jun and Song, Yi-Zhe}, year={2023}, month={Jun.}, pages={2821-2829} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25383/25155", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25383", + "pdf_size": 1162184, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8943357533801484125&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "lut.edu.cn;bupt.edu.cn;surrey.ac.uk;lut.edu.cn;bupt.edu.cn;lut.edu.cn;bupt.edu.cn;surrey.ac.uk", + "email": "lut.edu.cn;bupt.edu.cn;surrey.ac.uk;lut.edu.cn;bupt.edu.cn;lut.edu.cn;bupt.edu.cn;surrey.ac.uk", + "github": "https://github.com/PRIS-CV/Bi-FRN", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;0;1;0;1;2", + "aff_unique_norm": "Lanzhou University of Technology;Beijing University of Posts and Telecommunications;University of Surrey", + "aff_unique_dep": "School of Computer and Communications;School of Artificial Intelligence;CVSSP", + "aff_unique_url": ";http://www.bupt.edu.cn/;https://www.surrey.ac.uk", + "aff_unique_abbr": ";BUPT;Surrey", + "aff_campus_unique_index": "0;1;0;1;0;1", + "aff_campus_unique": "Lanzhou;Beijing;", + "aff_country_unique_index": "0;0;1;0;0;0;0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-27080", + "title": "BiRDy: Bullying Role Detection in Multi-Party Chats", + "track": "demonstrations", + "status": "Technical", + "abstract": "Recent studies have highlighted that private instant messaging platforms and channels are major media of cyber aggression, especially among teens. Due to the private nature of the verbal exchanges on these media, few studies have addressed the task of hate speech detection in this context. 
Moreover, the recent release of resources mimicking online aggression situations that may occur among teens on private instant messaging platforms is encouraging the development of solutions aiming at dealing with diversity in digital harassment. In this study, we present BiRDy: a fully Web-based platform performing participant role detection in multi-party chats. Leveraging the pre-trained language model mBERT (multilingual BERT), we release fine-tuned models relying on various contextual window strategies to classify exchanged messages according to the role of involvement in cyberbullying of the authors. Integrating a role scoring function, the proposed pipeline predicts a unique role for each chat participant. In addition, detailed confidence scoring are displayed. Currently, BiRDy publicly releases models for French and Italian.", + "primary_area": "", + "author": "Ana\u00efs Ollagnier; Elena Cabrio; Serena Villata; Sara Tonelli", + "authorids": "", + "aff": "Universit\u00e9 C\u00f4te d\u2019Azur, Inria, CNRS, I3S; Universit\u00e9 C\u00f4te d\u2019Azur, Inria, CNRS, I3S; Universit\u00e9 C\u00f4te d\u2019Azur, Inria, CNRS, I3S; Fondazione Bruno Kessler", + "bibtex": "@article{Ollagnier_Cabrio_Villata_Tonelli_2024, title={BiRDy: Bullying Role Detection in Multi-Party Chats}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27080}, DOI={10.1609/aaai.v37i13.27080}, abstractNote={Recent studies have highlighted that private instant messaging platforms and channels are major media of cyber aggression, especially among teens. Due to the private nature of the verbal exchanges on these media, few studies have addressed the task of hate speech detection in this context. Moreover, the recent release of resources mimicking online aggression situations that may occur among teens on private instant messaging platforms is encouraging the development of solutions aiming at dealing with diversity in digital harassment. 
In this study, we present BiRDy: a fully Web-based platform performing participant role detection in multi-party chats. Leveraging the pre-trained language model mBERT (multilingual BERT), we release fine-tuned models relying on various contextual window strategies to classify exchanged messages according to the role of involvement in cyberbullying of the authors. Integrating a role scoring function, the proposed pipeline predicts a unique role for each chat participant. In addition, detailed confidence scoring are displayed. Currently, BiRDy publicly releases models for French and Italian.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ollagnier, Ana\u00efs and Cabrio, Elena and Villata, Serena and Tonelli, Sara}, year={2024}, month={Jul.}, pages={16464-16466} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27080/26852", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27080", + "pdf_size": 116683, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10407562970728261860&as_sdt=800005&sciodt=0,15&hl=en", + "gs_version_total": 8, + "aff_domain": "inria.fr;inria.fr;inria.fr;fbk.eu", + "email": "inria.fr;inria.fr;inria.fr;fbk.eu", + "github": "", + "project": "http://134.59.134.227/demo prd/index.html", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Universit\u00e9 C\u00f4te d\u2019Azur;Fondazione Bruno Kessler", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.univ-cotedazur.fr;https://www.fbk.eu", + "aff_unique_abbr": "UCA;FBK", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "France;Italy" + }, + { + "id": "article-25679", + "title": "Bidding Graph Games with Partially-Observable Budgets", + "track": "main", + "status": "Technical", + "abstract": "Two-player zero-sum \"graph games\" are central in logic, verification, and multi-agent systems. 
The game proceeds by placing a token on a vertex of a graph, and allowing the players to move it to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In \"bidding games\", however, the players have budgets and in each turn, an auction (bidding) determines which player moves the token. So far, bidding games have only been studied as full-information games. \nIn this work we initiate the study of partial-information bidding games: we study bidding games in which a player's initial budget is drawn from a known probability distribution. \nWe show that while for some bidding mechanisms and objectives, it is straightforward to adapt the results from the full-information setting to the partial-information setting, for others, the analysis is significantly more challenging, requires new techniques, and gives rise to interesting results. \nSpecifically, we study games with \"mean-payoff\" objectives in combination with \"poorman\" bidding. We construct optimal strategies for a partially-informed player who plays against a fully-informed adversary. We show that, somewhat surprisingly, the \"value\" under pure strategies does not necessarily exist in such games.", + "primary_area": "game theory and economic paradigms", + "author": "Guy Avni; Ismael Jecker; \u0110or\u0111e \u017dikeli\u0107", + "authorids": "", + "aff": "University of Haifa; University of Warsaw; Institute of Science and Technology Austria (ISTA)", + "bibtex": "@article{Avni_Jecker_\u017dikeli\u0107_2023, title={Bidding Graph Games with Partially-Observable Budgets}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25679}, DOI={10.1609/aaai.v37i5.25679}, abstractNote={Two-player zero-sum "graph games" are central in logic, verification, and multi-agent systems. 
The game proceeds by placing a token on a vertex of a graph, and allowing the players to move it to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In "bidding games", however, the players have budgets and in each turn, an auction (bidding) determines which player moves the token. So far, bidding games have only been studied as full-information games. In this work we initiate the study of partial-information bidding games: we study bidding games in which a player\u2019s initial budget is drawn from a known probability distribution. We show that while for some bidding mechanisms and objectives, it is straightforward to adapt the results from the full-information setting to the partial-information setting, for others, the analysis is significantly more challenging, requires new techniques, and gives rise to interesting results. Specifically, we study games with "mean-payoff" objectives in combination with "poorman" bidding. We construct optimal strategies for a partially-informed player who plays against a fully-informed adversary. 
We show that, somewhat surprisingly, the "value" under pure strategies does not necessarily exist in such games.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Avni, Guy and Jecker, Ismael and \u017dikeli\u0107, \u0110or\u0111e}, year={2023}, month={Jun.}, pages={5464-5471} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25679/25451", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25679", + "pdf_size": 159825, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17498083728521468171&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "University of Haifa;University of Warsaw;Institute of Science and Technology Austria", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.haifa.ac.il;https://www.uw.edu.pl;https://www.ista.ac.at", + "aff_unique_abbr": "UoH;UW;ISTA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2", + "aff_country_unique": "Israel;Poland;Austria" + }, + { + "id": "article-25193", + "title": "Bidirectional Domain Mixup for Domain Adaptive Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Mixup provides interpolated training samples and allows the model to obtain smoother decision boundaries for better generalization. The idea can be naturally applied to the domain adaptation task, where we can mix the source and target samples to obtain domain-mixed samples for better adaptation. However, the extension of the idea from classification to segmentation (i.e., structured output) is nontrivial. This paper systematically studies the impact of mixup under the domain adaptive semantic segmentation task and presents a simple yet effective mixup strategy called Bidirectional Domain Mixup (BDM). 
In specific, we achieve domain mixup in two-step: cut and paste. Given the warm-up model trained from any adaptation techniques, we forward the source and target samples and perform a simple threshold-based cut out of the unconfident regions (cut). After then, we fill-in the dropped regions with the other domain region patches (paste). In doing so, we jointly consider class distribution, spatial structure, and pseudo label confidence. Based on our analysis, we found that BDM leaves domain transferable regions by cutting, balances the dataset-level class distribution while preserving natural scene context by pasting. We coupled our proposal with various state-of-the-art adaptation models and observe significant improvement consistently. We also provide extensive ablation experiments to empirically verify our main components of the framework. Visit our project page with the code at https://sites.google.com/view/bidirectional-domain-mixup", + "primary_area": "computer vision i", + "author": "Daehan Kim; Minseok Seo; Kwanyong Park; Inkyu Shin; Sanghyun Woo; In So Kweon; Dong-Geol Choi", + "authorids": "", + "aff": "Hanbat National University; SI Analytics; Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST); Hanbat National University", + "bibtex": "@article{Kim_Seo_Park_Shin_Woo_Kweon_Choi_2023, title={Bidirectional Domain Mixup for Domain Adaptive Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25193}, DOI={10.1609/aaai.v37i1.25193}, abstractNote={Mixup provides interpolated training samples and allows the model to obtain smoother decision boundaries for better generalization. The idea can be naturally applied to the domain adaptation task, where we can mix the source and target samples to obtain domain-mixed samples for better adaptation. 
However, the extension of the idea from classification to segmentation (i.e., structured output) is nontrivial. This paper systematically studies the impact of mixup under the domain adaptive semantic segmentation task and presents a simple yet effective mixup strategy called Bidirectional Domain Mixup (BDM). In specific, we achieve domain mixup in two-step: cut and paste. Given the warm-up model trained from any adaptation techniques, we forward the source and target samples and perform a simple threshold-based cut out of the unconfident regions (cut). After then, we fill-in the dropped regions with the other domain region patches (paste). In doing so, we jointly consider class distribution, spatial structure, and pseudo label confidence. Based on our analysis, we found that BDM leaves domain transferable regions by cutting, balances the dataset-level class distribution while preserving natural scene context by pasting. We coupled our proposal with various state-of-the-art adaptation models and observe significant improvement consistently. We also provide extensive ablation experiments to empirically verify our main components of the framework. 
Visit our project page with the code at https://sites.google.com/view/bidirectional-domain-mixup}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Daehan and Seo, Minseok and Park, Kwanyong and Shin, Inkyu and Woo, Sanghyun and Kweon, In So and Choi, Dong-Geol}, year={2023}, month={Jun.}, pages={1114-1123} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25193/24965", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25193", + "pdf_size": 7527713, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5287781601258165757&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "edu.hanbat.ac.kr;si-analytics.ai;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;hanbat.ac.kr", + "email": "edu.hanbat.ac.kr;si-analytics.ai;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;hanbat.ac.kr", + "github": "", + "project": "https://sites.google.com/view/bidirectional-domain-mixup", + "author_num": 7, + "aff_unique_index": "0;1;2;2;2;2;0", + "aff_unique_norm": "Hanbat National University;SI Analytics;Korea Advanced Institute of Science and Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.hanbat.ac.kr;;https://www.kaist.ac.kr", + "aff_unique_abbr": "HNU;;KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "article-25109", + "title": "Bidirectional Optical Flow NeRF: High Accuracy and High Quality under Fewer Views", + "track": "main", + "status": "Technical", + "abstract": "Neural Radiance Fields (NeRF) can implicitly represent 3D-consistent RGB images and geometric by optimizing an underlying continuous volumetric scene function using a sparse set of input views, which has greatly benefited view synthesis tasks. 
However, NeRF fails to estimate correct geometry when given fewer views, resulting in failure to synthesize novel views. Existing works rely on introducing depth images or adding depth estimation networks to resolve the problem of poor synthetic view in NeRF with fewer views. However, due to the lack of spatial consistency of the single-depth image and the poor performance of depth estimation with fewer views, the existing methods still have challenges in addressing this problem. So this paper proposes Bidirectional Optical Flow NeRF(BOF-NeRF), which addresses this problem by mining optical flow information between 2D images. Our key insight is that utilizing 2D optical flow images to design a loss can effectively guide NeRF to learn the correct geometry and synthesize the right novel view. We also propose a view-enhanced fusion method based on geometry and color consistency to solve the problem of novel view details loss in NeRF. We conduct extensive experiments on the NeRF-LLFF and DTU MVS benchmarks for novel view synthesis tasks with fewer images in different complex real scenes. We further demonstrate the robustness of BOF-NeRF under different baseline distances on the Middlebury dataset. 
In all cases, BOF-NeRF outperforms current state-of-the-art baselines for novel view synthesis and scene geometry estimation.", + "primary_area": "computer vision i", + "author": "Shuo Chen; Binbin Yan; Xinzhu Sang; Duo Chen; Peng Wang; Xiao Guo; Chongli Zhong; Huaming Wan", + "authorids": "", + "aff": "State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications; State Key Laboratory of Information Photonics and Optical Communications, Beijing University of Posts and Telecommunications", + "bibtex": "@article{Chen_Yan_Sang_Chen_Wang_Guo_Zhong_Wan_2023, title={Bidirectional Optical Flow NeRF: High Accuracy and High Quality under Fewer Views}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25109}, DOI={10.1609/aaai.v37i1.25109}, abstractNote={Neural Radiance Fields (NeRF) can implicitly represent 3D-consistent RGB images and geometric by optimizing an underlying continuous volumetric scene function using a sparse set of input views, which has greatly benefited view synthesis tasks. However, NeRF fails to estimate correct geometry when given fewer views, resulting in failure to synthesize novel views. 
Existing works rely on introducing depth images or adding depth estimation networks to resolve the problem of poor synthetic view in NeRF with fewer views. However, due to the lack of spatial consistency of the single-depth image and the poor performance of depth estimation with fewer views, the existing methods still have challenges in addressing this problem. So this paper proposes Bidirectional Optical Flow NeRF(BOF-NeRF), which addresses this problem by mining optical flow information between 2D images. Our key insight is that utilizing 2D optical flow images to design a loss can effectively guide NeRF to learn the correct geometry and synthesize the right novel view. We also propose a view-enhanced fusion method based on geometry and color consistency to solve the problem of novel view details loss in NeRF. We conduct extensive experiments on the NeRF-LLFF and DTU MVS benchmarks for novel view synthesis tasks with fewer images in different complex real scenes. We further demonstrate the robustness of BOF-NeRF under different baseline distances on the Middlebury dataset. 
In all cases, BOF-NeRF outperforms current state-of-the-art baselines for novel view synthesis and scene geometry estimation.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Shuo and Yan, Binbin and Sang, Xinzhu and Chen, Duo and Wang, Peng and Guo, Xiao and Zhong, Chongli and Wan, Huaming}, year={2023}, month={Jun.}, pages={359-368} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25109/24881", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25109", + "pdf_size": 18154151, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10798177747775739637&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "State Key Laboratory of Information Photonics and Optical Communications", + "aff_unique_url": "http://www.bupt.edu.cn", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26119", + "title": "Bilinear Exponential Family of MDPs: Frequentist Regret Bound with Tractable Exploration & Planning", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of episodic reinforcement learning in continuous state-action spaces with unknown rewards and transitions. Specifically, we consider the setting where the rewards and transitions are modeled using parametric bilinear exponential families. 
We propose an algorithm, that a) uses penalized maximum likelihood estimators to learn the unknown parameters, b) injects a calibrated Gaussian noise in the parameter of rewards to ensure exploration, and c) leverages linearity of the bilinear exponential family transitions with respect to an underlying RKHS to perform tractable planning. We provide a frequentist regret upper-bound for our algorithm which, in the case of tabular MDPs, is order-optimal with respect to H and K, where H is the episode length and K is the number of episodes. Our analysis improves the existing bounds for the bilinear exponential family of MDPs by square root of H and removes the handcrafted clipping deployed in existing RLSVI-type algorithms.", + "primary_area": "machine learning iii", + "author": "Reda Ouhamma; Debabrota Basu; Odalric Maillard", + "authorids": "", + "aff": "Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9189 - CRIStAL, F-59000; Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9189 - CRIStAL, F-59000; Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9189 - CRIStAL, F-59000", + "bibtex": "@article{Ouhamma_Basu_Maillard_2023, title={Bilinear Exponential Family of MDPs: Frequentist Regret Bound with Tractable Exploration & Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26119}, DOI={10.1609/aaai.v37i8.26119}, abstractNote={We study the problem of episodic reinforcement learning in continuous state-action spaces with unknown rewards and transitions. Specifically, we consider the setting where the rewards and transitions are modeled using parametric bilinear exponential families. We propose an algorithm, that a) uses penalized maximum likelihood estimators to learn the unknown parameters, b) injects a calibrated Gaussian noise in the parameter of rewards to ensure exploration, and c) leverages linearity of the bilinear exponential family transitions with respect to an underlying RKHS to perform tractable planning. 
We provide a frequentist regret upper-bound for our algorithm which, in the case of tabular MDPs, is order-optimal with respect to H and K, where H is the episode length and K is the number of episodes. Our analysis improves the existing bounds for the bilinear exponential family of MDPs by square root of H and removes the handcrafted clipping deployed in existing RLSVI-type algorithms.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ouhamma, Reda and Basu, Debabrota and Maillard, Odalric}, year={2023}, month={Jun.}, pages={9336-9344} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26119/25891", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26119", + "pdf_size": 196322, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12765293624561471479&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 16, + "aff_domain": "gmail.com;inria.fr;inria.fr", + "email": "gmail.com;inria.fr;inria.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Lille", + "aff_unique_dep": "UMR 9189 - CRIStAL", + "aff_unique_url": "https://www.univ-lille.fr", + "aff_unique_abbr": "Univ. Lille", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-25896", + "title": "Black-Box Adversarial Attack on Time Series Classification", + "track": "main", + "status": "Technical", + "abstract": "With the increasing use of deep neural network (DNN) in time series classification (TSC), recent work reveals the threat of adversarial attack, where the adversary can construct adversarial examples to cause model mistakes. However, existing researches on the adversarial attack of TSC typically adopt an unrealistic white-box setting with model details transparent to the adversary. 
In this work, we study a more rigorous black-box setting with attack detection applied, which restricts gradient access and requires the adversarial example to be also stealthy. Theoretical analyses reveal that the key lies in: estimating black-box gradient with diversity and non-convexity of TSC models resolved, and restricting the l0 norm of the perturbation to construct adversarial samples. Towards this end, we propose a new framework named BlackTreeS, which solves the hard optimization issue for adversarial example construction with two simple yet effective modules. In particular, we propose a tree search strategy to find influential positions in a sequence, and independently estimate the black-box gradients for these positions. Extensive experiments on three real-world TSC datasets and five DNN based models validate the effectiveness of BlackTreeS, e.g., it improves the attack success rate from 19.3% to 27.3%, and decreases the detection success rate from 90.9% to 6.8% for LSTM on the UWave dataset.", + "primary_area": "machine learning i", + "author": "Daizong Ding; Mi Zhang; Fuli Feng; Yuanmin Huang; Erling Jiang; Min Yang", + "authorids": "", + "aff": "School of Computer Science, Fudan University, China; School of Computer Science, Fudan University, China; University of Science and Technology of China; School of Computer Science, Fudan University, China; School of Computer Science, Fudan University, China; School of Computer Science, Fudan University, China", + "bibtex": "@article{Ding_Zhang_Feng_Huang_Jiang_Yang_2023, title={Black-Box Adversarial Attack on Time Series Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25896}, DOI={10.1609/aaai.v37i6.25896}, abstractNote={With the increasing use of deep neural network (DNN) in time series classification (TSC), recent work reveals the threat of adversarial attack, where the adversary can construct adversarial examples to cause model mistakes. 
However, existing researches on the adversarial attack of TSC typically adopt an unrealistic white-box setting with model details transparent to the adversary. In this work, we study a more rigorous black-box setting with attack detection applied, which restricts gradient access and requires the adversarial example to be also stealthy. Theoretical analyses reveal that the key lies in: estimating black-box gradient with diversity and non-convexity of TSC models resolved, and restricting the l0 norm of the perturbation to construct adversarial samples. Towards this end, we propose a new framework named BlackTreeS, which solves the hard optimization issue for adversarial example construction with two simple yet effective modules. In particular, we propose a tree search strategy to find influential positions in a sequence, and independently estimate the black-box gradients for these positions. Extensive experiments on three real-world TSC datasets and five DNN based models validate the effectiveness of BlackTreeS, e.g., it improves the attack success rate from 19.3% to 27.3%, and decreases the detection success rate from 90.9% to 6.8% for LSTM on the UWave dataset.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Daizong and Zhang, Mi and Feng, Fuli and Huang, Yuanmin and Jiang, Erling and Yang, Min}, year={2023}, month={Jun.}, pages={7358-7368} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25896/25668", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25896", + "pdf_size": 822873, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15205694820929392384&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;gmail.com;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;gmail.com;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 6, + 
"aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Fudan University;University of Science and Technology of China", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.fudan.edu.cn;http://www.ustc.edu.cn", + "aff_unique_abbr": "Fudan;USTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26835", + "title": "Blending Advertising with Organic Content in E-commerce via Virtual Bids", + "track": "iaai technical track", + "status": "Technical", + "abstract": "It has become increasingly common that sponsored content (i.e., paid ads) and non-sponsored content are jointly displayed to users, especially on e-commerce platforms. Thus, both of these contents may interact together to influence their engagement behaviors. In general, sponsored content helps brands achieve their marketing goals and provides ad revenue to the platforms. In contrast, non-sponsored content contributes to the long-term health of the platform through increasing users' engagement. A key conundrum to platforms is learning how to blend both of these contents allowing their interactions to be considered and balancing these business objectives. This paper proposes a system built for this purpose and applied to product detail pages of JD.COM, an e-commerce company. This system achieves three objectives: (a) Optimization of competing business objectives via Virtual Bids allowing the expressiveness of the valuation of the platform for these objectives. (b) Modeling the users' click behaviors considering explicitly the influence exerted by the sponsored and non-sponsored content displayed alongside through a deep learning approach. (c) Consideration of a Vickrey-Clarke-Groves (VCG) Auction design compatible with the allocation of ads and its induced externalities. Experiments are presented demonstrating the performance of the proposed system. 
Moreover, our approach is fully deployed and serves all traffic through JD.COM's mobile application.", +    "primary_area": "deployed highly innovative applications of ai", +    "author": "Carlos Carrion; Zenan Wang; Harikesh Nair; Xianghong Luo; Yulin Lei; Peiqin Gu; Xiliang Lin; Wenlong Chen; Junsheng Jin; Fanan Zhu; Changping Peng; Yongjun Bao; Zhangang Lin; Weipeng Yan; Jingping Shao", +    "authorids": "", +    "aff": "JD.COM; JD.COM; Stanford University; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM; JD.COM", +    "bibtex": "@article{Carrion_Wang_Nair_Luo_Lei_Gu_Lin_Chen_Jin_Zhu_Peng_Bao_Lin_Yan_Shao_2024, title={Blending Advertising with Organic Content in E-commerce via Virtual Bids}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26835}, DOI={10.1609/aaai.v37i13.26835}, abstractNote={It has become increasingly common that sponsored content (i.e., paid ads) and non-sponsored content are jointly displayed to users, especially on e-commerce platforms. Thus, both of these contents may interact together to influence their engagement behaviors. In general, sponsored content helps brands achieve their marketing goals and provides ad revenue to the platforms. In contrast, non-sponsored content contributes to the long-term health of the platform through increasing users\u2019 engagement. A key conundrum to platforms is learning how to blend both of these contents allowing their interactions to be considered and balancing these business objectives. This paper proposes a system built for this purpose and applied to product detail pages of JD.COM, an e-commerce company. This system achieves three objectives: (a) Optimization of competing business objectives via Virtual Bids allowing the expressiveness of the valuation of the platform for these objectives. 
(b) Modeling the users\u2019 click behaviors considering explicitly the influence exerted by the sponsored and non-sponsored content displayed alongside through a deep learning approach. (c) Consideration of a Vickrey-Clarke-Groves (VCG) Auction design compatible with the allocation of ads and its induced externalities. Experiments are presented demonstrating the performance of the proposed system. Moreover, our approach is fully deployed and serves all traffic through JD.COM\u2019s mobile application.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carrion, Carlos and Wang, Zenan and Nair, Harikesh and Luo, Xianghong and Lei, Yulin and Gu, Peiqin and Lin, Xiliang and Chen, Wenlong and Jin, Junsheng and Zhu, Fanan and Peng, Changping and Bao, Yongjun and Lin, Zhangang and Yan, Weipeng and Shao, Jingping}, year={2024}, month={Jul.}, pages={15476-15484} }", +    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26835/26607", +    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26835", +    "pdf_size": 7188869, +    "gs_citation": 7, +    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17405469768780623941&as_sdt=2005&sciodt=0,5&hl=en", +    "gs_version_total": 2, +    "aff_domain": "jd.com;jd.com;stanford.edu;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com", +    "email": "jd.com;jd.com;stanford.edu;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com;jd.com", +    "github": "", +    "project": "", +    "author_num": 15, +    "aff_unique_index": "0;0;1;0;0;0;0;0;0;0;0;0;0;0;0", +    "aff_unique_norm": "JD.com;Stanford University", +    "aff_unique_dep": ";", +    "aff_unique_url": "https://www.jd.com;https://www.stanford.edu", +    "aff_unique_abbr": "JD;Stanford", +    "aff_campus_unique_index": "1", +    "aff_campus_unique": ";Stanford", +    "aff_country_unique_index": "0;0;1;0;0;0;0;0;0;0;0;0;0;0;0", +    "aff_country_unique": "China;United States" +  }, +  { +    "id": "article-26302", + 
"title": "Boosted Dynamic Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Early-exiting dynamic neural networks (EDNN), as one type of dynamic neural networks, has been widely studied recently. A typical EDNN has multiple prediction heads at different layers of the network backbone. During inference, the model will exit at either the last prediction head or an intermediate prediction head where the prediction confidence is higher than a predefined threshold. To optimize the model, these prediction heads together with the network backbone are trained on every batch of training data. This brings a train-test mismatch problem that all the prediction heads are optimized on all types of data in training phase while the deeper heads will only see difficult inputs in testing phase. Treating training and testing inputs differently at the two phases will cause the mismatch between training and testing data distributions. To mitigate this problem, we formulate an EDNN as an additive model inspired by gradient boosting, and propose multiple training techniques to optimize the model effectively. We name our method BoostNet. Our experiments show it achieves the state-of-the-art performance on CIFAR100 and ImageNet datasets in both anytime and budgeted-batch prediction modes. 
Our code is released at https://github.com/SHI-Labs/Boosted-Dynamic-Networks.", + "primary_area": "machine learning iv", + "author": "Haichao Yu; Haoxiang Li; Gang Hua; Gao Huang; Humphrey Shi", + "authorids": "", + "aff": "University of Illinois Urbana-Champaign; Wormpex AI Research; Wormpex AI Research; Tsinghua University; University of Illinois Urbana-Champaign+University of Oregon", + "bibtex": "@article{Yu_Li_Hua_Huang_Shi_2023, title={Boosted Dynamic Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26302}, DOI={10.1609/aaai.v37i9.26302}, abstractNote={Early-exiting dynamic neural networks (EDNN), as one type of dynamic neural networks, has been widely studied recently. A typical EDNN has multiple prediction heads at different layers of the network backbone. During inference, the model will exit at either the last prediction head or an intermediate prediction head where the prediction confidence is higher than a predefined threshold. To optimize the model, these prediction heads together with the network backbone are trained on every batch of training data. This brings a train-test mismatch problem that all the prediction heads are optimized on all types of data in training phase while the deeper heads will only see difficult inputs in testing phase. Treating training and testing inputs differently at the two phases will cause the mismatch between training and testing data distributions. To mitigate this problem, we formulate an EDNN as an additive model inspired by gradient boosting, and propose multiple training techniques to optimize the model effectively. We name our method BoostNet. Our experiments show it achieves the state-of-the-art performance on CIFAR100 and ImageNet datasets in both anytime and budgeted-batch prediction modes. 
Our code is released at https://github.com/SHI-Labs/Boosted-Dynamic-Networks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Haichao and Li, Haoxiang and Hua, Gang and Huang, Gao and Shi, Humphrey}, year={2023}, month={Jun.}, pages={10989-10997} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26302/26074", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26302", + "pdf_size": 826586, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17128908816863061400&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "illinois.edu; ; ; ;gmail.com", + "email": "illinois.edu; ; ; ;gmail.com", + "github": "https://github.com/SHI-Labs/Boosted-Dynamic-Networks", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;0+3", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Wormpex AI Research;Tsinghua University;University of Oregon", + "aff_unique_dep": ";AI Research;;", + "aff_unique_url": "https://illinois.edu;;https://www.tsinghua.edu.cn;https://www.uoregon.edu", + "aff_unique_abbr": "UIUC;Wormpex AI;THU;UO", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;1;0+0", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26552", + "title": "Boosting Few-Shot Text Classification via Distribution Estimation", + "track": "main", + "status": "Technical", + "abstract": "Distribution estimation has been demonstrated as one of the most effective approaches in dealing with few-shot image classification, as the low-level patterns and underlying representations can be easily transferred across different tasks in computer vision domain. 
However, directly applying this approach to few-shot text classification is challenging, since leveraging the statistics of known classes with sufficient samples to calibrate the distributions of novel classes may cause negative effects due to serious category difference in text domain. To alleviate this issue, we propose two simple yet effective strategies to estimate the distributions of the novel classes by utilizing unlabeled query samples, thus avoiding the potential negative transfer issue. Specifically, we first assume a class or sample follows the Gaussian distribution, and use the original support set and the nearest few query samples to estimate the corresponding mean and covariance. Then, we augment the labeled samples by sampling from the estimated distribution, which can provide sufficient supervision for training the classification model. Extensive experiments on eight few-shot text classification datasets show that the proposed method outperforms state-of-the-art baselines significantly.", + "primary_area": "speech natural language processing", + "author": "Han Liu; Feng Zhang; Xiaotong Zhang; Siyang Zhao; Fenglong Ma; Xiao-Ming Wu; Hongyang Chen; Hong Yu; Xianchao Zhang", + "authorids": "", + "aff": "Dalian University of Technology; Peking University; Dalian University of Technology; Dalian University of Technology; The Pennsylvania State University; The Hong Kong Polytechnic University; Zhejiang Lab; Dalian University of Technology; Dalian University of Technology", + "bibtex": "@article{Liu_Zhang_Zhang_Zhao_Ma_Wu_Chen_Yu_Zhang_2023, title={Boosting Few-Shot Text Classification via Distribution Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26552}, DOI={10.1609/aaai.v37i11.26552}, abstractNote={Distribution estimation has been demonstrated as one of the most effective approaches in dealing with few-shot image classification, as the low-level patterns and underlying representations can be easily transferred across 
different tasks in computer vision domain. However, directly applying this approach to few-shot text classification is challenging, since leveraging the statistics of known classes with sufficient samples to calibrate the distributions of novel classes may cause negative effects due to serious category difference in text domain. To alleviate this issue, we propose two simple yet effective strategies to estimate the distributions of the novel classes by utilizing unlabeled query samples, thus avoiding the potential negative transfer issue. Specifically, we first assume a class or sample follows the Gaussian distribution, and use the original support set and the nearest few query samples to estimate the corresponding mean and covariance. Then, we augment the labeled samples by sampling from the estimated distribution, which can provide sufficient supervision for training the classification model. Extensive experiments on eight few-shot text classification datasets show that the proposed method outperforms state-of-the-art baselines significantly.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Han and Zhang, Feng and Zhang, Xiaotong and Zhao, Siyang and Ma, Fenglong and Wu, Xiao-Ming and Chen, Hongyang and Yu, Hong and Zhang, Xianchao}, year={2023}, month={Jun.}, pages={13219-13227} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26552/26324", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26552", + "pdf_size": 3598445, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18272118784095925472&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com;hotmail.com;mail.dlut.edu.cn;psu.edu;comp.polyu.edu.hk;ieee.org;dlut.edu.cn;dlut.edu.cn", + "email": "gmail.com;gmail.com;hotmail.com;mail.dlut.edu.cn;psu.edu;comp.polyu.edu.hk;ieee.org;dlut.edu.cn;dlut.edu.cn", + "github": "", + "project": "", + "author_num": 9, + 
"aff_unique_index": "0;1;0;0;2;3;4;0;0", + "aff_unique_norm": "Dalian University of Technology;Peking University;The Pennsylvania State University;The Hong Kong Polytechnic University;Zhejiang Lab", + "aff_unique_dep": ";;;;", + "aff_unique_url": "http://www.dlut.edu.cn/;http://www.pku.edu.cn;https://www.psu.edu;https://www.polyu.edu.hk;http://www.zhejianglab.com", + "aff_unique_abbr": "DUT;Peking U;PSU;PolyU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;0;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25944", + "title": "Boosting Graph Neural Networks via Adaptive Knowledge Distillation", + "track": "main", + "status": "Technical", + "abstract": "Graph neural networks (GNNs) have shown remarkable performance on diverse graph mining tasks. While sharing the same message passing framework, our study shows that different GNNs learn distinct knowledge from the same graph. This implies potential performance improvement by distilling the complementary knowledge from multiple models. However, knowledge distillation (KD) transfers knowledge from high-capacity teachers to a lightweight student, which deviates from our scenario: GNNs are often shallow. To transfer knowledge effectively, we need to tackle two challenges: how to transfer knowledge from compact teachers to a student with the same capacity; and, how to exploit student GNN's own learning ability. In this paper, we propose a novel adaptive KD framework, called BGNN, which sequentially transfers knowledge from multiple GNNs into a student GNN. We also introduce an adaptive temperature module and a weight boosting module. These modules guide the student to the appropriate knowledge for effective learning. Extensive experiments have demonstrated the effectiveness of BGNN. 
In particular, we achieve up to 3.05% improvement for node classification and 6.35% improvement for graph classification over vanilla GNNs.", + "primary_area": "machine learning i", + "author": "Zhichun Guo; Chunhui Zhang; Yujie Fan; Yijun Tian; Chuxu Zhang; Nitesh V. Chawla", + "authorids": "", + "aff": "University of Notre Dame; Brandeis University; Case Western Reserve University; University of Notre Dame; Brandeis University; University of Notre Dame", + "bibtex": "@article{Guo_Zhang_Fan_Tian_Zhang_Chawla_2023, title={Boosting Graph Neural Networks via Adaptive Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25944}, DOI={10.1609/aaai.v37i6.25944}, abstractNote={Graph neural networks (GNNs) have shown remarkable performance on diverse graph mining tasks. While sharing the same message passing framework, our study shows that different GNNs learn distinct knowledge from the same graph. This implies potential performance improvement by distilling the complementary knowledge from multiple models. However, knowledge distillation (KD) transfers knowledge from high-capacity teachers to a lightweight student, which deviates from our scenario: GNNs are often shallow. To transfer knowledge effectively, we need to tackle two challenges: how to transfer knowledge from compact teachers to a student with the same capacity; and, how to exploit student GNN\u2019s own learning ability. In this paper, we propose a novel adaptive KD framework, called BGNN, which sequentially transfers knowledge from multiple GNNs into a student GNN. We also introduce an adaptive temperature module and a weight boosting module. These modules guide the student to the appropriate knowledge for effective learning. Extensive experiments have demonstrated the effectiveness of BGNN. 
In particular, we achieve up to 3.05% improvement for node classification and 6.35% improvement for graph classification over vanilla GNNs.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Zhichun and Zhang, Chunhui and Fan, Yujie and Tian, Yijun and Zhang, Chuxu and Chawla, Nitesh V.}, year={2023}, month={Jun.}, pages={7793-7801} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25944/25716", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25944", + "pdf_size": 560618, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16793839963992670449&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 9, + "aff_domain": "nd.edu;brandeis.edu;case.edu;nd.edu;brandeis.edu;nd.edu", + "email": "nd.edu;brandeis.edu;case.edu;nd.edu;brandeis.edu;nd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;1;0", + "aff_unique_norm": "University of Notre Dame;Brandeis University;Case Western Reserve University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nd.edu;https://www.brandeis.edu;https://www.case.edu", + "aff_unique_abbr": "Notre Dame;Brandeis;CWRU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25175", + "title": "Boosting Point Clouds Rendering via Radiance Mapping", + "track": "main", + "status": "Technical", + "abstract": "Recent years we have witnessed rapid development in NeRF-based image rendering due to its high quality. However, point clouds rendering is somehow less explored. Compared to NeRF-based rendering which suffers from dense spatial sampling, point clouds rendering is naturally less computation intensive, which enables its deployment in mobile computing device. In this work, we focus on boosting the image quality of point clouds rendering with a compact model design. 
We first analyze the adaption of the volume rendering formulation on point clouds. Based on the analysis, we simplify the NeRF representation to a spatial mapping function which only requires single evaluation per pixel. Further, motivated by ray marching, we rectify the the noisy raw point clouds to the estimated intersection between rays and surfaces as queried coordinates, which could avoid spatial frequency collapse and neighbor point disturbance. Composed of rasterization, spatial mapping and the refinement stages, our method achieves the state-of-the-art performance on point clouds rendering, outperforming prior works by notable margins, with a smaller model size. We obtain a PSNR of 31.74 on NeRF-Synthetic, 25.88 on ScanNet and 30.81 on DTU. Code and data are publicly available in https://github.com/seanywang0408/RadianceMapping.", + "primary_area": "computer vision i", + "author": "Xiaoyang Huang; Yi Zhang; Bingbing Ni; Teng Li; Kai Chen; Wenjun Zhang", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University+Shanghai AI Lab; Anhui University; Shanghai AI Lab; Shanghai Jiao Tong University", + "bibtex": "@article{Huang_Zhang_Ni_Li_Chen_Zhang_2023, title={Boosting Point Clouds Rendering via Radiance Mapping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25175}, DOI={10.1609/aaai.v37i1.25175}, abstractNote={Recent years we have witnessed rapid development in NeRF-based image rendering due to its high quality. However, point clouds rendering is somehow less explored. Compared to NeRF-based rendering which suffers from dense spatial sampling, point clouds rendering is naturally less computation intensive, which enables its deployment in mobile computing device. In this work, we focus on boosting the image quality of point clouds rendering with a compact model design. We first analyze the adaption of the volume rendering formulation on point clouds. 
Based on the analysis, we simplify the NeRF representation to a spatial mapping function which only requires single evaluation per pixel. Further, motivated by ray marching, we rectify the the noisy raw point clouds to the estimated intersection between rays and surfaces as queried coordinates, which could avoid spatial frequency collapse and neighbor point disturbance. Composed of rasterization, spatial mapping and the refinement stages, our method achieves the state-of-the-art performance on point clouds rendering, outperforming prior works by notable margins, with a smaller model size. We obtain a PSNR of 31.74 on NeRF-Synthetic, 25.88 on ScanNet and 30.81 on DTU. Code and data are publicly available in https://github.com/seanywang0408/RadianceMapping.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Xiaoyang and Zhang, Yi and Ni, Bingbing and Li, Teng and Chen, Kai and Zhang, Wenjun}, year={2023}, month={Jun.}, pages={953-961} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25175/24947", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25175", + "pdf_size": 820768, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17194866340184624160&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn; ; ; ", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn; ; ; ", + "github": "https://github.com/seanywang0408/RadianceMapping", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1;2;1;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Shanghai AI Lab;Anhui University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.shanghaiailab.com;http://www.ahu.edu.cn/", + "aff_unique_abbr": "SJTU;SAIL;AHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-25396", + "title": "Boosting Semi-Supervised Semantic Segmentation with Probabilistic Representations", + "track": "main", + "status": "Technical", + "abstract": "Recent breakthroughs in semi-supervised semantic segmentation have been developed through contrastive learning. In prevalent pixel-wise contrastive learning solutions, the model maps pixels to deterministic representations and regularizes them in the latent space. However, there exist inaccurate pseudo-labels which map the ambiguous representations of pixels to the wrong classes due to the limited cognitive ability of the model. In this paper, we define pixel-wise representations from a new perspective of probability theory and propose a Probabilistic Representation Contrastive Learning (PRCL) framework that improves representation quality by taking its probability into consideration. Through modelling the mapping from pixels to representations as the probability via multivariate Gaussian distributions, we can tune the contribution of the ambiguous representations to tolerate the risk of inaccurate pseudo-labels. Furthermore, we define prototypes in the form of distributions, which indicates the confidence of a class, while the point prototype cannot. More- over, we propose to regularize the distribution variance to enhance the reliability of representations. Taking advantage of these benefits, high-quality feature representations can be derived in the latent space, thereby the performance of se- mantic segmentation can be further improved. We conduct sufficient experiment to evaluate PRCL on Pascal VOC and CityScapes to demonstrate its superiority. 
The code is available at https://github.com/Haoyu-Xie/PRCL.", + "primary_area": "computer vision iii", + "author": "Haoyu Xie; Changqi Wang; Mingkai Zheng; Minjing Dong; Shan You; Chong Fu; Chang Xu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Northeastern University, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China; School of Computer Science, Faculty of Engineer, The University of Sydney, Sydney, Australia; School of Computer Science, Faculty of Engineer, The University of Sydney, Sydney, Australia; SenseTime Research, Beijing, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China; School of Computer Science, Faculty of Engineer, The University of Sydney, Sydney, Australia", + "bibtex": "@article{Xie_Wang_Zheng_Dong_You_Fu_Xu_2023, title={Boosting Semi-Supervised Semantic Segmentation with Probabilistic Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25396}, DOI={10.1609/aaai.v37i3.25396}, abstractNote={Recent breakthroughs in semi-supervised semantic segmentation have been developed through contrastive learning. In prevalent pixel-wise contrastive learning solutions, the model maps pixels to deterministic representations and regularizes them in the latent space. However, there exist inaccurate pseudo-labels which map the ambiguous representations of pixels to the wrong classes due to the limited cognitive ability of the model. In this paper, we define pixel-wise representations from a new perspective of probability theory and propose a Probabilistic Representation Contrastive Learning (PRCL) framework that improves representation quality by taking its probability into consideration. 
Through modelling the mapping from pixels to representations as the probability via multivariate Gaussian distributions, we can tune the contribution of the ambiguous representations to tolerate the risk of inaccurate pseudo-labels. Furthermore, we define prototypes in the form of distributions, which indicates the confidence of a class, while the point prototype cannot. More- over, we propose to regularize the distribution variance to enhance the reliability of representations. Taking advantage of these benefits, high-quality feature representations can be derived in the latent space, thereby the performance of se- mantic segmentation can be further improved. We conduct sufficient experiment to evaluate PRCL on Pascal VOC and CityScapes to demonstrate its superiority. The code is available at https://github.com/Haoyu-Xie/PRCL.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Haoyu and Wang, Changqi and Zheng, Mingkai and Dong, Minjing and You, Shan and Fu, Chong and Xu, Chang}, year={2023}, month={Jun.}, pages={2938-2946} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25396/25168", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25396", + "pdf_size": 5211554, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10604164639618341876&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "stumail.neu.edu.cn;stu.neu.edu.cn;outlook.com;uni.sydney.edu.au;sensetime.com;mail.neu.edu.cn;sydney.edu.au", + "email": "stumail.neu.edu.cn;stu.neu.edu.cn;outlook.com;uni.sydney.edu.au;sensetime.com;mail.neu.edu.cn;sydney.edu.au", + "github": "https://github.com/Haoyu-Xie/PRCL", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;1;2;0;1", + "aff_unique_norm": "Northeastern University;The University of Sydney;SenseTime Research", + "aff_unique_dep": "School of Computer Science and Engineering;School of Computer Science;", + 
"aff_unique_url": "http://www.neu.edu.cn/;https://www.sydney.edu.au;https://www.sensetime.com", + "aff_unique_abbr": "NEU;USYD;SenseTime", + "aff_campus_unique_index": "0;0;1;1;2;0;1", + "aff_campus_unique": "Shenyang;Sydney;Beijing", + "aff_country_unique_index": "0;0;1;1;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25670", + "title": "Bootstrapping Multi-View Representations for Fake News Detection", + "track": "main", + "status": "Technical", + "abstract": "Previous researches on multimedia fake news detection include a series of complex feature extraction and fusion networks to gather useful information from the news. However, how cross-modal consistency relates to the fidelity of news and how features from different modalities affect the decision-making are still open questions. This paper presents a novel scheme of Bootstrapping Multi-view Representations (BMR) for fake news detection. Given a multi-modal news, we extract representations respectively from the views of the text, the image pattern and the image semantics. Improved Multi-gate Mixture-of-Expert networks (iMMoE) are proposed for feature refinement and fusion. Representations from each view are separately used to coarsely predict the fidelity of the whole news, and the multimodal representations are able to predict the cross-modal consistency. With the prediction scores, we reweigh each view of the representations and bootstrap them for fake news detection. 
Extensive experiments conducted on typical fake news detection datasets prove that BMR outperforms state-of-the-art schemes.", + "primary_area": "domain s of application", + "author": "Qichao Ying; Xiaoxiao Hu; Yangming Zhou; Zhenxing Qian; Dan Zeng; Shiming Ge", + "authorids": "", + "aff": "Fudan University; Fudan University; Fudan University; Fudan University; Shanghai University; Chinese Academy of Sciences", + "bibtex": "@article{Ying_Hu_Zhou_Qian_Zeng_Ge_2023, title={Bootstrapping Multi-View Representations for Fake News Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25670}, DOI={10.1609/aaai.v37i4.25670}, abstractNote={Previous researches on multimedia fake news detection include a series of complex feature extraction and fusion networks to gather useful information from the news. However, how cross-modal consistency relates to the fidelity of news and how features from different modalities affect the decision-making are still open questions. This paper presents a novel scheme of Bootstrapping Multi-view Representations (BMR) for fake news detection. Given a multi-modal news, we extract representations respectively from the views of the text, the image pattern and the image semantics. Improved Multi-gate Mixture-of-Expert networks (iMMoE) are proposed for feature refinement and fusion. Representations from each view are separately used to coarsely predict the fidelity of the whole news, and the multimodal representations are able to predict the cross-modal consistency. With the prediction scores, we reweigh each view of the representations and bootstrap them for fake news detection. 
Extensive experiments conducted on typical fake news detection datasets prove that BMR outperforms state-of-the-art schemes.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ying, Qichao and Hu, Xiaoxiao and Zhou, Yangming and Qian, Zhenxing and Zeng, Dan and Ge, Shiming}, year={2023}, month={Jun.}, pages={5384-5392} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25670/25442", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25670", + "pdf_size": 1849603, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12070243421574332131&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;shu.edu.cn;iie.ac.cn", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;shu.edu.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;2", + "aff_unique_norm": "Fudan University;Shanghai University;Chinese Academy of Sciences", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.shu.edu.cn;https://www.cas.cn", + "aff_unique_abbr": "Fudan;SHU;CAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26092", + "title": "Boundary Graph Neural Networks for 3D Simulations", + "track": "main", + "status": "Technical", + "abstract": "The abundance of data has given machine learning considerable momentum in natural sciences and engineering, though modeling of physical processes is often difficult. A particularly tough problem is the efficient representation of geometric boundaries. Triangularized geometric boundaries are well understood and ubiquitous in engineering applications. 
However, it is notoriously difficult to integrate them into machine learning approaches due to their heterogeneity with respect to size and orientation. In this work, we introduce an effective theory to model particle-boundary interactions, which leads to our new Boundary Graph Neural Networks (BGNNs) that dynamically modify graph structures to obey boundary conditions. The new BGNNs are tested on complex 3D granular flow processes of hoppers, rotating drums and mixers, which are all standard components of modern industrial machinery but still have complicated geometry. BGNNs are evaluated in terms of computational efficiency as well as prediction accuracy of particle flows and mixing entropies. BGNNs are able to accurately reproduce 3D granular flows within simulation uncertainties over hundreds of thousands of simulation timesteps. Most notably, in our experiments, particles stay within the geometric objects without using handcrafted conditions or restrictions.", + "primary_area": "machine learning iii", + "author": "Andreas Mayr; Sebastian Lehner; Arno Mayrhofer; Christoph Kloss; Sepp Hochreiter; Johannes Brandstetter", + "authorids": "", + "aff": "ELLIS Unit Linz & LIT AI Lab, Johannes Kepler University Linz, Linz, Austria; ELLIS Unit Linz & LIT AI Lab, Johannes Kepler University Linz, Linz, Austria; DCS Computing GmbH, Linz, Austria; DCS Computing GmbH, Linz, Austria; ELLIS Unit Linz & LIT AI Lab, Johannes Kepler University Linz, Linz, Austria+Institute of Advanced Research in Artificial Intelligence (IARAI), Vienna, Austria; ELLIS Unit Linz & LIT AI Lab, Johannes Kepler University Linz, Linz, Austria+Microsoft Research AI4Science", + "bibtex": "@article{Mayr_Lehner_Mayrhofer_Kloss_Hochreiter_Brandstetter_2023, title={Boundary Graph Neural Networks for 3D Simulations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26092}, DOI={10.1609/aaai.v37i8.26092}, abstractNote={The abundance of data has given machine learning considerable momentum in 
natural sciences and engineering, though modeling of physical processes is often difficult. A particularly tough problem is the efficient representation of geometric boundaries. Triangularized geometric boundaries are well understood and ubiquitous in engineering applications. However, it is notoriously difficult to integrate them into machine learning approaches due to their heterogeneity with respect to size and orientation. In this work, we introduce an effective theory to model particle-boundary interactions, which leads to our new Boundary Graph Neural Networks (BGNNs) that dynamically modify graph structures to obey boundary conditions. The new BGNNs are tested on complex 3D granular flow processes of hoppers, rotating drums and mixers, which are all standard components of modern industrial machinery but still have complicated geometry. BGNNs are evaluated in terms of computational efficiency as well as prediction accuracy of particle flows and mixing entropies. BGNNs are able to accurately reproduce 3D granular flows within simulation uncertainties over hundreds of thousands of simulation timesteps. 
Most notably, in our experiments, particles stay within the geometric objects without using handcrafted conditions or restrictions.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mayr, Andreas and Lehner, Sebastian and Mayrhofer, Arno and Kloss, Christoph and Hochreiter, Sepp and Brandstetter, Johannes}, year={2023}, month={Jun.}, pages={9099-9107} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26092/25864", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26092", + "pdf_size": 3082852, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=801640499274385022&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "ml.jku.at; ; ; ; ;ml.jku.at", + "email": "ml.jku.at; ; ; ; ;ml.jku.at", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;0+2;0+3", + "aff_unique_norm": "Johannes Kepler University Linz;DCS Computing GmbH;Institute of Advanced Research in Artificial Intelligence;Microsoft Research", + "aff_unique_dep": "ELLIS Unit Linz & LIT AI Lab;;;AI4Science", + "aff_unique_url": "https://www.jku.at;;;https://www.microsoft.com/en-us/research/group/ai4science", + "aff_unique_abbr": "JKU;;IARAI;Microsoft Research AI4Science", + "aff_campus_unique_index": "0;0;0+2;0", + "aff_campus_unique": "Linz;;Vienna", + "aff_country_unique_index": "0;0;0;0;0+0;0+1", + "aff_country_unique": "Austria;United States" + }, + { + "id": "article-25274", + "title": "Breaking Immutable: Information-Coupled Prototype Elaboration for Few-Shot Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Few-shot object detection, expecting detectors to detect novel classes with a few instances, has made conspicuous progress. 
However, the prototypes extracted by existing meta-learning based methods still suffer from insufficient representative information and lack awareness of query images, which cannot be adaptively tailored to different query images. Firstly, only the support images are involved for extracting prototypes, resulting in scarce perceptual information of query images. Secondly, all pixels of all support images are treated equally when aggregating features into prototype vectors, thus the salient objects are overwhelmed by the cluttered background. In this paper, we propose an Information-Coupled Prototype Elaboration (ICPE) method to generate specific and representative prototypes for each query image. Concretely, a conditional information coupling module is introduced to couple information from the query branch to the support branch, strengthening the query-perceptual information in support features. Besides, we design a prototype dynamic aggregation module that dynamically adjusts intra-image and inter-image aggregation weights to highlight the salient information useful for detecting query images. Experimental results on both Pascal VOC and MS COCO demonstrate that our method achieves state-of-the-art performance in almost all settings. 
Code will be available at: https://github.com/lxn96/ICPE.", + "primary_area": "computer vision ii", + "author": "Xiaonan Lu; Wenhui Diao; Yongqiang Mao; Junxi Li; Peijin Wang; Xian Sun; Kun Fu", + "authorids": "", + "aff": "Aerospace Information Research Institute, Chinese Academy of Sciences+Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute+University of Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences+Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute+University of Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences+Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute+University of Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences+Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute+University of Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences+Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute+University of Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences+Key Laboratory 
of Network Information System Technology (NIST), Aerospace Information Research Institute+University of Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences", + "bibtex": "@article{Lu_Diao_Mao_Li_Wang_Sun_Fu_2023, title={Breaking Immutable: Information-Coupled Prototype Elaboration for Few-Shot Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25274}, DOI={10.1609/aaai.v37i2.25274}, abstractNote={Few-shot object detection, expecting detectors to detect novel classes with a few instances, has made conspicuous progress. However, the prototypes extracted by existing meta-learning based methods still suffer from insufficient representative information and lack awareness of query images, which cannot be adaptively tailored to different query images. Firstly, only the support images are involved for extracting prototypes, resulting in scarce perceptual information of query images. Secondly, all pixels of all support images are treated equally when aggregating features into prototype vectors, thus the salient objects are overwhelmed by the cluttered background. In this paper, we propose an Information-Coupled Prototype Elaboration (ICPE) method to generate specific and representative prototypes for each query image. Concretely, a conditional information coupling module is introduced to couple information from the query branch to the support branch, strengthening the query-perceptual information in support features. Besides, we design a prototype dynamic aggregation module that dynamically adjusts intra-image and inter-image aggregation weights to highlight the salient information useful for detecting query images. Experimental results on both Pascal VOC and MS COCO demonstrate that our method achieves state-of-the-art performance in almost all settings. 
Code will be available at: https://github.com/lxn96/ICPE.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Xiaonan and Diao, Wenhui and Mao, Yongqiang and Li, Junxi and Wang, Peijin and Sun, Xian and Fu, Kun}, year={2023}, month={Jun.}, pages={1844-1852} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25274/25046", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25274", + "pdf_size": 2079980, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12396200407303986955&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.ucas.ac.cn;aircas.ac.cn;mails.ucas.ac.cn;mails.ucas.ac.cn;mails.ucas.ac.cn;aircas.ac.cn;gmail.com", + "email": "mails.ucas.ac.cn;aircas.ac.cn;mails.ucas.ac.cn;mails.ucas.ac.cn;mails.ucas.ac.cn;aircas.ac.cn;gmail.com", + "github": "https://github.com/lxn96/ICPE", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2+2;0+1+2+2;0+1+2+2;0+1+2+2;0;0+1+2+2;0+1+2+2", + "aff_unique_norm": "Chinese Academy of Sciences;Aerospace Information Research Institute;University of Chinese Academy of Sciences", + "aff_unique_dep": "Aerospace Information Research Institute;Key Laboratory of Network Information System Technology (NIST);", + "aff_unique_url": "http://www.cas.ac.cn;;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;;UCAS", + "aff_campus_unique_index": ";;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0+0;0+0+0+0;0+0+0+0;0+0+0+0;0;0+0+0+0;0+0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26263", + "title": "BridgeTower: Building Bridges between Encoders in Vision-Language Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Vision-Language (VL) models with the Two-Tower architecture have dominated visual-language representation learning in recent years. 
Current VL models either use lightweight uni-modal encoders and learn to extract, align and fuse both modalities simultaneously in a deep cross-modal encoder, or feed the last-layer uni-modal representations from the deep pre-trained uni-modal encoders into the top cross-modal encoder. Both approaches potentially restrict vision-language representation learning and limit model performance. In this paper, we propose BridgeTower, which introduces multiple bridge layers that build a connection between the top layers of uni-modal encoders and each layer of the cross-modal encoder. This enables effective bottom-up cross-modal alignment and fusion between visual and textual representations of different semantic levels of pre-trained uni-modal encoders in the cross-modal encoder. Pre-trained with only 4M images, BridgeTower achieves state-of-the-art performance on various downstream vision-language tasks. In particular, on the VQAv2 test-std set, BridgeTower achieves an accuracy of 78.73%, outperforming the previous state-of-the-art model METER by 1.09% with the same pre-training data and almost negligible additional parameters and computational costs. Notably, when further scaling the model, BridgeTower achieves an accuracy of 81.15%, surpassing models that are pre-trained on orders-of-magnitude larger datasets. 
Code and checkpoints are available at https://github.com/microsoft/BridgeTower.", + "primary_area": "machine learning iv", + "author": "Xiao Xu; Chenfei Wu; Shachar Rosenman; Vasudev Lal; Wanxiang Che; Nan Duan", + "authorids": "", + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology+Microsoft Research Asia; Microsoft Research Asia; Intel Labs, Cognitive Computing Research; Intel Labs, Cognitive Computing Research; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology+Microsoft Research Asia; Microsoft Research Asia", + "bibtex": "@article{Xu_Wu_Rosenman_Lal_Che_Duan_2023, title={BridgeTower: Building Bridges between Encoders in Vision-Language Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26263}, DOI={10.1609/aaai.v37i9.26263}, abstractNote={Vision-Language (VL) models with the Two-Tower architecture have dominated visual-language representation learning in recent years. Current VL models either use lightweight uni-modal encoders and learn to extract, align and fuse both modalities simultaneously in a deep cross-modal encoder, or feed the last-layer uni-modal representations from the deep pre-trained uni-modal encoders into the top cross-modal encoder. Both approaches potentially restrict vision-language representation learning and limit model performance. In this paper, we propose BridgeTower, which introduces multiple bridge layers that build a connection between the top layers of uni-modal encoders and each layer of the cross-modal encoder. This enables effective bottom-up cross-modal alignment and fusion between visual and textual representations of different semantic levels of pre-trained uni-modal encoders in the cross-modal encoder. Pre-trained with only 4M images, BridgeTower achieves state-of-the-art performance on various downstream vision-language tasks. 
In particular, on the VQAv2 test-std set, BridgeTower achieves an accuracy of 78.73%, outperforming the previous state-of-the-art model METER by 1.09% with the same pre-training data and almost negligible additional parameters and computational costs. Notably, when further scaling the model, BridgeTower achieves an accuracy of 81.15%, surpassing models that are pre-trained on orders-of-magnitude larger datasets. Code and checkpoints are available at https://github.com/microsoft/BridgeTower.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Xiao and Wu, Chenfei and Rosenman, Shachar and Lal, Vasudev and Che, Wanxiang and Duan, Nan}, year={2023}, month={Jun.}, pages={10637-10647} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26263/26035", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26263", + "pdf_size": 524443, + "gs_citation": 76, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17297933561250916578&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ir.hit.edu.cn;microsoft.com;intel.com;intel.com;ir.hit.edu.cn;microsoft.com", + "email": "ir.hit.edu.cn;microsoft.com;intel.com;intel.com;ir.hit.edu.cn;microsoft.com", + "github": "https://github.com/microsoft/BridgeTower", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;2;2;0+1;1", + "aff_unique_norm": "Harbin Institute of Technology;Microsoft Research;Intel Labs", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval;Research;Cognitive Computing Research", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.microsoft.com/en-us/research/group/asia;https://www.intel.com", + "aff_unique_abbr": "HIT;MSR Asia;Intel", + "aff_campus_unique_index": "0+1;1;0+1;1", + "aff_campus_unique": "Harbin;Asia;", + "aff_country_unique_index": "0+0;0;1;1;0+0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26682", + "title": "Bugs in the Data: 
How ImageNet Misrepresents Biodiversity", + "track": "aaai special track", + "status": "Technical", + "abstract": "ImageNet-1k is a dataset often used for benchmarking machine learning (ML) models and evaluating tasks such as image recognition and object detection. Wild animals make up 27% of ImageNet-1k but, unlike classes representing people and objects, these data have not been closely scrutinized. In the current paper, we analyze the 13,450 images from 269 classes that represent wild animals in the ImageNet-1k validation set, with the participation of expert ecologists. We find that many of the classes are ill-defined or overlapping, and that 12% of the images are incorrectly labeled, with some classes having >90% of images incorrect. We also find that both the wildlife-related labels and images included in ImageNet-1k present significant geographical and cultural biases, as well as ambiguities such as artificial animals, multiple species in the same image, or the presence of humans. Our findings highlight serious issues with the extensive use of this dataset for evaluating ML systems, the use of such algorithms in wildlife-related tasks, and more broadly the ways in which ML datasets are commonly created and curated.", + "primary_area": "ai for social impact", + "author": "Alexandra Sasha Luccioni; David Rolnick", + "authorids": "", + "aff": "Hugging Face; McGill University, Mila", + "bibtex": "@article{Luccioni_Rolnick_2023, title={Bugs in the Data: How ImageNet Misrepresents Biodiversity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26682}, DOI={10.1609/aaai.v37i12.26682}, abstractNote={ImageNet-1k is a dataset often used for benchmarking machine learning (ML) models and evaluating tasks such as image recognition and object detection. Wild animals make up 27% of ImageNet-1k but, unlike classes representing people and objects, these data have not been closely scrutinized. 
In the current paper, we analyze the 13,450 images from 269 classes that represent wild animals in the ImageNet-1k validation set, with the participation of expert ecologists. We find that many of the classes are ill-defined or overlapping, and that 12% of the images are incorrectly labeled, with some classes having >90% of images incorrect. We also find that both the wildlife-related labels and images included in ImageNet-1k present significant geographical and cultural biases, as well as ambiguities such as artificial animals, multiple species in the same image, or the presence of humans. Our findings highlight serious issues with the extensive use of this dataset for evaluating ML systems, the use of such algorithms in wildlife-related tasks, and more broadly the ways in which ML datasets are commonly created and curated.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luccioni, Alexandra Sasha and Rolnick, David}, year={2023}, month={Jun.}, pages={14382-14390} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26682/26454", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26682", + "pdf_size": 3519366, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2760109459316111305&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "huggingface.co;cs.mcgill.ca", + "email": "huggingface.co;cs.mcgill.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Hugging Face;McGill University", + "aff_unique_dep": ";Mila", + "aff_unique_url": "https://huggingface.co;https://www.mcgill.ca", + "aff_unique_abbr": "Hugging Face;McGill", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-26903", + "title": "Build-a-Bot: Teaching Conversational AI Using a Transformer-Based Intent Recognition 
and Question Answering Architecture", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "As artificial intelligence (AI) becomes a prominent part of modern life, AI literacy is becoming important for all citizens, not just those in technology careers. Previous research in AI education materials has largely focused on the introduction of terminology as well as AI use cases and ethics, but few allow students to learn by creating their own machine learning models. Therefore, there is a need for enriching AI educational tools with more adaptable and flexible platforms for interested educators with any level of technical experience to utilize within their teaching material. As such, we propose the development of an open-source tool (Build-A-Bot) for students and teachers to not only create their own transformer-based chatbots based on their own course material but also learn the fundamentals of AI through the model creation process. The primary concern of this paper is the creation of an interface for students to learn the principles of artificial intelligence by using a natural language pipeline to train a customized model to answer questions based on their own school curriculums. The model uses contexts given by their instructor, such as chapters of a textbook, to answer questions and is deployed on an interactive chatbot/voice agent. The pipeline teaches students data collection, data augmentation, intent recognition, and question answering by having them work through each of these processes while creating their AI agent, diverging from previous chatbot work where students and teachers use the bots as black-boxes with no abilities for customization or the bots lack AI capabilities, with the majority of dialogue scripts being rule-based. In addition, our tool is designed to make each step of this pipeline intuitive for students at a middle-school level. 
Further work primarily lies in providing our tool to schools and seeking student and teacher evaluations.", + "primary_area": "", + "author": "Kate Pearce; Sharifa Alghowinem; Cynthia Breazeal", + "authorids": "", + "aff": "Massachusetts Institute of Technology; MIT Media Lab; MIT Media Lab", + "bibtex": "@article{Pearce_Alghowinem_Breazeal_2024, title={Build-a-Bot: Teaching Conversational AI Using a Transformer-Based Intent Recognition and Question Answering Architecture}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26903}, DOI={10.1609/aaai.v37i13.26903}, abstractNote={As artificial intelligence (AI) becomes a prominent part of modern life, AI literacy is becoming important for all citizens, not just those in technology careers. Previous research in AI education materials has largely focused on the introduction of terminology as well as AI use cases and ethics, but few allow students to learn by creating their own machine learning models. Therefore, there is a need for enriching AI educational tools with more adaptable and flexible platforms for interested educators with any level of technical experience to utilize within their teaching material. As such, we propose the development of an open-source tool (Build-A-Bot) for students and teachers to not only create their own transformer-based chatbots based on their own course material but also learn the fundamentals of AI through the model creation process. The primary concern of this paper is the creation of an interface for students to learn the principles of artificial intelligence by using a natural language pipeline to train a customized model to answer questions based on their own school curriculums. The model uses contexts given by their instructor, such as chapters of a textbook, to answer questions and is deployed on an interactive chatbot/voice agent. 
The pipeline teaches students data collection, data augmentation, intent recognition, and question answering by having them work through each of these processes while creating their AI agent, diverging from previous chatbot work where students and teachers use the bots as black-boxes with no abilities for customization or the bots lack AI capabilities, with the majority of dialogue scripts being rule-based. In addition, our tool is designed to make each step of this pipeline intuitive for students at a middle-school level. Further work primarily lies in providing our tool to schools and seeking student and teacher evaluations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pearce, Kate and Alghowinem, Sharifa and Breazeal, Cynthia}, year={2024}, month={Jul.}, pages={16025-16032} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26903/26675", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26903", + "pdf_size": 1582407, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14561319571454661670&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mit.edu;media.mit.edu;media.mit.edu", + "email": "mit.edu;media.mit.edu;media.mit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://web.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26833", + "title": "Building Compositional Robot Autonomy with Modularity and Abstraction", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "This paper summarizes my research roadmap for building compositional robot autonomy with the principles of modularity and abstraction.", + 
"primary_area": "", + "author": "Yuke Zhu", + "authorids": "", + "aff": "Department of Computer Science, The University of Texas at Austin", + "bibtex": "@article{Zhu_2024, title={Building Compositional Robot Autonomy with Modularity and Abstraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26833}, DOI={10.1609/aaai.v37i13.26833}, abstractNote={This paper summarizes my research roadmap for building compositional robot autonomy with the principles of modularity and abstraction.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Yuke}, year={2024}, month={Jul.}, pages={15466-15466} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26833/26605", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26833", + "pdf_size": 41778, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:F2NNcBlY_zwJ:scholar.google.com/&scioq=Building+Compositional+Robot+Autonomy+with+Modularity+and+Abstraction&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "mit.edu;media.mit.edu;media.mit.edu", + "email": "mit.edu;media.mit.edu;media.mit.edu", + "github": "", + "project": "https://rpl.cs.utexas.edu/", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25897", + "title": "C-NTPP: Learning Cluster-Aware Neural Temporal Point Process", + "track": "main", + "status": "Technical", + "abstract": "Event sequences in continuous time space are ubiquitous across applications and have been intensively studied with both classic temporal point process (TPP) and its recent deep network variants. 
This work is motivated by an observation that many of event data exhibit inherent clustering patterns in terms of the sparse correlation among events, while such characteristics are seldom explicitly considered in existing neural TPP models whereby the history encoders are often embodied by RNNs or Transformers. In this work, we propose a c-NTPP (Cluster-Aware Neural Temporal Point Process) model, which leverages a sequential variational autoencoder framework to infer the latent cluster each event belongs to in the sequence. Specially, a novel event-clustered attention mechanism is devised to learn each cluster and then aggregate them together to obtain the final representation for each event. Extensive experiments show that c-NTPP achieves superior performance on both real-world and synthetic datasets, and it can also uncover the underlying clustering correlations.", + "primary_area": "machine learning i", + "author": "Fangyu Ding; Junchi Yan; Haiyang Wang", + "authorids": "", + "aff": "Department of Computer Science and Engineering and MOE Key Lab of AI, Shanghai Jiao Tong University; Department of Computer Science and Engineering and MOE Key Lab of AI, Shanghai Jiao Tong University + Shanghai AI Laboratory, Shanghai, China; Ant Group, Hangzhou, China", + "bibtex": "@article{Ding_Yan_Wang_2023, title={C-NTPP: Learning Cluster-Aware Neural Temporal Point Process}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25897}, DOI={10.1609/aaai.v37i6.25897}, abstractNote={Event sequences in continuous time space are ubiquitous across applications and have been intensively studied with both classic temporal point process (TPP) and its recent deep network variants. 
This work is motivated by an observation that many of event data exhibit inherent clustering patterns in terms of the sparse correlation among events, while such characteristics are seldom explicitly considered in existing neural TPP models whereby the history encoders are often embodied by RNNs or Transformers. In this work, we propose a c-NTPP (Cluster-Aware Neural Temporal Point Process) model, which leverages a sequential variational autoencoder framework to infer the latent cluster each event belongs to in the sequence. Specially, a novel event-clustered attention mechanism is devised to learn each cluster and then aggregate them together to obtain the final representation for each event. Extensive experiments show that c-NTPP achieves superior performance on both real-world and synthetic datasets, and it can also uncover the underlying clustering correlations.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Fangyu and Yan, Junchi and Wang, Haiyang}, year={2023}, month={Jun.}, pages={7369-7377} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25897/25669", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25897", + "pdf_size": 266297, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13467620363048791278&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 2, + "aff_domain": "farthur 99;sjtu.edu.cn;mybank.cn", + "email": "farthur 99;sjtu.edu.cn;mybank.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;2", + "aff_unique_norm": "Shanghai Jiao Tong University;Shanghai AI Laboratory;Ant Group", + "aff_unique_dep": "Department of Computer Science and Engineering;;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.shanghaiailab.com;https://www.antgroup.com", + "aff_unique_abbr": "SJTU;SAIL;Ant Group", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Shanghai;Hangzhou", + "aff_country_unique_index": 
"0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25152", + "title": "CALIP: Zero-Shot Enhancement of CLIP with Parameter-Free Attention", + "track": "main", + "status": "Technical", + "abstract": "Contrastive Language-Image Pre-training (CLIP) has been shown to learn visual representations with promising zero-shot performance. To further improve its downstream accuracy, existing works propose additional learnable modules upon CLIP and fine-tune them by few-shot training sets. However, the resulting extra training cost and data requirement severely hinder the efficiency for model deployment and knowledge transfer. In this paper, we introduce a free-lunch enhancement method, CALIP, to boost CLIP's zero-shot performance via a parameter-free attention module. Specifically, we guide visual and textual representations to interact with each other and explore cross-modal informative features via attention. As the pre-training has largely reduced the embedding distances between two modalities, we discard all learnable parameters in the attention and bidirectionally update the multi-modal features, enabling the whole process to be parameter-free and training-free. In this way, the images are blended with textual-aware signals and the text representations become visual-guided for better adaptive zero-shot alignment. We evaluate CALIP on various benchmarks of 14 datasets for both 2D image and 3D point cloud few-shot classification, showing consistent zero-shot performance improvement over CLIP. Based on that, we further insert a small number of linear layers in CALIP's attention module and verify our robustness under the few-shot settings, which also achieves leading performance compared to existing methods. Those extensive experiments demonstrate the superiority of our approach for efficient enhancement of CLIP. 
Code is available at https://github.com/ZiyuGuo99/CALIP.", + "primary_area": "computer vision i", + "author": "Ziyu Guo; Renrui Zhang; Longtian Qiu; Xianzheng Ma; Xupeng Miao; Xuming He; Bin Cui", + "authorids": "", + "aff": "School of CS and Key Lab of HCST, Peking University+The Chinese University of Hong Kong; The Chinese University of Hong Kong+Shanghai AI Laboratory; ShanghaiTech University; Shanghai AI Laboratory; Carnegie Mellon University; ShanghaiTech University; School of CS and Key Lab of HCST, Peking University", + "bibtex": "@article{Guo_Zhang_Qiu_Ma_Miao_He_Cui_2023, title={CALIP: Zero-Shot Enhancement of CLIP with Parameter-Free Attention}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25152}, DOI={10.1609/aaai.v37i1.25152}, abstractNote={Contrastive Language-Image Pre-training (CLIP) has been shown to learn visual representations with promising zero-shot performance. To further improve its downstream accuracy, existing works propose additional learnable modules upon CLIP and fine-tune them by few-shot training sets. However, the resulting extra training cost and data requirement severely hinder the efficiency for model deployment and knowledge transfer. In this paper, we introduce a free-lunch enhancement method, CALIP, to boost CLIP\u2019s zero-shot performance via a parameter-free attention module. Specifically, we guide visual and textual representations to interact with each other and explore cross-modal informative features via attention. As the pre-training has largely reduced the embedding distances between two modalities, we discard all learnable parameters in the attention and bidirectionally update the multi-modal features, enabling the whole process to be parameter-free and training-free. In this way, the images are blended with textual-aware signals and the text representations become visual-guided for better adaptive zero-shot alignment. 
We evaluate CALIP on various benchmarks of 14 datasets for both 2D image and 3D point cloud few-shot classification, showing consistent zero-shot performance improvement over CLIP. Based on that, we further insert a small number of linear layers in CALIP\u2019s attention module and verify our robustness under the few-shot settings, which also achieves leading performance compared to existing methods. Those extensive experiments demonstrate the superiority of our approach for efficient enhancement of CLIP. Code is available at https://github.com/ZiyuGuo99/CALIP.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Ziyu and Zhang, Renrui and Qiu, Longtian and Ma, Xianzheng and Miao, Xupeng and He, Xuming and Cui, Bin}, year={2023}, month={Jun.}, pages={746-754} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25152/24924", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25152", + "pdf_size": 4658111, + "gs_citation": 135, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12291113417888493866&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;pjlab.org.cn;shanghaitech.edu.cn;shanghaitech.edu.cn; ; ;pku.edu.cn", + "email": "pku.edu.cn;pjlab.org.cn;shanghaitech.edu.cn;shanghaitech.edu.cn; ; ;pku.edu.cn", + "github": "https://github.com/ZiyuGuo99/CALIP", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1+2;3;2;4;3;0", + "aff_unique_norm": "Peking University;The Chinese University of Hong Kong;Shanghai AI Laboratory;ShanghaiTech University;Carnegie Mellon University", + "aff_unique_dep": "School of CS and Key Lab of HCST;;;;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.cuhk.edu.hk;https://www.shanghai-ai-lab.com;https://www.shanghaitech.edu.cn;https://www.cmu.edu", + "aff_unique_abbr": "PKU;CUHK;SAIL;ShanghaiTech;CMU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0+0;0+0;0;0;1;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25264", + "title": "CCQ: Cross-Class Query Network for Partially Labeled Organ Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Learning multi-organ segmentation from multiple partially-labeled datasets attracts increasing attention. It can be a promising solution for the scarcity of large-scale, fully labeled 3D medical image segmentation datasets. However, existing algorithms of multi-organ segmentation on partially-labeled datasets neglect the semantic relations and anatomical priors between different categories of organs, which is crucial for partially-labeled multi-organ segmentation. In this paper, we tackle the limitations above by proposing the Cross-Class Query Network (CCQ). CCQ consists of an image encoder, a cross-class query learning module, and an attentive refinement segmentation module. More specifically, the image encoder captures the long-range dependency of a single image via the transformer encoder. Cross-class query learning module first generates query vectors that represent semantic concepts of different categories and then utilizes these query vectors to find the class-relevant features of image representation for segmentation. The attentive refinement segmentation module with an attentive skip connection incorporates the high-resolution image details and eliminates the class-irrelevant noise. Extensive experiment results demonstrate that CCQ outperforms all the state-of-the-art models on the MOTS dataset, which consists of seven organ and tumor segmentation tasks. 
Code is available at https://github.com/Yang-007/CCQ.git.", + "primary_area": "computer vision ii", + "author": "Xuyang Liu; Bingbing Wen; Sibei Yang", + "authorids": "", + "aff": "School of Information Science and Technology, ShanghaiTech University; Information School, University of Washington; School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "bibtex": "@article{Liu_Wen_Yang_2023, title={CCQ: Cross-Class Query Network for Partially Labeled Organ Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25264}, DOI={10.1609/aaai.v37i2.25264}, abstractNote={Learning multi-organ segmentation from multiple partially-labeled datasets attracts increasing attention. It can be a promising solution for the scarcity of large-scale, fully labeled 3D medical image segmentation datasets. However, existing algorithms of multi-organ segmentation on partially-labeled datasets neglect the semantic relations and anatomical priors between different categories of organs, which is crucial for partially-labeled multi-organ segmentation. In this paper, we tackle the limitations above by proposing the Cross-Class Query Network (CCQ). CCQ consists of an image encoder, a cross-class query learning module, and an attentive refinement segmentation module. More specifically, the image encoder captures the long-range dependency of a single image via the transformer encoder. Cross-class query learning module first generates query vectors that represent semantic concepts of different categories and then utilizes these query vectors to find the class-relevant features of image representation for segmentation. The attentive refinement segmentation module with an attentive skip connection incorporates the high-resolution image details and eliminates the class-irrelevant noise. 
Extensive experiment results demonstrate that CCQ outperforms all the state-of-the-art models on the MOTS dataset, which consists of seven organ and tumor segmentation tasks. Code is available at https://github.com/Yang-007/CCQ.git.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Xuyang and Wen, Bingbing and Yang, Sibei}, year={2023}, month={Jun.}, pages={1755-1763} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25264/25036", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25264", + "pdf_size": 10092421, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8855766718750923243&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "shanghaitech.edu.cn;uw.edu;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn;uw.edu;shanghaitech.edu.cn", + "github": "https://github.com/Yang-007/CCQ.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "ShanghaiTech University;University of Washington;Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "aff_unique_dep": "School of Information Science and Technology;Information School;", + "aff_unique_url": "https://www.shanghaitech.edu.cn;https://www.washington.edu;", + "aff_unique_abbr": "ShanghaiTech;UW;", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Shanghai;Seattle;", + "aff_country_unique_index": "0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26246", + "title": "CDMA: A Practical Cross-Device Federated Learning Algorithm for General Minimax Problems", + "track": "main", + "status": "Technical", + "abstract": "Minimax problems arise in a wide range of important applications including robust adversarial learning and Generative Adversarial Network (GAN) training. Recently, algorithms for minimax problems in the Federated Learning (FL) paradigm have received considerable interest. 
Existing federated algorithms for general minimax problems require the full aggregation (i.e., aggregation of local model information from all clients) in each training round. Thus, they are inapplicable to an important setting of FL known as the cross-device setting, which involves numerous unreliable mobile/IoT devices. In this paper, we develop the first practical algorithm named CDMA for general minimax problems in the cross-device FL setting. CDMA is based on a Start-Immediately-With-Enough-Responses mechanism, in which the server first signals a subset of clients to perform local computation and then starts to aggregate the local results reported by clients once it receives responses from enough clients in each round. With this mechanism, CDMA is resilient to the low client availability. In addition, CDMA is incorporated with a lightweight global correction in the local update steps of clients, which mitigates the impact of slow network connections. We establish theoretical guarantees of CDMA under different choices of hyperparameters and conduct experiments on AUC maximization, robust adversarial network training, and GAN training tasks. 
Theoretical and experimental results demonstrate the efficiency of CDMA.", + "primary_area": "machine learning iv", + "author": "Jiahao Xie; Chao Zhang; Zebang Shen; Weijie Liu; Hui Qian", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; Advanced Technology Institute, Zhejiang University; ETH Z\u00fcrich; Qiushi Academy for Advanced Studies, Zhejiang University + College of Computer Science and Technology, Zhejiang University; State Key Lab of CAD&CG, Zhejiang University", + "bibtex": "@article{Xie_Zhang_Shen_Liu_Qian_2023, title={CDMA: A Practical Cross-Device Federated Learning Algorithm for General Minimax Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26246}, DOI={10.1609/aaai.v37i9.26246}, abstractNote={Minimax problems arise in a wide range of important applications including robust adversarial learning and Generative Adversarial Network (GAN) training. Recently, algorithms for minimax problems in the Federated Learning (FL) paradigm have received considerable interest. Existing federated algorithms for general minimax problems require the full aggregation (i.e., aggregation of local model information from all clients) in each training round. Thus, they are inapplicable to an important setting of FL known as the cross-device setting, which involves numerous unreliable mobile/IoT devices. In this paper, we develop the first practical algorithm named CDMA for general minimax problems in the cross-device FL setting. CDMA is based on a Start-Immediately-With-Enough-Responses mechanism, in which the server first signals a subset of clients to perform local computation and then starts to aggregate the local results reported by clients once it receives responses from enough clients in each round. With this mechanism, CDMA is resilient to the low client availability. 
In addition, CDMA is incorporated with a lightweight global correction in the local update steps of clients, which mitigates the impact of slow network connections. We establish theoretical guarantees of CDMA under different choices of hyperparameters and conduct experiments on AUC maximization, robust adversarial network training, and GAN training tasks. Theoretical and experimental results demonstrate the efficiency of CDMA.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Jiahao and Zhang, Chao and Shen, Zebang and Liu, Weijie and Qian, Hui}, year={2023}, month={Jun.}, pages={10481-10489} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26246/26018", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26246", + "pdf_size": 276297, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7658537073967856458&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "zju.edu.cn;zju.edu.cn;inf.ethz.ch;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;inf.ethz.ch;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0+0;0", + "aff_unique_norm": "Zhejiang University;ETH Z\u00fcrich", + "aff_unique_dep": "College of Computer Science and Technology;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.ethz.ch", + "aff_unique_abbr": "ZJU;ETHZ", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0;0;1;0+0;0", + "aff_country_unique": "China;Switzerland" + }, + { + "id": "article-25239", + "title": "CDTA: A Cross-Domain Transfer-Based Attack with Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Despite the excellent performance, deep neural networks (DNNs) have been shown to be vulnerable to adversarial examples. Besides, these examples are often transferable among different models. 
In other words, the same adversarial example can fool multiple models with different architectures at the same time. Based on this property, many black-box transfer-based attack techniques have been developed. However, current transfer-based attacks generally focus on the cross-architecture setting, where the attacker has access to the training data of the target model, which is not guaranteed in realistic situations. In this paper, we design a Cross-Domain Transfer-Based Attack (CDTA), which works in the cross-domain scenario. In this setting, attackers have no information about the target model, such as its architecture and training data. Specifically, we propose a contrastive spectral training method to train a feature extractor on a source domain (e.g., ImageNet) and use it to craft adversarial examples on target domains (e.g., Oxford 102 Flower). Our method corrupts the semantic information of the benign image by scrambling the outputs of both the intermediate feature layers and the final layer of the feature extractor. We evaluate CDTA with 16 target deep models on four datasets with widely varying styles. The results confirm that, in terms of the attack success rate, our approach can consistently outperform the state-of-the-art baselines by an average of 11.45% across all target models. Our code is available at https://github.com/LiulietLee/CDTA.", + "primary_area": "computer vision ii", + "author": "Zihan Li; Weibin Wu; Yuxin Su; Zibin Zheng; Michael R. 
Lyu", + "authorids": "", + "aff": "School of Software Engineering, Sun Yat-sen University; School of Software Engineering, Sun Yat-sen University; School of Software Engineering, Sun Yat-sen University; School of Software Engineering, Sun Yat-sen University; Department of Computer Science and Engineering, The Chinese University of Hong Kong", + "bibtex": "@article{Li_Wu_Su_Zheng_Lyu_2023, title={CDTA: A Cross-Domain Transfer-Based Attack with Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25239}, DOI={10.1609/aaai.v37i2.25239}, abstractNote={Despite the excellent performance, deep neural networks (DNNs) have been shown to be vulnerable to adversarial examples. Besides, these examples are often transferable among different models. In other words, the same adversarial example can fool multiple models with different architectures at the same time. Based on this property, many black-box transfer-based attack techniques have been developed. However, current transfer-based attacks generally focus on the cross-architecture setting, where the attacker has access to the training data of the target model, which is not guaranteed in realistic situations. In this paper, we design a Cross-Domain Transfer-Based Attack (CDTA), which works in the cross-domain scenario. In this setting, attackers have no information about the target model, such as its architecture and training data. Specifically, we propose a contrastive spectral training method to train a feature extractor on a source domain (e.g., ImageNet) and use it to craft adversarial examples on target domains (e.g., Oxford 102 Flower). Our method corrupts the semantic information of the benign image by scrambling the outputs of both the intermediate feature layers and the final layer of the feature extractor. We evaluate CDTA with 16 target deep models on four datasets with widely varying styles. 
The results confirm that, in terms of the attack success rate, our approach can consistently outperform the state-of-the-art baselines by an average of 11.45% across all target models. Our code is available at https://github.com/LiulietLee/CDTA.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zihan and Wu, Weibin and Su, Yuxin and Zheng, Zibin and Lyu, Michael R.}, year={2023}, month={Jun.}, pages={1530-1538} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25239/25011", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25239", + "pdf_size": 727911, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1985618526790161483&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn;cse.cuhk.edu.hk", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn;cse.cuhk.edu.hk", + "github": "https://github.com/LiulietLee/CDTA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Sun Yat-sen University;The Chinese University of Hong Kong", + "aff_unique_dep": "School of Software Engineering;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.sysu.edu.cn;https://www.cuhk.edu.hk", + "aff_unique_abbr": "SYSU;CUHK", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hong Kong", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25214", + "title": "CEE-Net: Complementary End-to-End Network for 3D Human Pose Generation and Estimation", + "track": "main", + "status": "Technical", + "abstract": "The limited number of actors and actions in existing datasets make 3D pose estimators tend to overfit, which can be seen from the performance degradation of the algorithm on cross-datasets, especially for rare and complex poses. 
Although previous data augmentation works have increased the diversity of the training set, the changes in camera viewpoint and position play a dominant role in improving the accuracy of the estimator, while the generated 3D poses are limited and still heavily rely on the source dataset. In addition, these works do not consider the adaptability of the pose estimator to generated data, and complex poses will cause training collapse. In this paper, we propose the CEE-Net, a Complementary End-to-End Network for 3D human pose generation and estimation. The generator extremely expands the distribution of each joint-angle in the existing dataset and limits them to a reasonable range. By learning the correlations within and between the torso and limbs, the estimator can combine different body-parts more effectively and weaken the influence of specific joint-angle changes on the global pose, improving the generalization ability. Extensive ablation studies show that our pose generator greatly strengthens the joint-angle distribution, and our pose estimator can utilize these poses positively. Compared with the state-of-the-art methods, our method can achieve much better performance on various cross-datasets, rare and complex poses.", + "primary_area": "computer vision i", + "author": "Haolun Li; Chi-Man Pun", + "authorids": "", + "aff": "University of Macau, Macau, China; University of Macau, Macau, China", + "bibtex": "@article{Li_Pun_2023, title={CEE-Net: Complementary End-to-End Network for 3D Human Pose Generation and Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25214}, DOI={10.1609/aaai.v37i1.25214}, abstractNote={The limited number of actors and actions in existing datasets make 3D pose estimators tend to overfit, which can be seen from the performance degradation of the algorithm on cross-datasets, especially for rare and complex poses. 
Although previous data augmentation works have increased the diversity of the training set, the changes in camera viewpoint and position play a dominant role in improving the accuracy of the estimator, while the generated 3D poses are limited and still heavily rely on the source dataset. In addition, these works do not consider the adaptability of the pose estimator to generated data, and complex poses will cause training collapse. In this paper, we propose the CEE-Net, a Complementary End-to-End Network for 3D human pose generation and estimation. The generator extremely expands the distribution of each joint-angle in the existing dataset and limits them to a reasonable range. By learning the correlations within and between the torso and limbs, the estimator can combine different body-parts more effectively and weaken the influence of specific joint-angle changes on the global pose, improving the generalization ability. Extensive ablation studies show that our pose generator greatly strengthens the joint-angle distribution, and our pose estimator can utilize these poses positively. 
Compared with the state-of-the-art methods, our method can achieve much better performance on various cross-datasets, rare and complex poses.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Haolun and Pun, Chi-Man}, year={2023}, month={Jun.}, pages={1305-1313} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25214/24986", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25214", + "pdf_size": 3287420, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1664844121423331548&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;umac.mo", + "email": "gmail.com;umac.mo", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Macau", + "aff_unique_dep": "", + "aff_unique_url": "https://www.um.edu.mo", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Macau", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26281", + "title": "CEM: Constrained Entropy Maximization for Task-Agnostic Safe Exploration", + "track": "main", + "status": "Technical", + "abstract": "In the absence of assigned tasks, a learning agent typically seeks to explore its environment efficiently. 
However, the pursuit of exploration will bring more safety risks.\nAn under-explored aspect of reinforcement learning is how to achieve safe efficient exploration when the task is unknown.\nIn this paper, we propose a practical Constrained Entropy Maximization (CEM) algorithm to solve task-agnostic safe exploration problems, which naturally require a finite horizon and undiscounted constraints on safety costs.\nThe CEM algorithm aims to learn a policy that maximizes state entropy under the premise of safety.\nTo avoid approximating the state density in complex domains, CEM leverages a k-nearest neighbor entropy estimator to evaluate the efficiency of exploration.\nIn terms of safety, CEM minimizes the safety costs, and adaptively trades off safety and exploration based on the current constraint satisfaction. The empirical analysis shows that CEM enables the acquisition of a safe exploration policy in complex environments, resulting in improved performance in both safety and sample efficiency for target tasks.", + "primary_area": "machine learning iv", + "author": "Qisong Yang; Matthijs T.J. Spaan", + "authorids": "", + "aff": "Delft University of Technology, The Netherlands; Delft University of Technology, The Netherlands", + "bibtex": "@article{Yang_Spaan_2023, title={CEM: Constrained Entropy Maximization for Task-Agnostic Safe Exploration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26281}, DOI={10.1609/aaai.v37i9.26281}, abstractNote={In the absence of assigned tasks, a learning agent typically seeks to explore its environment efficiently. 
However, the pursuit of exploration will bring more safety risks.\nAn under-explored aspect of reinforcement learning is how to achieve safe efficient exploration when the task is unknown.\nIn this paper, we propose a practical Constrained Entropy Maximization (CEM) algorithm to solve task-agnostic safe exploration problems, which naturally require a finite horizon and undiscounted constraints on safety costs.\nThe CEM algorithm aims to learn a policy that maximizes state entropy under the premise of safety.\nTo avoid approximating the state density in complex domains, CEM leverages a k-nearest neighbor entropy estimator to evaluate the efficiency of exploration.\nIn terms of safety, CEM minimizes the safety costs, and adaptively trades off safety and exploration based on the current constraint satisfaction. The empirical analysis shows that CEM enables the acquisition of a safe exploration policy in complex environments, resulting in improved performance in both safety and sample efficiency for target tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Qisong and Spaan, Matthijs T.J.}, year={2023}, month={Jun.}, pages={10798-10806} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26281/26053", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26281", + "pdf_size": 5481577, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3692448601168692097&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "tudelft.nl;tudelft.nl", + "email": "tudelft.nl;tudelft.nl", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Delft University of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tudelft.nl", + "aff_unique_abbr": "TUDelft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "The Netherlands" + }, + { 
+ "id": "article-26308", + "title": "CEMA \u2013 Cost-Efficient Machine-Assisted Document Annotations", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of semantically annotating textual documents that are complex in the sense that the documents are long, feature rich, and domain specific. Due to their complexity, such annotation tasks require trained human workers, which are very expensive in both time and money. We propose CEMA, a method for deploying machine learning to assist humans in complex document annotation. CEMA estimates the human cost of annotating each document and selects the set of documents to be annotated that strike the best balance between model accuracy and human cost. We conduct experiments on complex annotation tasks in which we compare CEMA against other document selection and annotation strategies. Our results show that CEMA is the most cost-efficient solution for those tasks.", + "primary_area": "machine learning iv", + "author": "Guowen Yuan; Ben Kao; Tien-Hsuan Wu", + "authorids": "", + "aff": "The University of Hong Kong; The University of Hong Kong; The University of Hong Kong", + "bibtex": "@article{Yuan_Kao_Wu_2023, title={CEMA \u2013 Cost-Efficient Machine-Assisted Document Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26308}, DOI={10.1609/aaai.v37i9.26308}, abstractNote={We study the problem of semantically annotating textual documents that are complex in the sense that the documents are long, feature rich, and domain specific. Due to their complexity, such annotation tasks require trained human workers, which are very expensive in both time and money. We propose CEMA, a method for deploying machine learning to assist humans in complex document annotation. CEMA estimates the human cost of annotating each document and selects the set of documents to be annotated that strike the best balance between model accuracy and human cost. 
We conduct experiments on complex annotation tasks in which we compare CEMA against other document selection and annotation strategies. Our results show that CEMA is the most cost-efficient solution for those tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Guowen and Kao, Ben and Wu, Tien-Hsuan}, year={2023}, month={Jun.}, pages={11043-11050} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26308/26080", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26308", + "pdf_size": 1709696, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6821126328502863239&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.hku.hk;cs.hku.hk;cs.hku.hk", + "email": "cs.hku.hk;cs.hku.hk;cs.hku.hk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The University of Hong Kong", + "aff_unique_dep": "", + "aff_unique_url": "https://www.hku.hk", + "aff_unique_abbr": "HKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25860", + "title": "CF-ViT: A General Coarse-to-Fine Method for Vision Transformer", + "track": "main", + "status": "Technical", + "abstract": "Vision Transformers (ViT) have made many breakthroughs in computer vision tasks. However, considerable redundancy arises in the spatial dimension of an input image, leading to massive computational costs. Therefore, We propose a coarse-to-fine vision transformer (CF-ViT) to relieve computational burden while retaining performance in this paper. Our proposed CF-ViT is motivated by two important observations in modern ViT models: (1) The coarse-grained patch splitting can locate informative regions of an input image. (2) Most images can be well recognized by a ViT model in a small-length token sequence. 
Therefore, our CF-ViT implements network inference in a two-stage manner. At coarse inference stage, an input image is split into a small-length patch sequence for a computationally economical classification. If not well recognized, the informative patches are identified and further re-split in a fine-grained granularity. Extensive experiments demonstrate the efficacy of our CF-ViT. For example, without any compromise on performance, CF-ViT reduces 53% FLOPs of LV-ViT, and also achieves 2.01x throughput. Code of this project is at https://github.com/ChenMnZ/CF-ViT", + "primary_area": "machine learning i", + "author": "Mengzhao Chen; Mingbao Lin; Ke Li; Yunhang Shen; Yongjian Wu; Fei Chao; Rongrong Ji", + "authorids": "", + "aff": "MAC Lab, Department of Artificial Intelligence, Xiamen University + Institute of Artificial Intelligence, Xiamen University; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; MAC Lab, Department of Artificial Intelligence, Xiamen University + Institute of Artificial Intelligence, Xiamen University; MAC Lab, Department of Artificial Intelligence, Xiamen University + Institute of Artificial Intelligence, Xiamen University", + "bibtex": "@article{Chen_Lin_Li_Shen_Wu_Chao_Ji_2023, title={CF-ViT: A General Coarse-to-Fine Method for Vision Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25860}, DOI={10.1609/aaai.v37i6.25860}, abstractNote={Vision Transformers (ViT) have made many breakthroughs in computer vision tasks. However, considerable redundancy arises in the spatial dimension of an input image, leading to massive computational costs. Therefore, We propose a coarse-to-fine vision transformer (CF-ViT) to relieve computational burden while retaining performance in this paper. Our proposed CF-ViT is motivated by two important observations in modern ViT models: (1) The coarse-grained patch splitting can locate informative regions of an input image. 
(2) Most images can be well recognized by a ViT model in a small-length token sequence. Therefore, our CF-ViT implements network inference in a two-stage manner. At coarse inference stage, an input image is split into a small-length patch sequence for a computationally economical classification. If not well recognized, the informative patches are identified and further re-split in a fine-grained granularity. Extensive experiments demonstrate the efficacy of our CF-ViT. For example, without any compromise on performance, CF-ViT reduces 53% FLOPs of LV-ViT, and also achieves 2.01x throughput. Code of this project is at https://github.com/ChenMnZ/CF-ViT}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Mengzhao and Lin, Mingbao and Li, Ke and Shen, Yunhang and Wu, Yongjian and Chao, Fei and Ji, Rongrong}, year={2023}, month={Jun.}, pages={7042-7052} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25860/25632", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25860", + "pdf_size": 1251229, + "gs_citation": 79, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2401189037279445006&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xmu.edu.cn;outlook.com;tristanli.sh.gmail.com;gmail.com;tencent.com;xmu.edu.cn;xmu.edu.cn", + "email": "stu.xmu.edu.cn;outlook.com;tristanli.sh.gmail.com;gmail.com;tencent.com;xmu.edu.cn;xmu.edu.cn", + "github": "https://github.com/ChenMnZ/CF-ViT", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;1;1;1;1;0+0;0+0", + "aff_unique_norm": "Xiamen University;Tencent", + "aff_unique_dep": "Department of Artificial Intelligence;Youtu Lab", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "XMU;Tencent", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25279", + 
"title": "CFFT-GAN: Cross-Domain Feature Fusion Transformer for Exemplar-Based Image Translation", + "track": "main", + "status": "Technical", + "abstract": "Exemplar-based image translation refers to the task of generating images with the desired style, while conditioning on certain input image. Most of the current methods learn the correspondence between two input domains and lack the mining of information within the domain. In this paper, we propose a more general learning approach by considering two domain features as a whole and learning both inter-domain correspondence and intra-domain potential information interactions. Specifically, we propose a Cross-domain Feature Fusion Transformer (CFFT) to learn inter- and intra-domain feature fusion. Based on CFFT, the proposed CFFT-GAN works well on exemplar-based image translation. Moreover, CFFT-GAN is able to decouple and fuse features from multiple domains by cascading CFFT modules. We conduct rich quantitative and qualitative experiments on several image translation tasks, and the results demonstrate the superiority of our approach compared to state-of-the-art methods. Ablation studies show the importance of our proposed CFFT. 
Application experimental results reflect the potential of our method.", + "primary_area": "computer vision ii", + "author": "Tianxiang Ma; Bingchuan Li; Wei Liu; Miao Hua; Jing Dong; Tieniu Tan", + "authorids": "", + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences+CRIPAC & NLPR, Institute of Automation, Chinese Academy of Sciences; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; CRIPAC & NLPR, Institute of Automation, Chinese Academy of Sciences+Nanjing University; CRIPAC & NLPR, Institute of Automation, Chinese Academy of Sciences+Nanjing University", + "bibtex": "@article{Ma_Li_Liu_Hua_Dong_Tan_2023, title={CFFT-GAN: Cross-Domain Feature Fusion Transformer for Exemplar-Based Image Translation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25279}, DOI={10.1609/aaai.v37i2.25279}, abstractNote={Exemplar-based image translation refers to the task of generating images with the desired style, while conditioning on certain input image. Most of the current methods learn the correspondence between two input domains and lack the mining of information within the domain. In this paper, we propose a more general learning approach by considering two domain features as a whole and learning both inter-domain correspondence and intra-domain potential information interactions. Specifically, we propose a Cross-domain Feature Fusion Transformer (CFFT) to learn inter- and intra-domain feature fusion. Based on CFFT, the proposed CFFT-GAN works well on exemplar-based image translation. Moreover, CFFT-GAN is able to decouple and fuse features from multiple domains by cascading CFFT modules. We conduct rich quantitative and qualitative experiments on several image translation tasks, and the results demonstrate the superiority of our approach compared to state-of-the-art methods. Ablation studies show the importance of our proposed CFFT. 
Application experimental results reflect the potential of our method.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Tianxiang and Li, Bingchuan and Liu, Wei and Hua, Miao and Dong, Jing and Tan, Tieniu}, year={2023}, month={Jun.}, pages={1887-1895} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25279/25051", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25279", + "pdf_size": 4170613, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10223323108911469499&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cripac.ia.ac.cn;bytedance.com;bytedance.com;gmail.com;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "cripac.ia.ac.cn;bytedance.com;bytedance.com;gmail.com;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;2;2;1+3;1+3", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;ByteDance Ltd;Nanjing University", + "aff_unique_dep": "School of Artificial Intelligence;Institute of Automation;;", + "aff_unique_url": "http://www.ucas.ac.cn;http://www.ia.cas.cn;https://www.bytedance.com;https://www.nju.edu.cn", + "aff_unique_abbr": "UCAS;CAS;ByteDance;Nanjing U", + "aff_campus_unique_index": ";1;1;1;;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26980", + "title": "CKS: A Community-Based K-shell Decomposition Approach Using Community Bridge Nodes for Influence Maximization (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Social networks have enabled user-specific advertisements and recommendations on their platforms, which puts a significant focus on Influence Maximisation (IM) for target advertising and related tasks. 
The aim is to identify nodes in the network which can maximize the spread of information through a diffusion cascade. We propose a community structures-based approach that employs K-Shell algorithm with community structures to generate a score for the connections between seed nodes and communities. Further, our approach employs entropy within communities to ensure the proper spread of information within the communities. We validate our approach on four publicly available networks and show its superiority to four state-of-the-art approaches while still being relatively efficient.", + "primary_area": "", + "author": "Inder Khatri; Aaryan Gupta; Arjun Choudhry; Aryan Tyagi; Dinesh Kumar Vishwakarma; Mukesh Prasad", + "authorids": "", + "aff": "Biometric Research Laboratory, Delhi Technological University, New Delhi, India+Delhi Technological University, New Delhi, India; Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; School of Computer Science, FEIT, University of Technology Sydney, Sydney, Australia", + "bibtex": "@article{Khatri_Gupta_Choudhry_Tyagi_Vishwakarma_Prasad_2024, title={CKS: A Community-Based K-shell Decomposition Approach Using Community Bridge Nodes for Influence Maximization (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26980}, DOI={10.1609/aaai.v37i13.26980}, abstractNote={Social networks have enabled user-specific advertisements and recommendations on their platforms, which puts a significant focus on Influence Maximisation (IM) for target advertising and related tasks. The aim is to identify nodes in the network which can maximize the spread of information through a diffusion cascade. 
We propose a community structures-based approach that employs K-Shell algorithm with community structures to generate a score for the connections between seed nodes and communities. Further, our approach employs entropy within communities to ensure the proper spread of information within the communities. We validate our approach on four publicly available networks and show its superiority to four state-of-the-art approaches while still being relatively efficient.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Khatri, Inder and Gupta, Aaryan and Choudhry, Arjun and Tyagi, Aryan and Vishwakarma, Dinesh Kumar and Prasad, Mukesh}, year={2024}, month={Jul.}, pages={16240-16241} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26980/26752", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26980", + "pdf_size": 770679, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11180836158964890667&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;dtu.ac.in;uts.edu.au", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;dtu.ac.in;uts.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0;0;0;0;1", + "aff_unique_norm": "Delhi Technological University;University of Technology Sydney", + "aff_unique_dep": "Biometric Research Laboratory;School of Computer Science", + "aff_unique_url": "https://www.dtu.ac.in;https://www.uts.edu.au", + "aff_unique_abbr": "DTU;UTS", + "aff_campus_unique_index": "0+0;0;0;0;0;1", + "aff_campus_unique": "New Delhi;Sydney", + "aff_country_unique_index": "0+0;0;0;0;0;1", + "aff_country_unique": "India;Australia" + }, + { + "id": "article-25297", + "title": "CL3D: Unsupervised Domain Adaptation for Cross-LiDAR 3D Detection", + "track": "main", + "status": "Technical", + "abstract": "Domain adaptation for Cross-LiDAR 3D detection is challenging due to the 
large gap on the raw data representation with disparate point densities and point arrangements. By exploring domain-invariant 3D geometric characteristics and motion patterns, we present an unsupervised domain adaptation method that overcomes above difficulties. First, we propose the Spatial Geometry Alignment module to extract similar 3D shape geometric features of the same object class to align two domains, while eliminating the effect of distinct point distributions. Second, we present Temporal Motion Alignment module to utilize motion features in sequential frames of data to match two domains. Prototypes generated from two modules are incorporated into the pseudo-label reweighting procedure and contribute to our effective self-training framework for the target domain. Extensive experiments show that our method achieves state-of-the-art performance on cross-device datasets, especially for the datasets with large gaps captured by mechanical scanning LiDARs and solid-state LiDARs in various scenes. Project homepage is at https://github.com/4DVLab/CL3D.git.", + "primary_area": "computer vision ii", + "author": "Xidong Peng; Xinge Zhu; Yuexin Ma", + "authorids": "", + "aff": "ShanghaiTech University; The Chinese University of Hong Kong; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "bibtex": "@article{Peng_Zhu_Ma_2023, title={CL3D: Unsupervised Domain Adaptation for Cross-LiDAR 3D Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25297}, DOI={10.1609/aaai.v37i2.25297}, abstractNote={Domain adaptation for Cross-LiDAR 3D detection is challenging due to the large gap on the raw data representation with disparate point densities and point arrangements. By exploring domain-invariant 3D geometric characteristics and motion patterns, we present an unsupervised domain adaptation method that overcomes above difficulties. 
First, we propose the Spatial Geometry Alignment module to extract similar 3D shape geometric features of the same object class to align two domains, while eliminating the effect of distinct point distributions. Second, we present Temporal Motion Alignment module to utilize motion features in sequential frames of data to match two domains. Prototypes generated from two modules are incorporated into the pseudo-label reweighting procedure and contribute to our effective self-training framework for the target domain. Extensive experiments show that our method achieves state-of-the-art performance on cross-device datasets, especially for the datasets with large gaps captured by mechanical scanning LiDARs and solid-state LiDARs in various scenes. Project homepage is at https://github.com/4DVLab/CL3D.git.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Xidong and Zhu, Xinge and Ma, Yuexin}, year={2023}, month={Jun.}, pages={2047-2055} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25297/25069", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25297", + "pdf_size": 7510267, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10873036025050452619&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff_domain": "shanghaitech.edu.cn; ;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn; ;shanghaitech.edu.cn", + "github": "https://github.com/4DVLab/CL3D.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "ShanghaiTech University;The Chinese University of Hong Kong;Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.shanghaitech.edu.cn;https://www.cuhk.edu.hk;", + "aff_unique_abbr": "ShanghaiTech;CUHK;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": 
"China" + }, + { + "id": "article-26893", + "title": "CLGT: A Graph Transformer for Student Performance Prediction in Collaborative Learning", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "Modeling and predicting the performance of students in collaborative learning paradigms is an important task. Most of the research presented in literature regarding collaborative learning focuses on the discussion forums and social learning networks. There are only a few works that investigate how students interact with each other in team projects and how such interactions affect their academic performance. In order to bridge this gap, we choose a software engineering course as the study subject. The students who participate in a software engineering course are required to team up and complete a software project together. In this work, we construct an interaction graph based on the activities of students grouped in various teams. Based on this student interaction graph, we present an extended graph transformer framework for collaborative learning (CLGT) for evaluating and predicting the performance of students. Moreover, the proposed CLGT contains an interpretation module that explains the prediction results and visualizes the student interaction patterns. The experimental results confirm that the proposed CLGT outperforms the baseline models in terms of performing predictions based on the real-world datasets. 
Moreover, the proposed CLGT differentiates the students with poor performance in the collaborative learning paradigm and gives teachers early warnings, so that appropriate assistance can be provided.", + "primary_area": "", + "author": "Tianhao Peng; Yu Liang; Wenjun Wu; Jian Ren; Zhao Pengrui; Yanjun Pu", + "authorids": "", + "aff": "State Key Laboratory of Software Development Environment, Beihang University+School of Computer Science and Engineering, Beihang University; Beijing Engineering Research Center for IoT Software and Systems, Beijing University of Technology; State Key Laboratory of Software Development Environment, Beihang University+Institute of Artificial Intelligence, Beihang University; State Key Laboratory of Software Development Environment, Beihang University+School of Computer Science and Engineering, Beihang University; State Key Laboratory of Software Development Environment, Beihang University+School of Computer Science and Engineering, Beihang University; State Key Laboratory of Software Development Environment, Beihang University+School of Computer Science and Engineering, Beihang University", + "bibtex": "@article{Peng_Liang_Wu_Ren_Pengrui_Pu_2024, title={CLGT: A Graph Transformer for Student Performance Prediction in Collaborative Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26893}, DOI={10.1609/aaai.v37i13.26893}, abstractNote={Modeling and predicting the performance of students in collaborative learning paradigms is an important task. Most of the research presented in literature regarding collaborative learning focuses on the discussion forums and social learning networks. There are only a few works that investigate how students interact with each other in team projects and how such interactions affect their academic performance. In order to bridge this gap, we choose a software engineering course as the study subject. 
The students who participate in a software engineering course are required to team up and complete a software project together. In this work, we construct an interaction graph based on the activities of students grouped in various teams. Based on this student interaction graph, we present an extended graph transformer framework for collaborative learning (CLGT) for evaluating and predicting the performance of students. Moreover, the proposed CLGT contains an interpretation module that explains the prediction results and visualizes the student interaction patterns. The experimental results confirm that the proposed CLGT outperforms the baseline models in terms of performing predictions based on the real-world datasets. Moreover, the proposed CLGT differentiates the students with poor performance in the collaborative learning paradigm and gives teachers early warnings, so that appropriate assistance can be provided.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Tianhao and Liang, Yu and Wu, Wenjun and Ren, Jian and Pengrui, Zhao and Pu, Yanjun}, year={2024}, month={Jul.}, pages={15947-15954} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26893/26665", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26893", + "pdf_size": 2048961, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12332539111688959739&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "buaa.edu.cn;bjut.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;bjut.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;1;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Beihang University;Beijing University of Technology", + "aff_unique_dep": "State Key Laboratory of Software Development Environment;Engineering Research Center for IoT Software and Systems", + 
"aff_unique_url": "http://www.buaa.edu.cn;http://www.bjut.edu.cn", + "aff_unique_abbr": "Beihang;BJUT", + "aff_campus_unique_index": ";1;;;;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25225", + "title": "CLIP-ReID: Exploiting Vision-Language Model for Image Re-identification without Concrete Text Labels", + "track": "main", + "status": "Technical", + "abstract": "Pre-trained vision-language models like CLIP have recently shown superior performances on various downstream tasks, including image classification and segmentation. However, in fine-grained image re-identification (ReID), the labels are indexes, lacking concrete text descriptions. Therefore, it remains to be determined how such models could be applied to these tasks. This paper first finds out that simply fine-tuning the visual model initialized by the image encoder in CLIP, has already obtained competitive performances in various ReID tasks. Then we propose a two-stage strategy to facilitate a better visual representation. The key idea is to fully exploit the cross-modal description ability in CLIP through a set of learnable text tokens for each ID and give them to the text encoder to form ambiguous descriptions. In the first training stage, image and text encoders from CLIP keep fixed, and only the text tokens are optimized from scratch by the contrastive loss computed within a batch. In the second stage, the ID-specific text tokens and their encoder become static, providing constraints for fine-tuning the image encoder. With the help of the designed loss in the downstream task, the image encoder is able to represent data as vectors in the feature embedding accurately. The effectiveness of the proposed strategy is validated on several datasets for the person or vehicle ReID tasks. 
Code is available at https://github.com/Syliz517/CLIP-ReID.", + "primary_area": "computer vision i", + "author": "Siyuan Li; Li Sun; Qingli Li", + "authorids": "", + "aff": "Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University; Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University + Key Laboratory of Advanced Theory and Application in Statistics and Data Science, East China Normal University; Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University", + "bibtex": "@article{Li_Sun_Li_2023, title={CLIP-ReID: Exploiting Vision-Language Model for Image Re-identification without Concrete Text Labels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25225}, DOI={10.1609/aaai.v37i1.25225}, abstractNote={Pre-trained vision-language models like CLIP have recently shown superior performances on various downstream tasks, including image classification and segmentation. However, in fine-grained image re-identification (ReID), the labels are indexes, lacking concrete text descriptions. Therefore, it remains to be determined how such models could be applied to these tasks. This paper first finds out that simply fine-tuning the visual model initialized by the image encoder in CLIP, has already obtained competitive performances in various ReID tasks. Then we propose a two-stage strategy to facilitate a better visual representation. The key idea is to fully exploit the cross-modal description ability in CLIP through a set of learnable text tokens for each ID and give them to the text encoder to form ambiguous descriptions. In the first training stage, image and text encoders from CLIP keep fixed, and only the text tokens are optimized from scratch by the contrastive loss computed within a batch. In the second stage, the ID-specific text tokens and their encoder become static, providing constraints for fine-tuning the image encoder. 
With the help of the designed loss in the downstream task, the image encoder is able to represent data as vectors in the feature embedding accurately. The effectiveness of the proposed strategy is validated on several datasets for the person or vehicle ReID tasks. Code is available at https://github.com/Syliz517/CLIP-ReID.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Siyuan and Sun, Li and Li, Qingli}, year={2023}, month={Jun.}, pages={1405-1413} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25225/24997", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25225", + "pdf_size": 14308133, + "gs_citation": 159, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13094408219662798029&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 5, + "aff_domain": "ee.ecnu.edu.cn; ; ", + "email": "ee.ecnu.edu.cn; ; ", + "github": "https://github.com/Syliz517/CLIP-ReID", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "Shanghai Key Laboratory of Multidimensional Information Processing", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25326", + "title": "CLIPVG: Text-Guided Image Manipulation Using Differentiable Vector Graphics", + "track": "main", + "status": "Technical", + "abstract": "Considerable progress has recently been made in leveraging CLIP (Contrastive Language-Image Pre-Training) models for text-guided image manipulation. However, all existing works rely on additional generative models to ensure the quality of results, because CLIP alone cannot provide enough guidance information for fine-scale pixel-level changes. 
In this paper, we introduce CLIPVG, a text-guided image manipulation framework using differentiable vector graphics, which is also the first CLIP-based general image manipulation framework that does not require any additional generative models. We demonstrate that CLIPVG can not only achieve state-of-art performance in both semantic correctness and synthesis quality, but also is flexible enough to support various applications far beyond the capability of all existing methods.", + "primary_area": "computer vision ii", + "author": "Yiren Song; Xuning Shao; Kang Chen; Weidong Zhang; Zhongliang Jing; Minzhe Li", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Netease Games AI Lab; Netease Games AI Lab; Netease Games AI Lab; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Song_Shao_Chen_Zhang_Jing_Li_2023, title={CLIPVG: Text-Guided Image Manipulation Using Differentiable Vector Graphics}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25326}, DOI={10.1609/aaai.v37i2.25326}, abstractNote={Considerable progress has recently been made in leveraging CLIP (Contrastive Language-Image Pre-Training) models for text-guided image manipulation. However, all existing works rely on additional generative models to ensure the quality of results, because CLIP alone cannot provide enough guidance information for fine-scale pixel-level changes. In this paper, we introduce CLIPVG, a text-guided image manipulation framework using differentiable vector graphics, which is also the first CLIP-based general image manipulation framework that does not require any additional generative models. 
We demonstrate that CLIPVG can not only achieve state-of-art performance in both semantic correctness and synthesis quality, but also is flexible enough to support various applications far beyond the capability of all existing methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Yiren and Shao, Xuning and Chen, Kang and Zhang, Weidong and Jing, Zhongliang and Li, Minzhe}, year={2023}, month={Jun.}, pages={2312-2320} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25326/25098", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25326", + "pdf_size": 6843113, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1751418150304035513&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;corp.netease.com;corp.netease.com;corp.netease.com;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;corp.netease.com;corp.netease.com;corp.netease.com;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Netease Games", + "aff_unique_dep": ";AI Lab", + "aff_unique_url": "https://www.sjtu.edu.cn;https://game.netease.com", + "aff_unique_abbr": "SJTU;NGAL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27089", + "title": "CLUE-AD: A Context-Based Method for Labeling Unobserved Entities in Autonomous Driving Data", + "track": "demonstrations", + "status": "Technical", + "abstract": "Generating high-quality annotations for object detection and recognition is a challenging and important task, especially in relation to safety-critical applications such as autonomous driving (AD). 
Due to the difficulty of perception in challenging situations such as occlusion, degraded weather, and sensor failure, objects can go unobserved and unlabeled. In this paper, we present CLUE-AD, a general-purpose method for detecting and labeling unobserved entities by leveraging the object continuity assumption within the context of a scene. This method is dataset-agnostic, supporting any existing and future AD datasets. Using a real-world dataset representing complex urban driving scenes, we demonstrate the applicability of CLUE-AD for detecting unobserved entities and augmenting the scene data with new labels.", + "primary_area": "", + "author": "Ruwan Wickramarachchi; Cory Henson; Amit Sheth", + "authorids": "", + "aff": "AI Institute, University of South Carolina, Columbia, SC, USA; Bosch Center for Artificial Intelligence, Pittsburgh, PA, USA; AI Institute, University of South Carolina, Columbia, SC, USA", + "bibtex": "@article{Wickramarachchi_Henson_Sheth_2024, title={CLUE-AD: A Context-Based Method for Labeling Unobserved Entities in Autonomous Driving Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27089}, DOI={10.1609/aaai.v37i13.27089}, abstractNote={Generating high-quality annotations for object detection and recognition is a challenging and important task, especially in relation to safety-critical applications such as autonomous driving (AD). Due to the difficulty of perception in challenging situations such as occlusion, degraded weather, and sensor failure, objects can go unobserved and unlabeled. In this paper, we present CLUE-AD, a general-purpose method for detecting and labeling unobserved entities by leveraging the object continuity assumption within the context of a scene. This method is dataset-agnostic, supporting any existing and future AD datasets. 
Using a real-world dataset representing complex urban driving scenes, we demonstrate the applicability of CLUE-AD for detecting unobserved entities and augmenting the scene data with new labels.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wickramarachchi, Ruwan and Henson, Cory and Sheth, Amit}, year={2024}, month={Jul.}, pages={16491-16493} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27089/26861", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27089", + "pdf_size": 2947227, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8058873921380981860&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "email.sc.edu;us.bosch.com;sc.edu", + "email": "email.sc.edu;us.bosch.com;sc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of South Carolina;Bosch Center for Artificial Intelligence", + "aff_unique_dep": "AI Institute;Artificial Intelligence", + "aff_unique_url": "https://www.sc.edu;https://www.bosch-ai.com", + "aff_unique_abbr": "USC;BCAI", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Columbia;Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25083", + "title": "CMNet: Contrastive Magnification Network for Micro-Expression Recognition", + "track": "main", + "status": "Technical", + "abstract": "Micro-Expression Recognition (MER) is challenging because the Micro-Expressions' (ME) motion is too weak to distinguish. This hurdle can be tackled by enhancing intensity for a more accurate acquisition of movements. However, existing magnification strategies tend to use the features of facial images that include not only intensity clues as intensity features, leading to the intensity representation deficient of credibility. 
In addition, the intensity variation over time, which is crucial for encoding movements, is also neglected. To this end, we provide a reliable scheme to extract intensity clues while considering their variation on the time scale. First, we devise an Intensity Distillation (ID) loss to acquire the intensity clues by contrasting the difference between frames, given that the difference in the same video lies only in the intensity. Then, the intensity clues are calibrated to follow the trend of the original video. Specifically, due to the lack of truth intensity annotation of the original video, we build the intensity tendency by setting each intensity vacancy an uncertain value, which guides the extracted intensity clues to converge towards this trend rather some fixed values. A Wilcoxon rank sum test (Wrst) method is enforced to implement the calibration. Experimental results on three public ME databases i.e. CASME II, SAMM, and SMIC-HS validate the superiority against state-of-the-art methods.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Mengting Wei; Xingxun Jiang; Wenming Zheng; Yuan Zong; Cheng Lu; Jiateng Liu", + "authorids": "", + "aff": "Key Laboratory of Child Development and Learning Science of Ministry of Education + School of Biological Science and Medical Engineering, Southeast University, Nanjing, China; Key Laboratory of Child Development and Learning Science of Ministry of Education + School of Biological Science and Medical Engineering, Southeast University, Nanjing, China; Key Laboratory of Child Development and Learning Science of Ministry of Education + School of Biological Science and Medical Engineering, Southeast University, Nanjing, China; Key Laboratory of Child Development and Learning Science of Ministry of Education + School of Biological Science and Medical Engineering, Southeast University, Nanjing, China; School of Information Science and Engineering, Southeast University, Nanjing, China; Key Laboratory of 
Child Development and Learning Science of Ministry of Education + School of Biological Science and Medical Engineering, Southeast University, Nanjing, China", + "bibtex": "@article{Wei_Jiang_Zheng_Zong_Lu_Liu_2023, title={CMNet: Contrastive Magnification Network for Micro-Expression Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25083}, DOI={10.1609/aaai.v37i1.25083}, abstractNote={Micro-Expression Recognition (MER) is challenging because the Micro-Expressions\u2019 (ME) motion is too weak to distinguish. This hurdle can be tackled by enhancing intensity for a more accurate acquisition of movements. However, existing magnification strategies tend to use the features of facial images that include not only intensity clues as intensity features, leading to the intensity representation deficient of credibility. In addition, the intensity variation over time, which is crucial for encoding movements, is also neglected. To this end, we provide a reliable scheme to extract intensity clues while considering their variation on the time scale. First, we devise an Intensity Distillation (ID) loss to acquire the intensity clues by contrasting the difference between frames, given that the difference in the same video lies only in the intensity. Then, the intensity clues are calibrated to follow the trend of the original video. Specifically, due to the lack of truth intensity annotation of the original video, we build the intensity tendency by setting each intensity vacancy an uncertain value, which guides the extracted intensity clues to converge towards this trend rather some fixed values. A Wilcoxon rank sum test (Wrst) method is enforced to implement the calibration. Experimental results on three public ME databases i.e. 
CASME II, SAMM, and SMIC-HS validate the superiority against state-of-the-art methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wei, Mengting and Jiang, Xingxun and Zheng, Wenming and Zong, Yuan and Lu, Cheng and Liu, Jiateng}, year={2023}, month={Jun.}, pages={119-127} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25083/24855", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25083", + "pdf_size": 2489600, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11350490193238588028&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;0+1;1;0+1", + "aff_unique_norm": "Ministry of Education;Southeast University", + "aff_unique_dep": "Key Laboratory of Child Development and Learning Science;School of Biological Science and Medical Engineering", + "aff_unique_url": ";https://www.seu.edu.cn/", + "aff_unique_abbr": ";SEU", + "aff_campus_unique_index": "1;1;1;1;1;1", + "aff_campus_unique": ";Nanjing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26135", + "title": "CMVAE: Causal Meta VAE for Unsupervised Meta-Learning", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised meta-learning aims to learn the meta knowledge from unlabeled data and rapidly adapt to novel tasks. However, existing approaches may be misled by the context-bias (e.g. background) from the training data. In this paper, we abstract the unsupervised meta-learning problem into a Structural Causal Model (SCM) and point out that such bias arises due to hidden confounders. 
To eliminate the confounders, we define the priors are conditionally independent, learn the relationships between priors and intervene on them with casual factorization. Furthermore, we propose Causal Meta VAE (CMVAE) that encodes the priors into latent codes in the causal space and learns their relationships simultaneously to achieve the downstream few-shot image classification task. Results on toy datasets and three benchmark datasets demonstrate that our method can remove the context-bias and it outperforms other state-of-the-art unsupervised meta-learning algorithms because of bias-removal. Code is available at https://github.com/GuodongQi/CMVAE.", + "primary_area": "machine learning iii", + "author": "Guodong Qi; Huimin Yu", + "authorids": "", + "aff": "College of Information Science and Electronic Engineering, Zhejiang University + ZJU-League Research & Development Center; College of Information Science and Electronic Engineering, Zhejiang University + State Key Lab of CAD&CG, Zhejiang University + Zhejiang Provincial Key Laboratory of Information Processing, Communication and Networking", + "bibtex": "@article{Qi_Yu_2023, title={CMVAE: Causal Meta VAE for Unsupervised Meta-Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26135}, DOI={10.1609/aaai.v37i8.26135}, abstractNote={Unsupervised meta-learning aims to learn the meta knowledge from unlabeled data and rapidly adapt to novel tasks. However, existing approaches may be misled by the context-bias (e.g. background) from the training data. In this paper, we abstract the unsupervised meta-learning problem into a Structural Causal Model (SCM) and point out that such bias arises due to hidden confounders. To eliminate the confounders, we define the priors are conditionally independent, learn the relationships between priors and intervene on them with casual factorization. 
Furthermore, we propose Causal Meta VAE (CMVAE) that encodes the priors into latent codes in the causal space and learns their relationships simultaneously to achieve the downstream few-shot image classification task. Results on toy datasets and three benchmark datasets demonstrate that our method can remove the context-bias and it outperforms other state-of-the-art unsupervised meta-learning algorithms because of bias-removal. Code is available at https://github.com/GuodongQi/CMVAE.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qi, Guodong and Yu, Huimin}, year={2023}, month={Jun.}, pages={9480-9488} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26135/25907", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26135", + "pdf_size": 1307486, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1097444127637534935&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn", + "github": "https://github.com/GuodongQi/CMV_AE", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0+0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "College of Information Science and Electronic Engineering", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26527", + "title": "COCA: COllaborative CAusal Regularization for Audio-Visual Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Audio-Visual Question Answering (AVQA) is a sophisticated QA task, which aims at answering textual questions over given video-audio pairs with comprehensive multimodal reasoning. 
Through detailed causal-graph analyses and careful inspections of their learning processes, we reveal that AVQA models are not only prone to over-exploit prevalent language bias, but also suffer from additional joint-modal biases caused by the shortcut relations between textual-auditory/visual co-occurrences and dominated answers. In this paper, we propose a COllabrative CAusal (COCA) Regularization to remedy this more challenging issue of data biases.\nSpecifically, a novel Bias-centered Causal Regularization (BCR) is proposed to alleviate specific shortcut biases by intervening bias-irrelevant causal effects, and further introspect the predictions of AVQA models in counterfactual and factual scenarios. Based on the fact that the dominated bias impairing model robustness for different samples tends to be different, we introduce a Multi-shortcut Collaborative Debiasing (MCD) to measure how each sample suffers from different biases, and dynamically adjust their debiasing concentration to different shortcut correlations. Extensive experiments demonstrate the effectiveness as well as backbone-agnostic ability of our COCA strategy, and it achieves state-of-the-art performance on the large-scale MUSIC-AVQA dataset.", + "primary_area": "speech natural language processing", + "author": "Mingrui Lao; Nan Pu; Yu Liu; Kai He; Erwin M. Bakker; Michael S. 
Lew", + "authorids": "", + "aff": "LIACS Media Lab, Leiden University; LIACS Media Lab, Leiden University; International School of Information Science & Engineering, Dalian University of Technology; LIACS Media Lab, Leiden University; LIACS Media Lab, Leiden University; LIACS Media Lab, Leiden University", + "bibtex": "@article{Lao_Pu_Liu_He_Bakker_Lew_2023, title={COCA: COllaborative CAusal Regularization for Audio-Visual Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26527}, DOI={10.1609/aaai.v37i11.26527}, abstractNote={Audio-Visual Question Answering (AVQA) is a sophisticated QA task, which aims at answering textual questions over given video-audio pairs with comprehensive multimodal reasoning. Through detailed causal-graph analyses and careful inspections of their learning processes, we reveal that AVQA models are not only prone to over-exploit prevalent language bias, but also suffer from additional joint-modal biases caused by the shortcut relations between textual-auditory/visual co-occurrences and dominated answers. In this paper, we propose a COllabrative CAusal (COCA) Regularization to remedy this more challenging issue of data biases.\nSpecifically, a novel Bias-centered Causal Regularization (BCR) is proposed to alleviate specific shortcut biases by intervening bias-irrelevant causal effects, and further introspect the predictions of AVQA models in counterfactual and factual scenarios. Based on the fact that the dominated bias impairing model robustness for different samples tends to be different, we introduce a Multi-shortcut Collaborative Debiasing (MCD) to measure how each sample suffers from different biases, and dynamically adjust their debiasing concentration to different shortcut correlations. 
Extensive experiments demonstrate the effectiveness as well as backbone-agnostic ability of our COCA strategy, and it achieves state-of-the-art performance on the large-scale MUSIC-AVQA dataset.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lao, Mingrui and Pu, Nan and Liu, Yu and He, Kai and Bakker, Erwin M. and Lew, Michael S.}, year={2023}, month={Jun.}, pages={12995-13003} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26527/26299", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26527", + "pdf_size": 15659161, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4560588406823999647&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "liacs.leidenuniv.nl;liacs.leidenuniv.nl;dlut.edu.cn;liacs.leidenuniv.nl;liacs.leidenuniv.nl;liacs.leidenuniv.nl", + "email": "liacs.leidenuniv.nl;liacs.leidenuniv.nl;dlut.edu.cn;liacs.leidenuniv.nl;liacs.leidenuniv.nl;liacs.leidenuniv.nl", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Leiden University;Dalian University of Technology", + "aff_unique_dep": "LIACS Media Lab;International School of Information Science & Engineering", + "aff_unique_url": "https://www.universiteitleiden.nl;http://en.dlut.edu.cn/", + "aff_unique_abbr": "LU;DUT", + "aff_campus_unique_index": "0;0;1;0;0;0", + "aff_campus_unique": "Leiden;Dalian", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "Netherlands;China" + }, + { + "id": "article-25567", + "title": "COLA: Improving Conversational Recommender Systems by Collaborative Augmentation", + "track": "main", + "status": "Technical", + "abstract": "Conversational recommender systems (CRS) aim to employ natural language conversations to suggest suitable products to users. Understanding user preferences for prospective items and learning efficient item representations are crucial for CRS. 
Despite various attempts, earlier studies mostly learned item representations based on individual conversations, ignoring item popularity embodied among all others. Besides, they still need support in efficiently capturing user preferences since the information reflected in a single conversation is limited. Inspired by collaborative filtering, we propose a collaborative augmentation (COLA) method to simultaneously improve both item representation learning and user preference modeling to address these issues. We construct an interactive user-item graph from all conversations, which augments item representations with user-aware information, i.e., item popularity. To improve user preference modeling, we retrieve similar conversations from the training corpus, where the involved items and attributes that reflect the user's potential interests are used to augment the user representation through gate control. Extensive experiments on two benchmark datasets demonstrate the effectiveness of our method. Our code and data are available at https://github.com/DongdingLin/COLA.", + "primary_area": "data mining and knowledge management", + "author": "Dongding Lin; Jian Wang; Wenjie Li", + "authorids": "", + "aff": "The Hong Kong Polytechnic University; The Hong Kong Polytechnic University; The Hong Kong Polytechnic University", + "bibtex": "@article{Lin_Wang_Li_2023, title={COLA: Improving Conversational Recommender Systems by Collaborative Augmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25567}, DOI={10.1609/aaai.v37i4.25567}, abstractNote={Conversational recommender systems (CRS) aim to employ natural language conversations to suggest suitable products to users. Understanding user preferences for prospective items and learning efficient item representations are crucial for CRS. Despite various attempts, earlier studies mostly learned item representations based on individual conversations, ignoring item popularity embodied among all others. 
Besides, they still need support in efficiently capturing user preferences since the information reflected in a single conversation is limited. Inspired by collaborative filtering, we propose a collaborative augmentation (COLA) method to simultaneously improve both item representation learning and user preference modeling to address these issues. We construct an interactive user-item graph from all conversations, which augments item representations with user-aware information, i.e., item popularity. To improve user preference modeling, we retrieve similar conversations from the training corpus, where the involved items and attributes that reflect the user\u2019s potential interests are used to augment the user representation through gate control. Extensive experiments on two benchmark datasets demonstrate the effectiveness of our method. Our code and data are available at https://github.com/DongdingLin/COLA.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Dongding and Wang, Jian and Li, Wenjie}, year={2023}, month={Jun.}, pages={4462-4470} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25567/25339", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25567", + "pdf_size": 330459, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6501953884020344502&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "connect.polyu.hk;connect.polyu.hk;comp.polyu.edu.hk", + "email": "connect.polyu.hk;connect.polyu.hk;comp.polyu.edu.hk", + "github": "https://github.com/DongdingLin/COLA", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The Hong Kong Polytechnic University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.polyu.edu.hk", + "aff_unique_abbr": "PolyU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + 
"id": "article-26648", + "title": "COSMOS: Catching Out-of-Context Image Misuse Using Self-Supervised Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Despite the recent attention to DeepFakes, one of the most prevalent ways to mislead audiences on social media is the use of unaltered images in a new but false context. We propose a new method that automatically highlights out-of-context image and text pairs, for assisting fact-checkers. Our key insight is to leverage the grounding of images with text to distinguish out-of-context scenarios that cannot be disambiguated with language alone. We propose a self-supervised training strategy where we only need a set of captioned images. At train time, our method learns to selectively align individual objects in an image with textual claims, without explicit supervision. At test time, we check if both captions correspond to the same object(s) in the image but are semantically different, which allows us to make fairly accurate out-of-context predictions. Our method achieves 85% out-of-context detection accuracy. To facilitate benchmarking of this task, we create a large-scale dataset of 200K images with 450K textual captions from a variety of news websites, blogs, and social media posts", + "primary_area": "ai for social impact", + "author": "Shivangi Aneja; Chris Bregler; Matthias Niessner", + "authorids": "", + "aff": "Technical University of Munich; Google AI; Technical University of Munich", + "bibtex": "@article{Aneja_Bregler_Niessner_2023, title={COSMOS: Catching Out-of-Context Image Misuse Using Self-Supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26648}, DOI={10.1609/aaai.v37i12.26648}, abstractNote={Despite the recent attention to DeepFakes, one of the most prevalent ways to mislead audiences on social media is the use of unaltered images in a new but false context. 
We propose a new method that automatically highlights out-of-context image and text pairs, for assisting fact-checkers. Our key insight is to leverage the grounding of images with text to distinguish out-of-context scenarios that cannot be disambiguated with language alone. We propose a self-supervised training strategy where we only need a set of captioned images. At train time, our method learns to selectively align individual objects in an image with textual claims, without explicit supervision. At test time, we check if both captions correspond to the same object(s) in the image but are semantically different, which allows us to make fairly accurate out-of-context predictions. Our method achieves 85% out-of-context detection accuracy. To facilitate benchmarking of this task, we create a large-scale dataset of 200K images with 450K textual captions from a variety of news websites, blogs, and social media posts}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aneja, Shivangi and Bregler, Chris and Niessner, Matthias}, year={2023}, month={Jun.}, pages={14084-14092} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26648/26420", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26648", + "pdf_size": 5038638, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9186937869965588810&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Technical University of Munich;Google", + "aff_unique_dep": ";Google AI", + "aff_unique_url": "https://www.tum.de;https://ai.google", + "aff_unique_abbr": "TUM;Google AI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Germany;United States" + }, + { + "id": "article-26487", + "title": "CP-Rec: 
Contextual Prompting for Conversational Recommender Systems", + "track": "main", + "status": "Technical", + "abstract": "The conversational recommender system (CRS) aims to provide high-quality recommendations through interactive dialogues. However, previous CRS models have no effective mechanisms for task planning and topic elaboration, and thus they hardly maintain coherence in multi-task recommendation dialogues. Inspired by recent advances in prompt-based learning, we propose a novel contextual prompting framework for dialogue management, which optimizes prompts based on context, topics, and user profiles. Specifically, we develop a topic controller to sequentially plan the subtasks, and a prompt search module to construct context-aware prompts. We further adopt external knowledge to enrich user profiles and make knowledge-aware recommendations. Incorporating these techniques, we propose a conversational recommender system with contextual prompting, namely CP-Rec. Experimental results demonstrate that it achieves state-of-the-art recommendation accuracy and generates more coherent and informative conversations.", + "primary_area": "speech natural language processing", + "author": "Keyu Chen; Shiliang Sun", + "authorids": "", + "aff": "School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China", + "bibtex": "@article{Chen_Sun_2023, title={CP-Rec: Contextual Prompting for Conversational Recommender Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26487}, DOI={10.1609/aaai.v37i11.26487}, abstractNote={The conversational recommender system (CRS) aims to provide high-quality recommendations through interactive dialogues. However, previous CRS models have no effective mechanisms for task planning and topic elaboration, and thus they hardly maintain coherence in multi-task recommendation dialogues. 
Inspired by recent advances in prompt-based learning, we propose a novel contextual prompting framework for dialogue management, which optimizes prompts based on context, topics, and user profiles. Specifically, we develop a topic controller to sequentially plan the subtasks, and a prompt search module to construct context-aware prompts. We further adopt external knowledge to enrich user profiles and make knowledge-aware recommendations. Incorporating these techniques, we propose a conversational recommender system with contextual prompting, namely CP-Rec. Experimental results demonstrate that it achieves state-of-the-art recommendation accuracy and generates more coherent and informative conversations.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Keyu and Sun, Shiliang}, year={2023}, month={Jun.}, pages={12635-12643} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26487/26259", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26487", + "pdf_size": 408102, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15325973620715143971&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25198", + "title": "CRAFT: Camera-Radar 3D Object Detection with Spatio-Contextual Fusion Transformer", + "track": "main", + "status": "Technical", + "abstract": "Camera and radar sensors have significant advantages in cost, reliability, and 
maintenance compared to LiDAR. Existing fusion methods often fuse the outputs of single modalities at the result-level, called the late fusion strategy. This can benefit from using off-the-shelf single sensor detection algorithms, but late fusion cannot fully exploit the complementary properties of sensors, thus having limited performance despite the huge potential of camera-radar fusion. Here we propose a novel proposal-level early fusion approach that effectively exploits both spatial and contextual properties of camera and radar for 3D object detection. Our fusion framework first associates image proposal with radar points in the polar coordinate system to efficiently handle the discrepancy between the coordinate system and spatial properties. Using this as a first stage, following consecutive cross-attention based feature fusion layers adaptively exchange spatio-contextual information between camera and radar, leading to a robust and attentive fusion. Our camera-radar fusion approach achieves the state-of-the-art 41.1% mAP and 52.3% NDS on the nuScenes test set, which is 8.7 and 10.8 points higher than the camera-only baseline, as well as yielding competitive performance on the LiDAR method.", + "primary_area": "computer vision i", + "author": "Youngseok Kim; Sanmin Kim; Jun Won Choi; Dongsuk Kum", + "authorids": "", + "aff": "Korea Advanced Institute of Science and Technology; Korea Advanced Institute of Science and Technology; Hanyang University; Korea Advanced Institute of Science and Technology", + "bibtex": "@article{Kim_Kim_Choi_Kum_2023, title={CRAFT: Camera-Radar 3D Object Detection with Spatio-Contextual Fusion Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25198}, DOI={10.1609/aaai.v37i1.25198}, abstractNote={Camera and radar sensors have significant advantages in cost, reliability, and maintenance compared to LiDAR. 
Existing fusion methods often fuse the outputs of single modalities at the result-level, called the late fusion strategy. This can benefit from using off-the-shelf single sensor detection algorithms, but late fusion cannot fully exploit the complementary properties of sensors, thus having limited performance despite the huge potential of camera-radar fusion. Here we propose a novel proposal-level early fusion approach that effectively exploits both spatial and contextual properties of camera and radar for 3D object detection. Our fusion framework first associates image proposal with radar points in the polar coordinate system to efficiently handle the discrepancy between the coordinate system and spatial properties. Using this as a first stage, following consecutive cross-attention based feature fusion layers adaptively exchange spatio-contextual information between camera and radar, leading to a robust and attentive fusion. Our camera-radar fusion approach achieves the state-of-the-art 41.1% mAP and 52.3% NDS on the nuScenes test set, which is 8.7 and 10.8 points higher than the camera-only baseline, as well as yielding competitive performance on the LiDAR method.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Youngseok and Kim, Sanmin and Choi, Jun Won and Kum, Dongsuk}, year={2023}, month={Jun.}, pages={1160-1168} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25198/24970", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25198", + "pdf_size": 3448293, + "gs_citation": 95, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4558296643324428940&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;hanyang.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;hanyang.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Korea Advanced Institute of Science 
and Technology;Hanyang University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.kaist.ac.kr;https://www.hanyang.ac.kr", + "aff_unique_abbr": "KAIST;HYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25271", + "title": "CRIN: Rotation-Invariant Point Cloud Analysis and Rotation Estimation via Centrifugal Reference Frame", + "track": "main", + "status": "Technical", + "abstract": "Various recent methods attempt to implement rotation-invariant 3D deep learning by replacing the input coordinates of points with relative distances and angles. Due to the incompleteness of these low-level features, they have to undertake the expense of losing global information. In this paper, we propose the CRIN, namely Centrifugal Rotation-Invariant Network. CRIN directly takes the coordinates of points as input and transforms local points into rotation-invariant representations via centrifugal reference frames. Aided by centrifugal reference frames, each point corresponds to a discrete rotation so that the information of rotations can be implicitly stored in point features. Unfortunately, discrete points are far from describing the whole rotation space. We further introduce a continuous distribution for 3D rotations based on points. Furthermore, we propose an attention-based down-sampling strategy to sample points invariant to rotations. A relation module is adopted at last for reinforcing the long-range dependencies between sampled points and predicts the anchor point for unsupervised rotation estimation. Extensive experiments show that our method achieves rotation invariance, accurately estimates the object rotation, and obtains state-of-the-art results on rotation-augmented classification and part segmentation. 
Ablation studies validate the effectiveness of the network design.", + "primary_area": "computer vision ii", + "author": "Yujing Lou; Zelin Ye; Yang You; Nianjuan Jiang; Jiangbo Lu; Weiming Wang; Lizhuang Ma; Cewu Lu", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; SmartMore; SmartMore; Shanghai Jiao Tong University; Shanghai Jiao Tong University+Shanghai Qi Zhi Institute; Shanghai Jiao Tong University+Shanghai Qi Zhi Institute", + "bibtex": "@article{Lou_Ye_You_Jiang_Lu_Wang_Ma_Lu_2023, title={CRIN: Rotation-Invariant Point Cloud Analysis and Rotation Estimation via Centrifugal Reference Frame}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25271}, DOI={10.1609/aaai.v37i2.25271}, abstractNote={Various recent methods attempt to implement rotation-invariant 3D deep learning by replacing the input coordinates of points with relative distances and angles. Due to the incompleteness of these low-level features, they have to undertake the expense of losing global information. In this paper, we propose the CRIN, namely Centrifugal Rotation-Invariant Network. CRIN directly takes the coordinates of points as input and transforms local points into rotation-invariant representations via centrifugal reference frames. Aided by centrifugal reference frames, each point corresponds to a discrete rotation so that the information of rotations can be implicitly stored in point features. Unfortunately, discrete points are far from describing the whole rotation space. We further introduce a continuous distribution for 3D rotations based on points. Furthermore, we propose an attention-based down-sampling strategy to sample points invariant to rotations. A relation module is adopted at last for reinforcing the long-range dependencies between sampled points and predicts the anchor point for unsupervised rotation estimation. 
Extensive experiments show that our method achieves rotation invariance, accurately estimates the object rotation, and obtains state-of-the-art results on rotation-augmented classification and part segmentation. Ablation studies validate the effectiveness of the network design.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lou, Yujing and Ye, Zelin and You, Yang and Jiang, Nianjuan and Lu, Jiangbo and Wang, Weiming and Ma, Lizhuang and Lu, Cewu}, year={2023}, month={Jun.}, pages={1817-1825} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25271/25043", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25271", + "pdf_size": 4317526, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13251339711054480474&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com;gmail.com;sjtu.edu.cn;cs.sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com;gmail.com;sjtu.edu.cn;cs.sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;1;0;0+2;0+2", + "aff_unique_norm": "Shanghai Jiao Tong University;SmartMore;Shanghai Qi Zhi Institute", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sjtu.edu.cn;;https://www.qz.io", + "aff_unique_abbr": "SJTU;;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "article-25299", + "title": "CSTAR: Towards Compact and Structured Deep Neural Networks with Adversarial Robustness", + "track": "main", + "status": "Technical", + "abstract": "Model compression and model defense for deep neural networks (DNNs) have been extensively and individually studied. 
Considering the co-importance of model compactness and robustness in practical applications, several prior works have explored to improve the adversarial robustness of the sparse neural networks. However, the structured sparse models obtained by the existing works suffer severe performance degradation for both benign and robust accuracy, thereby causing a challenging dilemma between robustness and structuredness of compact DNNs.\nTo address this problem, in this paper, we propose CSTAR, an efficient solution that simultaneously impose Compactness, high STructuredness and high Adversarial Robustness on the target DNN models. By formulating the structuredness and robustness requirement within the same framework, the compressed DNNs can simultaneously achieve high compression performance and strong adversarial robustness. Evaluations for various DNN models on different datasets demonstrate the effectiveness of CSTAR. Compared with the state-of-the-art robust structured pruning, CSTAR shows consistently better performance. For instance, when compressing ResNet-18 on CIFAR-10, CSTAR achieves up to 20.07% and 11.91% improvement for benign accuracy and robust accuracy, respectively. 
For compressing ResNet-18 with 16x compression ratio on Imagenet, CSTAR obtains 8.58% benign accuracy gain and 4.27% robust accuracy gain compared to the existing robust structured pruning.", + "primary_area": "computer vision ii", + "author": "Huy Phan; Miao Yin; Yang Sui; Bo Yuan; Saman Zonouz", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, Rutgers University; Department of Electrical and Computer Engineering, Rutgers University; Department of Electrical and Computer Engineering, Rutgers University; Department of Electrical and Computer Engineering, Rutgers University; Schools of Cybersecurity and Privacy, Georgia Institute of Technology", + "bibtex": "@article{Phan_Yin_Sui_Yuan_Zonouz_2023, title={CSTAR: Towards Compact and Structured Deep Neural Networks with Adversarial Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25299}, DOI={10.1609/aaai.v37i2.25299}, abstractNote={Model compression and model defense for deep neural networks (DNNs) have been extensively and individually studied. Considering the co-importance of model compactness and robustness in practical applications, several prior works have explored to improve the adversarial robustness of the sparse neural networks. However, the structured sparse models obtained by the existing works suffer severe performance degradation for both benign and robust accuracy, thereby causing a challenging dilemma between robustness and structuredness of compact DNNs.\nTo address this problem, in this paper, we propose CSTAR, an efficient solution that simultaneously impose Compactness, high STructuredness and high Adversarial Robustness on the target DNN models. By formulating the structuredness and robustness requirement within the same framework, the compressed DNNs can simultaneously achieve high compression performance and strong adversarial robustness. Evaluations for various DNN models on different datasets demonstrate the effectiveness of CSTAR. 
Compared with the state-of-the-art robust structured pruning, CSTAR shows consistently better performance. For instance, when compressing ResNet-18 on CIFAR-10, CSTAR achieves up to 20.07% and 11.91% improvement for benign accuracy and robust accuracy, respectively. For compressing ResNet-18 with 16x compression ratio on Imagenet, CSTAR obtains 8.58% benign accuracy gain and 4.27% robust accuracy gain compared to the existing robust structured pruning.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Phan, Huy and Yin, Miao and Sui, Yang and Yuan, Bo and Zonouz, Saman}, year={2023}, month={Jun.}, pages={2065-2073} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25299/25071", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25299", + "pdf_size": 508029, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9318062095824568363&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "rutgers.edu;rutgers.edu;rutgers.edu;soe.rutgers.edu;gatech.edu", + "email": "rutgers.edu;rutgers.edu;rutgers.edu;soe.rutgers.edu;gatech.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Rutgers University;Georgia Institute of Technology", + "aff_unique_dep": "Department of Electrical and Computer Engineering;Schools of Cybersecurity and Privacy", + "aff_unique_url": "https://www.rutgers.edu;https://www.gatech.edu", + "aff_unique_abbr": "Rutgers;Georgia Tech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25349", + "title": "Calibrated Teacher for Sparsely Annotated Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Fully supervised object detection requires training images in which all instances are annotated. 
This is actually impractical due to the high labor and time costs and the unavoidable missing annotations. As a result, the incomplete annotation in each image could provide misleading supervision and harm the training. Recent works on sparsely annotated object detection alleviate this problem by generating pseudo labels for the missing annotations. Such a mechanism is sensitive to the threshold of the pseudo label score. However, the effective threshold is different in different training stages and among different object detectors. Therefore, the current methods with fixed thresholds have sub-optimal performance, and are difficult to be applied to other detectors. In order to resolve this obstacle, we propose a Calibrated Teacher, of which the confidence estimation of the prediction is well calibrated to match its real precision. In this way, different detectors in different training stages would share a similar distribution of the output confidence, so that multiple detectors could share the same fixed threshold and achieve better performance. Furthermore, we present a simple but effective Focal IoU Weight (FIoU) for the classification loss. FIoU aims at reducing the loss weight of false negative samples caused by the missing annotation, and thus works as the complement of the teacher-student paradigm. Extensive experiments show that our methods set new state-of-the-art under all different sparse settings in COCO. 
Code will be available at https://github.com/Whileherham/CalibratedTeacher.", + "primary_area": "computer vision ii", + "author": "Haohan Wang; Liang Liu; Boshen Zhang; Jiangning Zhang; Wuhao Zhang; Zhenye Gan; Yabiao Wang; Chengjie Wang; Haoqian Wang", + "authorids": "", + "aff": "Shenzhen International Graduate School, Tsinghua University; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab + Shanghai Jiao Tong University; Shenzhen International Graduate School, Tsinghua University", + "bibtex": "@article{Wang_Liu_Zhang_Zhang_Zhang_Gan_Wang_Wang_Wang_2023, title={Calibrated Teacher for Sparsely Annotated Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25349}, DOI={10.1609/aaai.v37i2.25349}, abstractNote={Fully supervised object detection requires training images in which all instances are annotated. This is actually impractical due to the high labor and time costs and the unavoidable missing annotations. As a result, the incomplete annotation in each image could provide misleading supervision and harm the training. Recent works on sparsely annotated object detection alleviate this problem by generating pseudo labels for the missing annotations. Such a mechanism is sensitive to the threshold of the pseudo label score. However, the effective threshold is different in different training stages and among different object detectors. Therefore, the current methods with fixed thresholds have sub-optimal performance, and are difficult to be applied to other detectors. In order to resolve this obstacle, we propose a Calibrated Teacher, of which the confidence estimation of the prediction is well calibrated to match its real precision. In this way, different detectors in different training stages would share a similar distribution of the output confidence, so that multiple detectors could share the same fixed threshold and achieve better performance. 
Furthermore, we present a simple but effective Focal IoU Weight (FIoU) for the classification loss. FIoU aims at reducing the loss weight of false negative samples caused by the missing annotation, and thus works as the complement of the teacher-student paradigm. Extensive experiments show that our methods set new state-of-the-art under all different sparse settings in COCO. Code will be available at https://github.com/Whileherham/CalibratedTeacher.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Haohan and Liu, Liang and Zhang, Boshen and Zhang, Jiangning and Zhang, Wuhao and Gan, Zhenye and Wang, Yabiao and Wang, Chengjie and Wang, Haoqian}, year={2023}, month={Jun.}, pages={2519-2527} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25349/25121", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25349", + "pdf_size": 2531384, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9299468912113782427&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tsinghua.edu.cn", + "github": "https://github.com/Whileherham/CalibratedTeacher", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;1;1;1;1+2;0", + "aff_unique_norm": "Tsinghua University;Tencent;Shanghai Jiao Tong University", + "aff_unique_dep": "Shenzhen International Graduate School;Youtu Lab;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.tencent.com;https://www.sjtu.edu.cn", + "aff_unique_abbr": "THU;Tencent;SJTU", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27021", + "title": 
"Can Adversarial Networks Make Uninformative Colonoscopy Video Frames Clinically Informative? (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Various artifacts, such as ghost colors, interlacing, and motion blur, hinder diagnosing colorectal cancer (CRC) from videos acquired during colonoscopy. The frames containing these artifacts are called uninformative frames and are present in large proportions in colonoscopy videos. To alleviate the impact of artifacts, we propose an adversarial network based framework to convert uninformative frames to clinically relevant frames. We examine the effectiveness of the proposed approach by evaluating the translated frames for polyp detection using YOLOv5. Preliminary results present improved detection performance along with elegant qualitative outcomes. We also examine the failure cases to determine the directions for future work.", + "primary_area": "", + "author": "Vanshali Sharma; M.K. Bhuyan; Pradip K. Das", + "authorids": "", + "aff": "Indian Institute of Technology Guwahati, Assam, India-781039; Indian Institute of Technology Guwahati, Assam, India-781039; Indian Institute of Technology Guwahati, Assam, India-781039", + "bibtex": "@article{Sharma_Bhuyan_Das_2024, title={Can Adversarial Networks Make Uninformative Colonoscopy Video Frames Clinically Informative? (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27021}, DOI={10.1609/aaai.v37i13.27021}, abstractNote={Various artifacts, such as ghost colors, interlacing, and motion blur, hinder diagnosing colorectal cancer (CRC) from videos acquired during colonoscopy. The frames containing these artifacts are called uninformative frames and are present in large proportions in colonoscopy videos. To alleviate the impact of artifacts, we propose an adversarial network based framework to convert uninformative frames to clinically relevant frames. 
We examine the effectiveness of the proposed approach by evaluating the translated frames for polyp detection using YOLOv5. Preliminary results present improved detection performance along with elegant qualitative outcomes. We also examine the failure cases to determine the directions for future work.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sharma, Vanshali and Bhuyan, M.K. and Das, Pradip K.}, year={2024}, month={Jul.}, pages={16322-16323} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27021/26793", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27021", + "pdf_size": 1122812, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4432665099427199516&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "iitg.ac.in;iitg.ac.in;iitg.ac.in", + "email": "iitg.ac.in;iitg.ac.in;iitg.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Guwahati", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitg.ac.in", + "aff_unique_abbr": "IIT Guwahati", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Guwahati", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25879", + "title": "Can Bad Teaching Induce Forgetting? Unlearning in Deep Networks Using an Incompetent Teacher", + "track": "main", + "status": "Technical", + "abstract": "Machine unlearning has become an important area of research due to an increasing need for machine learning (ML) applications to comply with the emerging data privacy regulations. It facilitates the provision for removal of certain set or class of data from an already trained ML model without requiring retraining from scratch. Recently, several efforts have been put in to make unlearning to be effective and efficient. 
We propose a novel machine unlearning method by exploring the utility of competent and incompetent teachers in a student-teacher framework to induce forgetfulness. The knowledge from the competent and incompetent teachers is selectively transferred to the student to obtain a model that doesn't contain any information about the forget data. We experimentally show that this method generalizes well, is fast and effective. Furthermore, we introduce the zero retrain forgetting (ZRF) metric to evaluate any unlearning method. Unlike the existing unlearning metrics, the ZRF score does not depend on the availability of the expensive retrained model. This makes it useful for analysis of the unlearned model after deployment as well. We present results of experiments conducted for random subset forgetting and class forgetting on various deep networks and across different application domains. Code is at: https://github.com/vikram2000b/bad-teaching-unlearning", + "primary_area": "machine learning i", + "author": "Vikram S Chundawat; Ayush K Tarun; Murari Mandal; Mohan Kankanhalli", + "authorids": "", + "aff": "Mavvex Labs, India; Mavvex Labs, India; School of Computer Engineering, Kalinga Institute of Industrial Technology Bhubaneswar + School of Computing, National University of Singapore; School of Computing, National University of Singapore", + "bibtex": "@article{Chundawat_Tarun_Mandal_Kankanhalli_2023, title={Can Bad Teaching Induce Forgetting? Unlearning in Deep Networks Using an Incompetent Teacher}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25879}, DOI={10.1609/aaai.v37i6.25879}, abstractNote={Machine unlearning has become an important area of research due to an increasing need for machine learning (ML) applications to comply with the emerging data privacy regulations. It facilitates the provision for removal of certain set or class of data from an already trained ML model without requiring retraining from scratch. 
Recently, several efforts have been put in to make unlearning to be effective and efficient. We propose a novel machine unlearning method by exploring the utility of competent and incompetent teachers in a student-teacher framework to induce forgetfulness. The knowledge from the competent and incompetent teachers is selectively transferred to the student to obtain a model that doesn\u2019t contain any information about the forget data. We experimentally show that this method generalizes well, is fast and effective. Furthermore, we introduce the zero retrain forgetting (ZRF) metric to evaluate any unlearning method. Unlike the existing unlearning metrics, the ZRF score does not depend on the availability of the expensive retrained model. This makes it useful for analysis of the unlearned model after deployment as well. We present results of experiments conducted for random subset forgetting and class forgetting on various deep networks and across different application domains. Code is at: https://github.com/vikram2000b/bad-teaching-unlearning}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chundawat, Vikram S and Tarun, Ayush K and Mandal, Murari and Kankanhalli, Mohan}, year={2023}, month={Jun.}, pages={7210-7217} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25879/25651", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25879", + "pdf_size": 411166, + "gs_citation": 143, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14455345805279356910&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com;kiit.ac.in;comp.nus.edu.sg", + "email": "gmail.com;gmail.com;kiit.ac.in;comp.nus.edu.sg", + "github": "https://github.com/vikram2000b/bad-teaching-unlearning", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+2;2", + "aff_unique_norm": "Mavvex Labs;Kalinga Institute of Industrial Technology;National University of Singapore", + 
"aff_unique_dep": ";School of Computer Engineering;School of Computing", + "aff_unique_url": ";https://kiit.ac.in;https://www.nus.edu.sg", + "aff_unique_abbr": ";KIIT;NUS", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Bhubaneswar", + "aff_country_unique_index": "0;0;0+1;1", + "aff_country_unique": "India;Singapore" + }, + { + "id": "article-26992", + "title": "Can Graph Neural Networks Learn to Solve the MaxSAT Problem? (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The paper presents an attempt to bridge the gap between machine learning and symbolic reasoning. We build graph neural networks (GNNs) to predict the solution of the Maximum Satisfiability (MaxSAT) problem, an optimization variant of SAT. Two closely related graph representations are adopted, and we prove their theoretical equivalence. We also show that GNNs can achieve attractive performance to solve hard MaxSAT problems in certain distributions even compared with state-of-the-art solvers through experimental evaluation.", + "primary_area": "", + "author": "Minghao Liu; Pei Huang; Fuqi Jia; Fan Zhang; Yuchen Sun; Shaowei Cai; Feifei Ma; Jian Zhang", + "authorids": "", + "aff": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Laboratory of Parallel Software and Computational Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Inspir.ai; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; State Key Laboratory of Computer 
Science, Institute of Software, Chinese Academy of Sciences + Laboratory of Parallel Software and Computational Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences + University of Chinese Academy of Sciences", + "bibtex": "@article{Liu_Huang_Jia_Zhang_Sun_Cai_Ma_Zhang_2024, title={Can Graph Neural Networks Learn to Solve the MaxSAT Problem? (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26992}, DOI={10.1609/aaai.v37i13.26992}, abstractNote={The paper presents an attempt to bridge the gap between machine learning and symbolic reasoning. We build graph neural networks (GNNs) to predict the solution of the Maximum Satisfiability (MaxSAT) problem, an optimization variant of SAT. Two closely related graph representations are adopted, and we prove their theoretical equivalence. We also show that GNNs can achieve attractive performance to solve hard MaxSAT problems in certain distributions even compared with state-of-the-art solvers through experimental evaluation.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Minghao and Huang, Pei and Jia, Fuqi and Zhang, Fan and Sun, Yuchen and Cai, Shaowei and Ma, Feifei and Zhang, Jian}, year={2024}, month={Jul.}, pages={16264-16265} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26992/26764", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26992", + "pdf_size": 110788, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5715282667173794535&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ios.ac.cn;ios.ac.cn;ios.ac.cn; ; ; ; ; ", + "email": "ios.ac.cn;ios.ac.cn;ios.ac.cn; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;0+1;0+0+1;0+1", + "aff_unique_norm": 
"Chinese Academy of Sciences;University of Chinese Academy of Sciences;Inspir.ai", + "aff_unique_dep": "Institute of Software;;", + "aff_unique_url": "http://www.ios.ac.cn;http://www.ucas.ac.cn;", + "aff_unique_abbr": "CAS;UCAS;", + "aff_campus_unique_index": ";;;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0+0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "article-25904", + "title": "Can Label-Specific Features Help Partial-Label Learning?", + "track": "main", + "status": "Technical", + "abstract": "Partial label learning (PLL) aims to learn from inexact data annotations where each training example is associated with a coarse candidate label set. Due to its practicability, many PLL algorithms have been proposed in recent literature. Most prior PLL works attempt to identify the ground-truth labels from candidate sets and the classifier is trained afterward by fitting the features of examples and their exact ground-truth labels. From a different perspective, we propose to enrich the feature space and raise the question ``Can label-specific features help PLL?'' rather than learning from examples with identical features for all classes. Despite its benefits, previous label-specific feature approaches rely on ground-truth labels to split positive and negative examples of each class and then conduct clustering analysis, which is not directly applicable in PLL. To remedy this problem, we propose an uncertainty-aware confidence region to accommodate false positive labels. We first employ graph-based label enhancement to yield smooth pseudo-labels and facilitate the confidence region split. After acquiring label-specific features, a family of binary classifiers is induced. Extensive experiments on both synthesized and real-world datasets are conducted and the results show that our method consistently outperforms eight baselines. 
Our code is released at\nhttps://github.com/meteoseeker/UCL", + "primary_area": "machine learning i", + "author": "Ruo-Jing Dong; Jun-Yi Hang; Tong Wei; Min-Ling Zhang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University, Nanjing 210096, China + Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education, China + Computer Experimental Teaching Center of Southeast University; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China + Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education, China + Computer Experimental Teaching Center of Southeast University; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China + Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education, China + Computer Experimental Teaching Center of Southeast University; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China + Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education, China + Computer Experimental Teaching Center of Southeast University", + "bibtex": "@article{Dong_Hang_Wei_Zhang_2023, title={Can Label-Specific Features Help Partial-Label Learning?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25904}, DOI={10.1609/aaai.v37i6.25904}, abstractNote={Partial label learning (PLL) aims to learn from inexact data annotations where each training example is associated with a coarse candidate label set. Due to its practicability, many PLL algorithms have been proposed in recent literature. Most prior PLL works attempt to identify the ground-truth labels from candidate sets and the classifier is trained afterward by fitting the features of examples and their exact ground-truth labels. 
From a different perspective, we propose to enrich the feature space and raise the question ``Can label-specific features help PLL?\u2019\u2019 rather than learning from examples with identical features for all classes. Despite its benefits, previous label-specific feature approaches rely on ground-truth labels to split positive and negative examples of each class and then conduct clustering analysis, which is not directly applicable in PLL. To remedy this problem, we propose an uncertainty-aware confidence region to accommodate false positive labels. We first employ graph-based label enhancement to yield smooth pseudo-labels and facilitate the confidence region split. After acquiring label-specific features, a family of binary classifiers is induced. Extensive experiments on both synthesized and real-world datasets are conducted and the results show that our method consistently outperforms eight baselines. Our code is released at\nhttps://github.com/meteoseeker/UCL}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Ruo-Jing and Hang, Jun-Yi and Wei, Tong and Zhang, Min-Ling}, year={2023}, month={Jun.}, pages={7432-7440} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25904/25676", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25904", + "pdf_size": 297928, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12271392535587182503&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "https://github.com/meteoseeker/UCL", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0", + "aff_unique_norm": "Southeast University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.seu.edu.cn/", + "aff_unique_abbr": "SEU", + "aff_campus_unique_index": "0;0;0;0", 
+ "aff_campus_unique": "Nanjing;", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25433", + "title": "Can We Find Strong Lottery Tickets in Generative Models?", + "track": "main", + "status": "Technical", + "abstract": "Yes. In this paper, we investigate strong lottery tickets in generative models, the subnetworks that achieve good generative performance without any weight update. Neural network pruning is considered the main cornerstone of model compression for reducing the costs of computation and memory. Unfortunately, pruning a generative model has not been extensively explored, and all existing pruning algorithms suffer from excessive weight-training costs, performance degradation, limited generalizability, or complicated training. To address these problems, we propose to find a strong lottery ticket via moment-matching scores. Our experimental results show that the discovered subnetwork can perform similarly or better than the trained dense model even when only 10% of the weights remain. To the best of our knowledge, we are the first to show the existence of strong lottery tickets in generative models and provide an algorithm to find it stably. Our code and supplementary materials are publicly available at https://lait-cvlab.github.io/SLT-in-Generative-Models/.", + "primary_area": "computer vision iii", + "author": "Sangyeop Yeo; Yoojin Jang; Jy-yong Sohn; Dongyoon Han; Jaejun Yoo", + "authorids": "", + "aff": "LAIT, Ulsan National Institute of Science and Technology (UNIST); LAIT, Ulsan National Institute of Science and Technology (UNIST); University of Wisconsin-Madison; Naver AI Lab; LAIT, Ulsan National Institute of Science and Technology (UNIST)", + "bibtex": "@article{Yeo_Jang_Sohn_Han_Yoo_2023, title={Can We Find Strong Lottery Tickets in Generative Models?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25433}, DOI={10.1609/aaai.v37i3.25433}, abstractNote={Yes. 
In this paper, we investigate strong lottery tickets in generative models, the subnetworks that achieve good generative performance without any weight update. Neural network pruning is considered the main cornerstone of model compression for reducing the costs of computation and memory. Unfortunately, pruning a generative model has not been extensively explored, and all existing pruning algorithms suffer from excessive weight-training costs, performance degradation, limited generalizability, or complicated training. To address these problems, we propose to find a strong lottery ticket via moment-matching scores. Our experimental results show that the discovered subnetwork can perform similarly or better than the trained dense model even when only 10% of the weights remain. To the best of our knowledge, we are the first to show the existence of strong lottery tickets in generative models and provide an algorithm to find it stably. Our code and supplementary materials are publicly available at https://lait-cvlab.github.io/SLT-in-Generative-Models/.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yeo, Sangyeop and Jang, Yoojin and Sohn, Jy-yong and Han, Dongyoon and Yoo, Jaejun}, year={2023}, month={Jun.}, pages={3267-3275} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25433/25205", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25433", + "pdf_size": 3100392, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7453822218154877809&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;unist.ac.kr;gmail.com;navercorp.com;unist.ac.kr", + "email": "gmail.com;unist.ac.kr;gmail.com;navercorp.com;unist.ac.kr", + "github": "", + "project": "https://lait-cvlab.github.io/SLT-in-Generative-Models/", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Ulsan National Institute of Science and Technology;University of 
Wisconsin-Madison;Naver Corporation", + "aff_unique_dep": "LAIT;;Naver AI Lab", + "aff_unique_url": "https://www.unist.ac.kr;https://www.wisc.edu;https://www.naver.com", + "aff_unique_abbr": "UNIST;UW-Madison;Naver", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Ulsan;Madison;", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-26798", + "title": "Can We Trust Fair-AI?", + "track": "senior member presentation summary papers", + "status": "Technical", + "abstract": "There is a fast-growing literature in addressing the fairness of AI models (fair-AI), with a continuous stream of new conceptual frameworks, methods, and tools. How much can we trust them? How much do they actually impact society? \nWe take a critical focus on fair-AI and survey issues, simplifications, and mistakes that researchers and practitioners often underestimate, which in turn can undermine the trust on fair-AI and limit its contribution to society. In particular, we discuss the hyper-focus on fairness metrics and on optimizing their average performances. We instantiate this observation by discussing the Yule's effect of fair-AI tools: being fair on average does not imply being fair in contexts that matter. We conclude that the use of fair-AI methods should be complemented with the design, development, and verification practices that are commonly summarized under the umbrella of trustworthy AI.", + "primary_area": "", + "author": "Salvatore Ruggieri; Jose M. 
Alvarez; Andrea Pugnana; Laura State; Franco Turini", + "authorids": "", + "aff": "University of Pisa; University of Pisa + Scuola Normale Superiore; Scuola Normale Superiore; University of Pisa + Scuola Normale Superiore; University of Pisa", + "bibtex": "@article{Ruggieri_Alvarez_Pugnana_State_Turini_2024, title={Can We Trust Fair-AI?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26798}, DOI={10.1609/aaai.v37i13.26798}, abstractNote={There is a fast-growing literature in addressing the fairness of AI models (fair-AI), with a continuous stream of new conceptual frameworks, methods, and tools. How much can we trust them? How much do they actually impact society? We take a critical focus on fair-AI and survey issues, simplifications, and mistakes that researchers and practitioners often underestimate, which in turn can undermine the trust on fair-AI and limit its contribution to society. In particular, we discuss the hyper-focus on fairness metrics and on optimizing their average performances. We instantiate this observation by discussing the Yule\u2019s effect of fair-AI tools: being fair on average does not imply being fair in contexts that matter. We conclude that the use of fair-AI methods should be complemented with the design, development, and verification practices that are commonly summarized under the umbrella of trustworthy AI.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ruggieri, Salvatore and Alvarez, Jose M. 
and Pugnana, Andrea and State, Laura and Turini, Franco}, year={2024}, month={Jul.}, pages={15421-15430} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26798/26570", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26798", + "pdf_size": 240035, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12592733637989681255&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "unipi.it;di.unipi.it;sns.it;di.unipi.it;unipi.it", + "email": "unipi.it;di.unipi.it;sns.it;di.unipi.it;unipi.it", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;1;0+1;0", + "aff_unique_norm": "University of Pisa;Scuola Normale Superiore", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unipi.it;https://www.sns.it", + "aff_unique_abbr": "UNIP;SNS", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0+0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-27019", + "title": "Can You Answer This? \u2013 Exploring Zero-Shot QA Generalization Capabilities in Large Language Models (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The buzz around Transformer-based language models (TLM) such as BERT, RoBERTa, etc. is well-founded owing to their impressive results on an array of tasks. However, when applied to areas needing specialized knowledge (closed-domain), such as medical, finance, etc. their performance takes drastic hits, sometimes more than their older recurrent/convolutional counterparts. In this paper, we explore zero-shot capabilities of large LMs for extractive QA. Our objective is to examine performance change in the face of domain drift i.e. when the target domain data is vastly different in semantic and statistical properties from the source domain and attempt to explain the subsequent behavior. 
To this end, we present two studies in this paper while planning further experiments later down the road. Our findings indicate flaws in the current generation of TLM limiting their performance on closed-domain tasks.", + "primary_area": "", + "author": "Saptarshi Sengupta; Shreya Ghosh; Preslav Nakov; Prasenjit Mitra", + "authorids": "", + "aff": "College of IST, The Pennsylvania State University, USA; College of IST, The Pennsylvania State University, USA; Mohamed bin Zayed University of Artificial Intelligence, UAE; College of IST, The Pennsylvania State University, USA", + "bibtex": "@article{Sengupta_Ghosh_Nakov_Mitra_2024, title={Can You Answer This? \u2013 Exploring Zero-Shot QA Generalization Capabilities in Large Language Models (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27019}, DOI={10.1609/aaai.v37i13.27019}, abstractNote={The buzz around Transformer-based language models (TLM) such as BERT, RoBERTa, etc. is well-founded owing to their impressive results on an array of tasks. However, when applied to areas needing specialized knowledge (closed-domain), such as medical, finance, etc. their performance takes drastic hits, sometimes more than their older recurrent/convolutional counterparts. In this paper, we explore zero-shot capabilities of large LMs for extractive QA. Our objective is to examine performance change in the face of domain drift i.e. when the target domain data is vastly different in semantic and statistical properties from the source domain and attempt to explain the subsequent behavior. To this end, we present two studies in this paper while planning further experiments later down the road. 
Our findings indicate flaws in the current generation of TLM limiting their performance on closed-domain tasks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sengupta, Saptarshi and Ghosh, Shreya and Nakov, Preslav and Mitra, Prasenjit}, year={2024}, month={Jul.}, pages={16318-16319} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27019/26791", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27019", + "pdf_size": 68873, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:jFaWrkpjkCgJ:scholar.google.com/&scioq=Can+You+Answer+This%3F+%E2%80%93+Exploring+Zero-Shot+QA+Generalization+Capabilities+in+Large+Language+Models+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "psu.edu;psu.edu;mbzuai.ac.ae;psu.edu", + "email": "psu.edu;psu.edu;mbzuai.ac.ae;psu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "The Pennsylvania State University;Mohamed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": "College of IST;", + "aff_unique_url": "https://www.psu.edu;https://mbzuai.ac.ae", + "aff_unique_abbr": "PSU;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "article-26686", + "title": "Carburacy: Summarization Models Tuning and Comparison in Eco-Sustainable Regimes with a Novel Carbon-Aware Accuracy", + "track": "aaai special track", + "status": "Technical", + "abstract": "Generative transformer-based models have reached cutting-edge performance in long document summarization. Nevertheless, this task is witnessing a paradigm shift in developing ever-increasingly computationally-hungry solutions, focusing on effectiveness while ignoring the economic, environmental, and social costs of yielding such results. 
Accordingly, such extensive resources impact climate change and raise barriers to small and medium organizations distinguished by low-resource regimes of hardware and data. As a result, this unsustainable trend has lifted many concerns in the community, which directs the primary efforts on the proposal of tools to monitor models' energy costs. Despite their importance, no evaluation measure considering models' eco-sustainability exists yet. In this work, we propose Carburacy, the first carbon-aware accuracy measure that captures both model effectiveness and eco-sustainability. We perform a comprehensive benchmark for long document summarization, comparing multiple state-of-the-art quadratic and linear transformers on several datasets under eco-sustainable regimes. Finally, thanks to Carburacy, we found optimal combinations of hyperparameters that let models be competitive in effectiveness with significantly lower costs.", + "primary_area": "ai for social impact", + "author": "Gianluca Moro; Luca Ragazzi; Lorenzo Valgimigli", + "authorids": "", + "aff": "Department of Computer Science and Engineering, University of Bologna, Cesena Campus; Department of Computer Science and Engineering, University of Bologna, Cesena Campus; Department of Computer Science and Engineering, University of Bologna, Cesena Campus + CNIT", + "bibtex": "@article{Moro_Ragazzi_Valgimigli_2023, title={Carburacy: Summarization Models Tuning and Comparison in Eco-Sustainable Regimes with a Novel Carbon-Aware Accuracy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26686}, DOI={10.1609/aaai.v37i12.26686}, abstractNote={Generative transformer-based models have reached cutting-edge performance in long document summarization. Nevertheless, this task is witnessing a paradigm shift in developing ever-increasingly computationally-hungry solutions, focusing on effectiveness while ignoring the economic, environmental, and social costs of yielding such results. 
Accordingly, such extensive resources impact climate change and raise barriers to small and medium organizations distinguished by low-resource regimes of hardware and data. As a result, this unsustainable trend has lifted many concerns in the community, which directs the primary efforts on the proposal of tools to monitor models\u2019 energy costs. Despite their importance, no evaluation measure considering models\u2019 eco-sustainability exists yet. In this work, we propose Carburacy, the first carbon-aware accuracy measure that captures both model effectiveness and eco-sustainability. We perform a comprehensive benchmark for long document summarization, comparing multiple state-of-the-art quadratic and linear transformers on several datasets under eco-sustainable regimes. Finally, thanks to Carburacy, we found optimal combinations of hyperparameters that let models be competitive in effectiveness with significantly lower costs.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Moro, Gianluca and Ragazzi, Luca and Valgimigli, Lorenzo}, year={2023}, month={Jun.}, pages={14417-14425} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26686/26458", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26686", + "pdf_size": 270828, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12358317741048278954&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "unibo.it;unibo.it;unibo.it", + "email": "unibo.it;unibo.it;unibo.it", + "github": "https://github.com/disi-unibo-nlp/carburacy", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "University of Bologna;Consorzio Nazionale Interuniversitario per le Telecomunicazioni", + "aff_unique_dep": "Department of Computer Science and Engineering;", + "aff_unique_url": "https://www.unibo.it;https://www.cnit.it", + "aff_unique_abbr": ";CNIT", + "aff_campus_unique_index": 
"0;0;0", + "aff_campus_unique": "Cesena;", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25405", + "title": "CasFusionNet: A Cascaded Network for Point Cloud Semantic Scene Completion by Dense Feature Fusion", + "track": "main", + "status": "Technical", + "abstract": "Semantic scene completion (SSC) aims to complete a partial 3D scene and predict its semantics simultaneously. Most existing works adopt the voxel representations, thus suffering from the growth of memory and computation cost as the voxel resolution increases. Though a few works attempt to solve SSC from the perspective of 3D point clouds, they have not fully exploited the correlation and complementarity between the two tasks of scene completion and semantic segmentation. In our work, we present CasFusionNet, a novel cascaded network for point cloud semantic scene completion by dense feature fusion. Specifically, we design (i) a global completion module (GCM) to produce an upsampled and completed but coarse point set, (ii) a semantic segmentation module (SSM) to predict the per-point semantic labels of the completed points generated by GCM, and (iii) a local refinement module (LRM) to further refine the coarse completed points and the associated labels from a local perspective. We organize the above three modules via dense feature fusion in each level, and cascade a total of four levels, where we also employ feature fusion between each level for sufficient information usage. Both quantitative and qualitative results on our compiled two point-based datasets validate the effectiveness and superiority of our CasFusionNet compared to state-of-the-art methods in terms of both scene completion and semantic segmentation. 
The codes and datasets are available at: https://github.com/JinfengX/CasFusionNet.", + "primary_area": "computer vision iii", + "author": "Jinfeng Xu; Xianzhi Li; Yuan Tang; Qiao Yu; Yixue Hao; Long Hu; Min Chen", + "authorids": "", + "aff": "Huazhong University of Science and Technology; Huazhong University of Science and Technology; Huazhong University of Science and Technology; Huazhong University of Science and Technology; Huazhong University of Science and Technology; Huazhong University of Science and Technology; Huazhong University of Science and Technology", + "bibtex": "@article{Xu_Li_Tang_Yu_Hao_Hu_Chen_2023, title={CasFusionNet: A Cascaded Network for Point Cloud Semantic Scene Completion by Dense Feature Fusion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25405}, DOI={10.1609/aaai.v37i3.25405}, abstractNote={Semantic scene completion (SSC) aims to complete a partial 3D scene and predict its semantics simultaneously. Most existing works adopt the voxel representations, thus suffering from the growth of memory and computation cost as the voxel resolution increases. Though a few works attempt to solve SSC from the perspective of 3D point clouds, they have not fully exploited the correlation and complementarity between the two tasks of scene completion and semantic segmentation. In our work, we present CasFusionNet, a novel cascaded network for point cloud semantic scene completion by dense feature fusion. Specifically, we design (i) a global completion module (GCM) to produce an upsampled and completed but coarse point set, (ii) a semantic segmentation module (SSM) to predict the per-point semantic labels of the completed points generated by GCM, and (iii) a local refinement module (LRM) to further refine the coarse completed points and the associated labels from a local perspective. 
We organize the above three modules via dense feature fusion in each level, and cascade a total of four levels, where we also employ feature fusion between each level for sufficient information usage. Both quantitative and qualitative results on our compiled two point-based datasets validate the effectiveness and superiority of our CasFusionNet compared to state-of-the-art methods in terms of both scene completion and semantic segmentation. The codes and datasets are available at: https://github.com/JinfengX/CasFusionNet.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Jinfeng and Li, Xianzhi and Tang, Yuan and Yu, Qiao and Hao, Yixue and Hu, Long and Chen, Min}, year={2023}, month={Jun.}, pages={3018-3026} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25405/25177", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25405", + "pdf_size": 12287251, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15108524710289683287&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "gmail.com;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/JinfengX/CasFusionNet", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26956", + "title": "CasODE: Modeling Irregular Information Cascade via Neural Ordinary Differential Equations (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Predicting 
information cascade popularity is a fundamental problem for understanding the nature of information propagation on social media. However, existing works fail to capture an essential aspect of information propagation: the temporal irregularity of cascade event -- i.e., users' re-tweetings at random and non-periodic time instants. In this work, we present a novel framework CasODE for information cascade prediction with neural ordinary differential equations (ODEs). CasODE generalizes the discrete state transitions in RNNs to continuous-time dynamics for modeling the irregular-sampled events in information cascades. Experimental evaluations on real-world datasets demonstrate the advantages of the CasODE over baseline approaches.", + "primary_area": "", + "author": "Zhangtao Cheng; Xovee Xu; Ting Zhong; Fan Zhou; Goce Trajcevski", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; Iowa State University", + "bibtex": "@article{Cheng_Xu_Zhong_Zhou_Trajcevski_2024, title={CasODE: Modeling Irregular Information Cascade via Neural Ordinary Differential Equations (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26956}, DOI={10.1609/aaai.v37i13.26956}, abstractNote={Predicting information cascade popularity is a fundamental problem for understanding the nature of information propagation on social media. However, existing works fail to capture an essential aspect of information propagation: the temporal irregularity of cascade event -- i.e., users\u2019 re-tweetings at random and non-periodic time instants. In this work, we present a novel framework CasODE for information cascade prediction with neural ordinary differential equations (ODEs). 
CasODE generalizes the discrete state transitions in RNNs to continuous-time dynamics for modeling the irregular-sampled events in information cascades. Experimental evaluations on real-world datasets demonstrate the advantages of the CasODE over baseline approaches.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Zhangtao and Xu, Xovee and Zhong, Ting and Zhou, Fan and Trajcevski, Goce}, year={2024}, month={Jul.}, pages={16192-16193} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26956/26728", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26956", + "pdf_size": 169664, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17957832005468321635&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "outlook.com;live.com;uestc.edu.cn;uestc.edu.cn;iastate.edu", + "email": "outlook.com;live.com;uestc.edu.cn;uestc.edu.cn;iastate.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "University of Electronic Science and Technology of China;Iowa State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.iastate.edu", + "aff_unique_abbr": "UESTC;ISU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26991", + "title": "Category-Guided Visual Question Generation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Visual question generation aims to generate high-quality questions related to images. Generating questions based only on images can better reduce labor costs and thus be easily applied. However, their methods tend to generate similar general questions that fail to ask questions about the specific content of each image scene. 
In this paper, we propose a category-guided visual question generation model that can generate questions with multiple categories that focus on different objects in an image. Specifically, our model first selects the appropriate question category based on the objects in the image and the relationships among objects. Then, we generate corresponding questions based on the selected question categories. Experiments conducted on the TDIUC dataset show that our proposed model outperforms existing models in terms of diversity and quality.", + "primary_area": "", + "author": "Hongfei Liu; Jiali Chen; Wenhao Fang; Jiayuan Xie; Yi Cai", + "authorids": "", + "aff": "School of Software Engineering, South China University of Technology, Guangzhou, China+Key Laboratory of Big Data and Intelligent Robot (South China University of Technology), Ministry of Education; School of Software Engineering, South China University of Technology, Guangzhou, China+Key Laboratory of Big Data and Intelligent Robot (South China University of Technology), Ministry of Education; School of Software Engineering, South China University of Technology, Guangzhou, China+Key Laboratory of Big Data and Intelligent Robot (South China University of Technology), Ministry of Education; School of Software Engineering, South China University of Technology, Guangzhou, China+Key Laboratory of Big Data and Intelligent Robot (South China University of Technology), Ministry of Education; School of Software Engineering, South China University of Technology, Guangzhou, China+Key Laboratory of Big Data and Intelligent Robot (South China University of Technology), Ministry of Education", + "bibtex": "@article{Liu_Chen_Fang_Xie_Cai_2024, title={Category-Guided Visual Question Generation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26991}, DOI={10.1609/aaai.v37i13.26991}, abstractNote={Visual question generation aims to generate high-quality questions related to images. 
Generating questions based only on images can better reduce labor costs and thus be easily applied. However, their methods tend to generate similar general questions that fail to ask questions about the specific content of each image scene. In this paper, we propose a category-guided visual question generation model that can generate questions with multiple categories that focus on different objects in an image. Specifically, our model first selects the appropriate question category based on the objects in the image and the relationships among objects. Then, we generate corresponding questions based on the selected question categories. Experiments conducted on the TDIUC dataset show that our proposed model outperforms existing models in terms of diversity and quality.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Hongfei and Chen, Jiali and Fang, Wenhao and Xie, Jiayuan and Cai, Yi}, year={2024}, month={Jul.}, pages={16262-16263} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26991/26763", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26991", + "pdf_size": 509735, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8574384778746585866&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.scut.edu.cn;mail.scut.edu.cn;mail.scut.edu.cn;mail.scut.edu.cn;scut.edu.cn", + "email": "mail.scut.edu.cn;mail.scut.edu.cn;mail.scut.edu.cn;mail.scut.edu.cn;scut.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School of Software Engineering", + "aff_unique_url": "https://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": 
"article-25619", + "title": "Causal Conditional Hidden Markov Model for Multimodal Traffic Prediction", + "track": "main", + "status": "Technical", + "abstract": "Multimodal traffic flow can reflect the health of the transportation system, and its prediction is crucial to urban traffic management. Recent works overemphasize spatio-temporal correlations of traffic flow, ignoring the physical concepts that lead to the generation of observations and their causal relationship. Spatio-temporal correlations are considered unstable under the influence of different conditions, and spurious correlations may exist in observations. In this paper, we analyze the physical concepts affecting the generation of multimode traffic flow from the perspective of the observation generation principle and propose a Causal Conditional Hidden Markov Model (CCHMM) to predict multimodal traffic flow. In the latent variables inference stage, a posterior network disentangles the causal representations of the concepts of interest from conditional information and observations, and a causal propagation module mines their causal relationship. In the data generation stage, a prior network samples the causal latent variables from the prior distribution and feeds them into the generator to generate multimodal traffic flow. We use a mutually supervised training method for the prior and posterior to enhance the identifiability of the model. 
Experiments on real-world datasets show that CCHMM can effectively disentangle causal representations of concepts of interest and identify causality, and accurately predict multimodal traffic flow.", + "primary_area": "data mining and knowledge management", + "author": "Yu Zhao; Pan Deng; Junting Liu; Xiaofeng Jia; Mulan Wang", + "authorids": "", + "aff": "Beihang University; Beihang University; Beihang University; Beijing Big Data Centre; Beihang University", + "bibtex": "@article{Zhao_Deng_Liu_Jia_Wang_2023, title={Causal Conditional Hidden Markov Model for Multimodal Traffic Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25619}, DOI={10.1609/aaai.v37i4.25619}, abstractNote={Multimodal traffic flow can reflect the health of the transportation system, and its prediction is crucial to urban traffic management. Recent works overemphasize spatio-temporal correlations of traffic flow, ignoring the physical concepts that lead to the generation of observations and their causal relationship. Spatio-temporal correlations are considered unstable under the influence of different conditions, and spurious correlations may exist in observations. In this paper, we analyze the physical concepts affecting the generation of multimode traffic flow from the perspective of the observation generation principle and propose a Causal Conditional Hidden Markov Model (CCHMM) to predict multimodal traffic flow. In the latent variables inference stage, a posterior network disentangles the causal representations of the concepts of interest from conditional information and observations, and a causal propagation module mines their causal relationship. In the data generation stage, a prior network samples the causal latent variables from the prior distribution and feeds them into the generator to generate multimodal traffic flow. We use a mutually supervised training method for the prior and posterior to enhance the identifiability of the model. 
Experiments on real-world datasets show that CCHMM can effectively disentangle causal representations of concepts of interest and identify causality, and accurately predict multimodal traffic flow.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Yu and Deng, Pan and Liu, Junting and Jia, Xiaofeng and Wang, Mulan}, year={2023}, month={Jun.}, pages={4929-4936} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25619/25391", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25619", + "pdf_size": 6346208, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18000686547744150381&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;jxj.beijing.gov.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;jxj.beijing.gov.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Beihang University;Beijing Big Data Centre", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.buaa.edu.cn/;", + "aff_unique_abbr": "BUAA;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26435", + "title": "Causal Effect Identification in Cluster DAGs", + "track": "main", + "status": "Technical", + "abstract": "Reasoning about the effect of interventions and counterfactuals is a fundamental task found throughout the data sciences. A collection of principles, algorithms, and tools has been developed for performing such tasks in the last decades. One of the pervasive requirements found throughout this literature is the articulation of assumptions, which commonly appear in the form of causal diagrams. 
Despite the power of this approach, there are significant settings where the knowledge necessary to specify a causal diagram over all variables is not available, particularly in complex, high-dimensional domains. In this paper, we introduce a new graphical modeling tool called cluster DAGs (for short, C-DAGs) that allows for the partial specification of relationships among variables based on limited prior knowledge, alleviating the stringent requirement of specifying a full causal diagram. A C-DAG specifies relationships between clusters of variables, while the relationships between the variables within a cluster are left unspecified, and can be seen as a graphical representation of an equivalence class of causal diagrams that share the relationships among the clusters. We develop the foundations and machinery for valid inferences over C-DAGs about the clusters of variables at each layer of Pearl's Causal Hierarchy - L1 (probabilistic), L2 (interventional), and L3 (counterfactual). In particular, we prove the soundness and completeness of d-separation for probabilistic inference in C-DAGs. Further, we demonstrate the validity of Pearl's do-calculus rules over C-DAGs and show that the standard ID identification algorithm is sound and complete to systematically compute causal effects from observational data given a C-DAG. Finally, we show that C-DAGs are valid for performing counterfactual inferences about clusters of variables.", + "primary_area": "reasoning under uncertainty", + "author": "Tara V. Anand; Adele H. 
Ribeiro; Jin Tian; Elias Bareinboim", + "authorids": "", + "aff": "Department of Biomedical Informatics, Columbia University; Department of Computer Science, Columbia University; Department of Computer Science, Iowa State University; Department of Computer Science, Columbia University", + "bibtex": "@article{Anand_Ribeiro_Tian_Bareinboim_2023, title={Causal Effect Identification in Cluster DAGs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26435}, DOI={10.1609/aaai.v37i10.26435}, abstractNote={Reasoning about the effect of interventions and counterfactuals is a fundamental task found throughout the data sciences. A collection of principles, algorithms, and tools has been developed for performing such tasks in the last decades. One of the pervasive requirements found throughout this literature is the articulation of assumptions, which commonly appear in the form of causal diagrams. Despite the power of this approach, there are significant settings where the knowledge necessary to specify a causal diagram over all variables is not available, particularly in complex, high-dimensional domains. In this paper, we introduce a new graphical modeling tool called cluster DAGs (for short, C-DAGs) that allows for the partial specification of relationships among variables based on limited prior knowledge, alleviating the stringent requirement of specifying a full causal diagram. A C-DAG specifies relationships between clusters of variables, while the relationships between the variables within a cluster are left unspecified, and can be seen as a graphical representation of an equivalence class of causal diagrams that share the relationships among the clusters. We develop the foundations and machinery for valid inferences over C-DAGs about the clusters of variables at each layer of Pearl\u2019s Causal Hierarchy - L1 (probabilistic), L2 (interventional), and L3 (counterfactual). 
In particular, we prove the soundness and completeness of d-separation for probabilistic inference in C-DAGs. Further, we demonstrate the validity of Pearl\u2019s do-calculus rules over C-DAGs and show that the standard ID identification algorithm is sound and complete to systematically compute causal effects from observational data given a C-DAG. Finally, we show that C-DAGs are valid for performing counterfactual inferences about clusters of variables.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Anand, Tara V. and Ribeiro, Adele H. and Tian, Jin and Bareinboim, Elias}, year={2023}, month={Jun.}, pages={12172-12179} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26435/26207", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26435", + "pdf_size": 382527, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5389373880759396464&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 7, + "aff_domain": "columbia.edu;cs.columbia.edu;iastate.edu;cs.columbia.edu", + "email": "columbia.edu;cs.columbia.edu;iastate.edu;cs.columbia.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Columbia University;Iowa State University", + "aff_unique_dep": "Department of Biomedical Informatics;Department of Computer Science", + "aff_unique_url": "https://www.columbia.edu;https://www.iastate.edu", + "aff_unique_abbr": "Columbia;ISU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25869", + "title": "Causal Inference with Conditional Instruments Using Deep Generative Models", + "track": "main", + "status": "Technical", + "abstract": "The instrumental variable (IV) approach is a widely used way to estimate the causal effects of a treatment on an outcome of interest from observational data with latent 
confounders. A standard IV is expected to be related to the treatment variable and independent of all other variables in the system. However, it is challenging to search for a standard IV from data directly due to the strict conditions. The conditional IV (CIV) method has been proposed to allow a variable to be an instrument conditioning on a set of variables, allowing a wider choice of possible IVs and enabling broader practical applications of the IV approach. Nevertheless, there is not a data-driven method to discover a CIV and its conditioning set directly from data. To fill this gap, in this paper, we propose to learn the representations of the information of a CIV and its conditioning set from data with latent confounders for average causal effect estimation. By taking advantage of deep generative models, we develop a novel data-driven approach for simultaneously learning the representation of a CIV from measured variables and generating the representation of its conditioning set given measured variables. 
Extensive experiments on synthetic and real-world datasets show that our method outperforms the existing IV methods.", + "primary_area": "machine learning i", + "author": "Debo Cheng; Ziqi Xu; Jiuyong Li; Lin Liu; Jixue Liu; Thuc Duy Le", + "authorids": "", + "aff": "School of Computer Science and Engineering, Guangxi Normal University; UniSA STEM, University of South Australia; UniSA STEM, University of South Australia; UniSA STEM, University of South Australia; UniSA STEM, University of South Australia; UniSA STEM, University of South Australia", + "bibtex": "@article{Cheng_Xu_Li_Liu_Liu_Le_2023, title={Causal Inference with Conditional Instruments Using Deep Generative Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25869}, DOI={10.1609/aaai.v37i6.25869}, abstractNote={The instrumental variable (IV) approach is a widely used way to estimate the causal effects of a treatment on an outcome of interest from observational data with latent confounders. A standard IV is expected to be related to the treatment variable and independent of all other variables in the system. However, it is challenging to search for a standard IV from data directly due to the strict conditions. The conditional IV (CIV) method has been proposed to allow a variable to be an instrument conditioning on a set of variables, allowing a wider choice of possible IVs and enabling broader practical applications of the IV approach. Nevertheless, there is not a data-driven method to discover a CIV and its conditioning set directly from data. To fill this gap, in this paper, we propose to learn the representations of the information of a CIV and its conditioning set from data with latent confounders for average causal effect estimation. 
By taking advantage of deep generative models, we develop a novel data-driven approach for simultaneously learning the representation of a CIV from measured variables and generating the representation of its conditioning set given measured variables. Extensive experiments on synthetic and real-world datasets show that our method outperforms the existing IV methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Debo and Xu, Ziqi and Li, Jiuyong and Liu, Lin and Liu, Jixue and Le, Thuc Duy}, year={2023}, month={Jun.}, pages={7122-7130} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25869/25641", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25869", + "pdf_size": 529022, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8365366147484362227&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "mymail.unisa.edu.au;mymail.unisa.edu.au;unisa.edu.au;unisa.edu.au;unisa.edu.au;unisa.edu.au", + "email": "mymail.unisa.edu.au;mymail.unisa.edu.au;unisa.edu.au;unisa.edu.au;unisa.edu.au;unisa.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "Guangxi Normal University;University of South Australia", + "aff_unique_dep": "School of Computer Science and Engineering;STEM", + "aff_unique_url": "http://www.gxnu.edu.cn;https://www.unisa.edu.au/", + "aff_unique_abbr": ";UniSA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25142", + "title": "Causal Intervention for Human Trajectory Prediction with Cross Attention Mechanism", + "track": "main", + "status": "Technical", + "abstract": "Human trajectory Prediction (HTP) in complex social environments plays a crucial and fundamental role in artificial intelligence systems. 
Conventional methods make use of both history behaviors and social interactions to forecast future trajectories. However, we demonstrate that the social environment is a confounder that misleads the model to learn spurious correlations between history and future trajectories. To end this, we first formulate the social environment, history and future trajectory variables into a structural causal model to analyze the causalities among them. Based on causal intervention rather than conventional likelihood, we propose a Social Environment ADjustment (SEAD) method, to remove the confounding effect of the social environment. The core of our method is implemented by a Social Cross Attention (SCA) module, which is universal, simple and effective. Our method has consistent improvements on ETH-UCY datasets with three baseline models and achieves competitive performances with existing methods.", + "primary_area": "computer vision i", + "author": "Chunjiang Ge; Shiji Song; Gao Huang", + "authorids": "", + "aff": "Department of Automation, BNRist, Tsinghua University; Department of Automation, BNRist, Tsinghua University; Department of Automation, BNRist, Tsinghua University", + "bibtex": "@article{Ge_Song_Huang_2023, title={Causal Intervention for Human Trajectory Prediction with Cross Attention Mechanism}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25142}, DOI={10.1609/aaai.v37i1.25142}, abstractNote={Human trajectory Prediction (HTP) in complex social environments plays a crucial and fundamental role in artificial intelligence systems. Conventional methods make use of both history behaviors and social interactions to forecast future trajectories. However, we demonstrate that the social environment is a confounder that misleads the model to learn spurious correlations between history and future trajectories. 
To end this, we first formulate the social environment, history and future trajectory variables into a structural causal model to analyze the causalities among them. Based on causal intervention rather than conventional likelihood, we propose a Social Environment ADjustment (SEAD) method, to remove the confounding effect of the social environment. The core of our method is implemented by a Social Cross Attention (SCA) module, which is universal, simple and effective. Our method has consistent improvements on ETH-UCY datasets with three baseline models and achieves competitive performances with existing methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ge, Chunjiang and Song, Shiji and Huang, Gao}, year={2023}, month={Jun.}, pages={658-666} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25142/24914", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25142", + "pdf_size": 484818, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17346427624330124163&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Department of Automation", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "Tsinghua", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26031", + "title": "Causal Recurrent Variational Autoencoder for Medical Time Series Generation", + "track": "main", + "status": "Technical", + "abstract": "We propose causal recurrent variational autoencoder (CR-VAE), a novel generative model that is able to learn a Granger causal graph from a multivariate 
time series x and incorporates the underlying causal mechanism into its data generation process. Distinct to the classical recurrent VAEs, our CR-VAE uses a multi-head decoder, in which the p-th head is responsible for generating the p-th dimension of x (i.e., x^p). By imposing a sparsity-inducing penalty on the weights (of the decoder) and encouraging specific sets of weights to be zero, our CR-VAE learns a sparse adjacency matrix that encodes causal relations between all pairs of variables. Thanks to this causal matrix, our decoder strictly obeys the underlying principles of Granger causality, thereby making the data generating process transparent. We develop a two-stage approach to train the overall objective. Empirically, we evaluate the behavior of our model in synthetic data and two real-world human brain datasets involving, respectively, the electroencephalography (EEG) signals and the functional magnetic resonance imaging (fMRI) data. Our model consistently outperforms state-of-the-art time series generative models both qualitatively and quantitatively. Moreover, it also discovers a faithful causal graph with similar or improved accuracy over existing Granger causality-based causal inference methods. 
Code of CR-VAE is publicly available at https://github.com/hongmingli1995/CR-VAE.", + "primary_area": "machine learning ii", + "author": "Hongming Li; Shujian Yu; Jose Principe", + "authorids": "", + "aff": "University of Florida; UiT - The Arctic University of Norway; University of Florida", + "bibtex": "@article{Li_Yu_Principe_2023, title={Causal Recurrent Variational Autoencoder for Medical Time Series Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26031}, DOI={10.1609/aaai.v37i7.26031}, abstractNote={We propose causal recurrent variational autoencoder (CR-VAE), a novel generative model that is able to learn a Granger causal graph from a multivariate time series x and incorporates the underlying causal mechanism into its data generation process. Distinct to the classical recurrent VAEs, our CR-VAE uses a multi-head decoder, in which the p-th head is responsible for generating the p-th dimension of x (i.e., x^p). By imposing a sparsity-inducing penalty on the weights (of the decoder) and encouraging specific sets of weights to be zero, our CR-VAE learns a sparse adjacency matrix that encodes causal relations between all pairs of variables. Thanks to this causal matrix, our decoder strictly obeys the underlying principles of Granger causality, thereby making the data generating process transparent. We develop a two-stage approach to train the overall objective. Empirically, we evaluate the behavior of our model in synthetic data and two real-world human brain datasets involving, respectively, the electroencephalography (EEG) signals and the functional magnetic resonance imaging (fMRI) data. Our model consistently outperforms state-of-the-art time series generative models both qualitatively and quantitatively. Moreover, it also discovers a faithful causal graph with similar or improved accuracy over existing Granger causality-based causal inference methods. 
Code of CR-VAE is publicly available at https://github.com/hongmingli1995/CR-VAE.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Hongming and Yu, Shujian and Principe, Jose}, year={2023}, month={Jun.}, pages={8562-8570} }",
+    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26031/25803",
+    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26031",
+    "pdf_size": 822426,
+    "gs_citation": 60,
+    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15390566819728840840&as_sdt=2005&sciodt=0,5&hl=en",
+    "gs_version_total": 7,
+    "aff_domain": "ufl.edu;gmail.com;cnel.ufl.edu",
+    "email": "ufl.edu;gmail.com;cnel.ufl.edu",
+    "github": "https://github.com/hongmingli1995/CR-VAE",
+    "project": "",
+    "author_num": 3,
+    "aff_unique_index": "0;1;0",
+    "aff_unique_norm": "University of Florida;The Arctic University of Norway",
+    "aff_unique_dep": ";",
+    "aff_unique_url": "https://www.ufl.edu;https://www.uit.no",
+    "aff_unique_abbr": "UF;UiT",
+    "aff_campus_unique_index": "",
+    "aff_campus_unique": "",
+    "aff_country_unique_index": "0;1;0",
+    "aff_country_unique": "United States;Norway"
+  },
+  {
+    "id": "article-25683",
+    "title": "Causes of Stability in Dynamic Coalition Formation",
+    "track": "main",
+    "status": "Technical",
+    "abstract": "We study the formation of stable outcomes via simple dynamics in cardinal hedonic games, where the utilities of agents change over time depending on the history of the coalition formation process. Specifically, we analyze situations where members of a coalition decrease their utility for a leaving agent (resent) or increase their utility for a joining agent (appreciation). We show that in contrast to classical dynamics, for resentful or appreciative agents, dynamics are guaranteed to converge under mild conditions for various stability concepts. 
Thereby, we establish that both resent and appreciation are strong stability-driving forces.",
+    "primary_area": "game theory and economic paradigms",
+    "author": "Niclas Boehmer; Martin Bullinger; Anna Maria Kerkmann",
+    "authorids": "",
+    "aff": "Algorithmics and Computational Complexity, Technische Universit\u00e4t Berlin; School of Computation, Information and Technology, Technische Universit\u00e4t M\u00fcnchen; Institut f\u00fcr Informatik, Heinrich-Heine-Universit\u00e4t D\u00fcsseldorf",
+    "bibtex": "@article{Boehmer_Bullinger_Kerkmann_2023, title={Causes of Stability in Dynamic Coalition Formation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25683}, DOI={10.1609/aaai.v37i5.25683}, abstractNote={We study the formation of stable outcomes via simple dynamics in cardinal hedonic games, where the utilities of agents change over time depending on the history of the coalition formation process. Specifically, we analyze situations where members of a coalition decrease their utility for a leaving agent (resent) or increase their utility for a joining agent (appreciation). We show that in contrast to classical dynamics, for resentful or appreciative agents, dynamics are guaranteed to converge under mild conditions for various stability concepts. 
Thereby, we establish that both resent and appreciation are strong stability-driving forces.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Boehmer, Niclas and Bullinger, Martin and Kerkmann, Anna Maria}, year={2023}, month={Jun.}, pages={5499-5506} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25683/25455", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25683", + "pdf_size": 131510, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12398243253073994748&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "tu-berlin.de;in.tum.de;hhu.de", + "email": "tu-berlin.de;in.tum.de;hhu.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;Technische Universit\u00e4t M\u00fcnchen;Heinrich-Heine-Universit\u00e4t D\u00fcsseldorf", + "aff_unique_dep": "Algorithmics and Computational Complexity;School of Computation, Information and Technology;Institut f\u00fcr Informatik", + "aff_unique_url": "https://www.tu-berlin.de;https://www.tum.de;https://www.hhu.de", + "aff_unique_abbr": "TU Berlin;TUM;", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Berlin;M\u00fcnchen;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26708", + "title": "Censored Fairness through Awareness", + "track": "aaai special track", + "status": "Technical", + "abstract": "There has been increasing concern within the machine learning community and beyond that Artificial Intelligence (AI) faces a bias and discrimination crisis which needs AI fairness with urgency. As many have begun to work on this problem, most existing work depends on the availability of class label for the given fairness definition and algorithm which may not align with real-world usage. 
In this work, we study an AI fairness problem that stems from the gap between the design of a \"fair\" model in the lab and its deployment in the real-world. Specifically, we consider defining and mitigating individual unfairness amidst censorship, where the availability of class label is not always guaranteed due to censorship, which is broadly applicable in a diversity of real-world socially sensitive applications. We show that our method is able to quantify and mitigate individual unfairness in the presence of censorship across three benchmark tasks, which provides the first known results on individual fairness guarantee in analysis of censored data.",
+    "primary_area": "ai for social impact",
+    "author": "Wenbin Zhang; Tina Hernandez-Boussard; Jeremy Weiss",
+    "authorids": "",
+    "aff": "Michigan Technological University, Houghton, MI 49931; Stanford University, Stanford, CA 94305; National Institutes of Health, Bethesda, MD 20892",
+    "bibtex": "@article{Zhang_Hernandez-Boussard_Weiss_2023, title={Censored Fairness through Awareness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26708}, DOI={10.1609/aaai.v37i12.26708}, abstractNote={There has been increasing concern within the machine learning community and beyond that Artificial Intelligence (AI) faces a bias and discrimination crisis which needs AI fairness with urgency. As many have begun to work on this problem, most existing work depends on the availability of class label for the given fairness definition and algorithm which may not align with real-world usage. In this work, we study an AI fairness problem that stems from the gap between the design of a \"fair\" model in the lab and its deployment in the real-world. Specifically, we consider defining and mitigating individual unfairness amidst censorship, where the availability of class label is not always guaranteed due to censorship, which is broadly applicable in a diversity of real-world socially sensitive applications. 
We show that our method is able to quantify and mitigate individual unfairness in the presence of censorship across three benchmark tasks, which provides the first known results on individual fairness guarantee in analysis of censored data.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Wenbin and Hernandez-Boussard, Tina and Weiss, Jeremy}, year={2023}, month={Jun.}, pages={14611-14619} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26708/26480", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26708", + "pdf_size": 937554, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15387753931470543840&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mtu.edu;stanford.edu;nih.gov", + "email": "mtu.edu;stanford.edu;nih.gov", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Michigan Technological University;Stanford University;National Institutes of Health", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.mtu.edu;https://www.stanford.edu;https://www.nih.gov", + "aff_unique_abbr": "MTU;Stanford;NIH", + "aff_campus_unique_index": "0;1;2", + "aff_campus_unique": "Houghton;Stanford;Bethesda", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26075", + "title": "Centerless Multi-View K-means Based on the Adjacency Matrix", + "track": "main", + "status": "Technical", + "abstract": "Although K-Means clustering has been widely studied due to its simplicity, these methods still have the following fatal drawbacks. Firstly, they need to initialize the cluster centers, which causes unstable clustering performance. Secondly, they have poor performance on non-Gaussian datasets. Inspired by the affinity matrix, we propose a novel multi-view K-Means based on the adjacency matrix. 
It maps the affinity matrix to the distance matrix according to the principle that every sample has a small distance from the points in its neighborhood and a large distance from the points outside of the neighborhood. Moreover, this method well exploits the complementary information embedded in different views by minimizing the tensor Schatten p-norm regularize on the third-order tensor which consists of cluster assignment matrices of different views. Additionally, this method avoids initializing cluster centroids to obtain stable performance. And there is no need to compute the means of clusters so that our model is not sensitive to outliers. Experiment on a toy dataset shows the excellent performance on non-Gaussian datasets. And other experiments on several benchmark datasets demonstrate the superiority of our proposed method.", + "primary_area": "machine learning ii", + "author": "Han Lu; Quanxue Gao; Qianqian Wang; Ming Yang; Wei Xia", + "authorids": "", + "aff": "School of Telecommunications Engineering, Xidian University; School of Telecommunications Engineering, Xidian University; School of Telecommunications Engineering, Xidian University; Mathematics department of the University of Evansville; School of Telecommunications Engineering, Xidian University", + "bibtex": "@article{Lu_Gao_Wang_Yang_Xia_2023, title={Centerless Multi-View K-means Based on the Adjacency Matrix}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26075}, DOI={10.1609/aaai.v37i7.26075}, abstractNote={Although K-Means clustering has been widely studied due to its simplicity, these methods still have the following fatal drawbacks. Firstly, they need to initialize the cluster centers, which causes unstable clustering performance. Secondly, they have poor performance on non-Gaussian datasets. Inspired by the affinity matrix, we propose a novel multi-view K-Means based on the adjacency matrix. 
It maps the affinity matrix to the distance matrix according to the principle that every sample has a small distance from the points in its neighborhood and a large distance from the points outside of the neighborhood. Moreover, this method well exploits the complementary information embedded in different views by minimizing the tensor Schatten p-norm regularize on the third-order tensor which consists of cluster assignment matrices of different views. Additionally, this method avoids initializing cluster centroids to obtain stable performance. And there is no need to compute the means of clusters so that our model is not sensitive to outliers. Experiment on a toy dataset shows the excellent performance on non-Gaussian datasets. And other experiments on several benchmark datasets demonstrate the superiority of our proposed method.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Han and Gao, Quanxue and Wang, Qianqian and Yang, Ming and Xia, Wei}, year={2023}, month={Jun.}, pages={8949-8956} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26075/25847", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26075", + "pdf_size": 619054, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11380905335627773972&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "foxmail.com;xidian.edu.cn;xidian.edu.cn;gmail.com;gmail.com", + "email": "foxmail.com;xidian.edu.cn;xidian.edu.cn;gmail.com;gmail.com", + "github": "https://github.com/luhan0/CMKOA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Xidian University;University of Evansville", + "aff_unique_dep": "School of Telecommunications Engineering;Mathematics department", + "aff_unique_url": "http://www.xidian.edu.cn/;https://www.evansville.edu", + "aff_unique_abbr": "Xidian;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25994", + "title": "CertiFair: A Framework for Certified Global Fairness of Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "We consider the problem of whether a Neural Network (NN) model satisfies global individual fairness. Individual Fairness (defined in (Dwork et al. 2012)) suggests that similar individuals with respect to a certain task are to be treated similarly by the decision model. In this work, we have two main objectives. The first is to construct a verifier which checks whether the fairness property holds for a given NN in a classification task or provides a counterexample if it is violated, i.e., the model is fair if all similar individuals are classified the same, and unfair if a pair of similar individuals are classified differently. To that end, we construct a sound and complete verifier that verifies global individual fairness properties of ReLU NN classifiers using distance-based similarity metrics. The second objective of this paper is to provide a method for training provably fair NN classifiers from unfair (biased) data. We propose a fairness loss that can be used during training to enforce fair outcomes for similar individuals. We then provide provable bounds on the fairness of the resulting NN. 
We run experiments on commonly used fairness datasets that are publicly available and we show that global individual fairness can be improved by 96 % without a significant drop in test accuracy.", + "primary_area": "machine learning ii", + "author": "Haitham Khedr; Yasser Shoukry", + "authorids": "", + "aff": "University of California, Irvine; University of California, Irvine", + "bibtex": "@article{Khedr_Shoukry_2023, title={CertiFair: A Framework for Certified Global Fairness of Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25994}, DOI={10.1609/aaai.v37i7.25994}, abstractNote={We consider the problem of whether a Neural Network (NN) model satisfies global individual fairness. Individual Fairness (defined in (Dwork et al. 2012)) suggests that similar individuals with respect to a certain task are to be treated similarly by the decision model. In this work, we have two main objectives. The first is to construct a verifier which checks whether the fairness property holds for a given NN in a classification task or provides a counterexample if it is violated, i.e., the model is fair if all similar individuals are classified the same, and unfair if a pair of similar individuals are classified differently. To that end, we construct a sound and complete verifier that verifies global individual fairness properties of ReLU NN classifiers using distance-based similarity metrics. The second objective of this paper is to provide a method for training provably fair NN classifiers from unfair (biased) data. We propose a fairness loss that can be used during training to enforce fair outcomes for similar individuals. We then provide provable bounds on the fairness of the resulting NN. 
We run experiments on commonly used fairness datasets that are publicly available and we show that global individual fairness can be improved by 96 % without a significant drop in test accuracy.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Khedr, Haitham and Shoukry, Yasser}, year={2023}, month={Jun.}, pages={8237-8245} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25994/25766", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25994", + "pdf_size": 643667, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=153986551674512801&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "uci.edu;uci.edu", + "email": "uci.edu;uci.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, Irvine", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uci.edu", + "aff_unique_abbr": "UCI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Irvine", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26295", + "title": "Certifiable Out-of-Distribution Generalization", + "track": "main", + "status": "Technical", + "abstract": "Machine learning methods suffer from test-time performance degeneration when faced with out-of-distribution (OoD) data whose distribution is not necessarily the same as training data distribution. Although a plethora of algorithms have been proposed to mitigate this issue, it has been demonstrated that achieving better performance than ERM simultaneously on different types of distributional shift datasets is challenging for existing approaches. Besides, it is unknown how and to what extent these methods work on any OoD datum without theoretical guarantees. 
In this paper, we propose a certifiable out-of-distribution generalization method that provides provable OoD generalization performance guarantees via a functional optimization framework leveraging random distributions and max-margin learning for each input datum. With this approach, the proposed algorithmic scheme can provide certified accuracy for each input datum's prediction on the semantic space and achieves better performance simultaneously on OoD datasets dominated by correlation shifts or diversity shifts. Our code is available at https://github.com/ZlatanWilliams/StochasticDisturbanceLearning.", + "primary_area": "machine learning iv", + "author": "Nanyang Ye; Lin Zhu; Jia Wang; Zhaoyu Zeng; Jiayao Shao; Chensheng Peng; Bikang Pan; Kaican Li; Jun Zhu", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; University of Cambridge; Shanghai Jiao Tong University; University of Warwick; Shanghai Jiao Tong University; ShanghaiTech University; Huawei Noah\u2019s Ark Lab; Tsinghua University", + "bibtex": "@article{Ye_Zhu_Wang_Zeng_Shao_Peng_Pan_Li_Zhu_2023, title={Certifiable Out-of-Distribution Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26295}, DOI={10.1609/aaai.v37i9.26295}, abstractNote={Machine learning methods suffer from test-time performance degeneration when faced with out-of-distribution (OoD) data whose distribution is not necessarily the same as training data distribution. Although a plethora of algorithms have been proposed to mitigate this issue, it has been demonstrated that achieving better performance than ERM simultaneously on different types of distributional shift datasets is challenging for existing approaches. Besides, it is unknown how and to what extent these methods work on any OoD datum without theoretical guarantees. 
In this paper, we propose a certifiable out-of-distribution generalization method that provides provable OoD generalization performance guarantees via a functional optimization framework leveraging random distributions and max-margin learning for each input datum. With this approach, the proposed algorithmic scheme can provide certified accuracy for each input datum\u2019s prediction on the semantic space and achieves better performance simultaneously on OoD datasets dominated by correlation shifts or diversity shifts. Our code is available at https://github.com/ZlatanWilliams/StochasticDisturbanceLearning.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Nanyang and Zhu, Lin and Wang, Jia and Zeng, Zhaoyu and Shao, Jiayao and Peng, Chensheng and Pan, Bikang and Li, Kaican and Zhu, Jun}, year={2023}, month={Jun.}, pages={10927-10935} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26295/26067", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26295", + "pdf_size": 6971092, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12832332135830240822&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;cam.ac.uk;sjtu.edu.cn;163.com;sjtu.edu.cn;shanghaitech.edu.cn;gmail.com;mail.tsinghua.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;cam.ac.uk;sjtu.edu.cn;163.com;sjtu.edu.cn;shanghaitech.edu.cn;gmail.com;mail.tsinghua.edu.cn", + "github": "https://github.com/ZlatanWilliams/StochasticDisturbanceLearning", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0;2;0;3;4;5", + "aff_unique_norm": "Shanghai Jiao Tong University;University of Cambridge;University of Warwick;ShanghaiTech University;Huawei;Tsinghua University", + "aff_unique_dep": ";;;;Noah\u2019s Ark Lab;", + "aff_unique_url": 
"https://www.sjtu.edu.cn;https://www.cam.ac.uk;https://www.warwick.ac.uk;https://www.shanghaitech.edu.cn;https://www.huawei.com;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "SJTU;Cambridge;Warwick;ShanghaiTech;Huawei;THU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;0;1;0;1;0;0;0;0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26756", + "title": "Certified Policy Smoothing for Cooperative Multi-Agent Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Cooperative multi-agent reinforcement learning (c-MARL) is widely applied in safety-critical scenarios, thus the analysis of robustness for c-MARL models is profoundly important. However, robustness certification for c-MARLs has not yet been explored in the community. In this paper, we propose a novel certification method, which is the first work to leverage a scalable approach for c-MARLs to determine actions with guaranteed certified bounds. c-MARL certification poses two key challenges compared to single-agent systems: (i) the accumulated uncertainty as the number of agents increases; (ii) the potential lack of impact when changing the action of a single agent into a global team reward. These challenges prevent us from directly using existing algorithms. Hence, we employ the false discovery rate (FDR) controlling procedure considering the importance of each agent to certify per-state robustness. We further propose a tree-search-based algorithm to find a lower bound of the global reward under the minimal certified perturbation. As our method is general, it can also be applied in a single-agent environment. We empirically show that our certification bounds are much tighter than those of state-of-the-art RL certification solutions. We also evaluate our method on two popular c-MARL algorithms: QMIX and VDN, under two different environments, with two and four agents. 
The experimental results show that our method can certify the robustness of all c-MARL models in various environments. Our tool CertifyCMARL is available at https://github.com/TrustAI/CertifyCMARL.", + "primary_area": "safe and robust ai", + "author": "Ronghui Mu; Wenjie Ruan; Leandro Soriano Marcolino; Gaojie Jin; Qiang Ni", + "authorids": "", + "aff": "School of Computing & Communication, Lancaster University; Department of Computer Science, University of Exeter; School of Computing & Communication, Lancaster University; Department of Computer Science, University of Liverpool; School of Computing & Communication, Lancaster University", + "bibtex": "@article{Mu_Ruan_Soriano Marcolino_Jin_Ni_2023, title={Certified Policy Smoothing for Cooperative Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26756}, DOI={10.1609/aaai.v37i12.26756}, abstractNote={Cooperative multi-agent reinforcement learning (c-MARL) is widely applied in safety-critical scenarios, thus the analysis of robustness for c-MARL models is profoundly important. However, robustness certification for c-MARLs has not yet been explored in the community. In this paper, we propose a novel certification method, which is the first work to leverage a scalable approach for c-MARLs to determine actions with guaranteed certified bounds. c-MARL certification poses two key challenges compared to single-agent systems: (i) the accumulated uncertainty as the number of agents increases; (ii) the potential lack of impact when changing the action of a single agent into a global team reward. These challenges prevent us from directly using existing algorithms. Hence, we employ the false discovery rate (FDR) controlling procedure considering the importance of each agent to certify per-state robustness. We further propose a tree-search-based algorithm to find a lower bound of the global reward under the minimal certified perturbation. 
As our method is general, it can also be applied in a single-agent environment. We empirically show that our certification bounds are much tighter than those of state-of-the-art RL certification solutions. We also evaluate our method on two popular c-MARL algorithms: QMIX and VDN, under two different environments, with two and four agents. The experimental results show that our method can certify the robustness of all c-MARL models in various environments. Our tool CertifyCMARL is available at https://github.com/TrustAI/CertifyCMARL.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mu, Ronghui and Ruan, Wenjie and Soriano Marcolino, Leandro and Jin, Gaojie and Ni, Qiang}, year={2023}, month={Jun.}, pages={15046-15054} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26756/26528", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26756", + "pdf_size": 1120601, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11930162722628777969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "lancaster.ac.uk;exeter.ac.uk;lancaster.ac.uk;liverpool.ac.uk;lancaster.ac.uk", + "email": "lancaster.ac.uk;exeter.ac.uk;lancaster.ac.uk;liverpool.ac.uk;lancaster.ac.uk", + "github": "https://github.com/TrustAI/CertifyCMARL", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;2;0", + "aff_unique_norm": "Lancaster University;University of Exeter;University of Liverpool", + "aff_unique_dep": "School of Computing & Communication;Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.lancaster.ac.uk;https://www.exeter.ac.uk;https://www.liverpool.ac.uk", + "aff_unique_abbr": "Lancaster;Exeter;Liv Uni", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26447", + "title": "Certifying Fairness of 
Probabilistic Circuits", + "track": "main", + "status": "Technical", + "abstract": "With the increased use of machine learning systems for decision making, questions about the fairness properties of such systems start to take center stage. Most existing work on algorithmic fairness assume complete observation of features at prediction time, as is the case for popular notions like statistical parity and equal opportunity. However, this is not sufficient for models that can make predictions with partial observation as we could miss patterns of bias and incorrectly certify a model to be fair. To address this, a recently introduced notion of fairness asks whether the model exhibits any discrimination pattern, in which an individual\u2014characterized by (partial) feature observations\u2014receives vastly different decisions merely by disclosing one or more sensitive attributes such as gender and race. By explicitly accounting for partial observations, this provides a much more fine-grained notion of fairness.\n\nIn this paper, we propose an algorithm to search for discrimination patterns in a general class of probabilistic models, namely probabilistic circuits. Previously, such algorithms were limited to naive Bayes classifiers which make strong independence assumptions; by contrast, probabilistic circuits provide a unifying framework for a wide range of tractable probabilistic models and can even be compiled from certain classes of Bayesian networks and probabilistic programs, making our method much more broadly applicable. Furthermore, for an unfair model, it may be useful to quickly find discrimination patterns and distill them for better interpretability. 
As such, we also propose a sampling-based approach to more efficiently mine discrimination patterns, and introduce new classes of patterns such as minimal, maximal, and Pareto optimal patterns that can effectively summarize exponentially many discrimination patterns.", + "primary_area": "reasoning under uncertainty", + "author": "Nikil Roashan Selvam; Guy Van den Broeck; YooJung Choi", + "authorids": "", + "aff": "Computer Science Department, University of California, Los Angeles; Computer Science Department, University of California, Los Angeles; School of Computing and Augmented Intelligence, Arizona State University", + "bibtex": "@article{Selvam_Van den Broeck_Choi_2023, title={Certifying Fairness of Probabilistic Circuits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26447}, DOI={10.1609/aaai.v37i10.26447}, abstractNote={With the increased use of machine learning systems for decision making, questions about the fairness properties of such systems start to take center stage. Most existing work on algorithmic fairness assume complete observation of features at prediction time, as is the case for popular notions like statistical parity and equal opportunity. However, this is not sufficient for models that can make predictions with partial observation as we could miss patterns of bias and incorrectly certify a model to be fair. To address this, a recently introduced notion of fairness asks whether the model exhibits any discrimination pattern, in which an individual\u2014characterized by (partial) feature observations\u2014receives vastly different decisions merely by disclosing one or more sensitive attributes such as gender and race. By explicitly accounting for partial observations, this provides a much more fine-grained notion of fairness. In this paper, we propose an algorithm to search for discrimination patterns in a general class of probabilistic models, namely probabilistic circuits. 
Previously, such algorithms were limited to naive Bayes classifiers which make strong independence assumptions; by contrast, probabilistic circuits provide a unifying framework for a wide range of tractable probabilistic models and can even be compiled from certain classes of Bayesian networks and probabilistic programs, making our method much more broadly applicable. Furthermore, for an unfair model, it may be useful to quickly find discrimination patterns and distill them for better interpretability. As such, we also propose a sampling-based approach to more efficiently mine discrimination patterns, and introduce new classes of patterns such as minimal, maximal, and Pareto optimal patterns that can effectively summarize exponentially many discrimination patterns.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Selvam, Nikil Roashan and Van den Broeck, Guy and Choi, YooJung}, year={2023}, month={Jun.}, pages={12278-12286} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26447/26219", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26447", + "pdf_size": 330902, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12936097341057867135&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 10, + "aff_domain": "cs.ucla.edu;cs.ucla.edu;asu.edu", + "email": "cs.ucla.edu;cs.ucla.edu;asu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of California, Los Angeles;Arizona State University", + "aff_unique_dep": "Computer Science Department;School of Computing and Augmented Intelligence", + "aff_unique_url": "https://www.ucla.edu;https://www.asu.edu", + "aff_unique_abbr": "UCLA;ASU", + "aff_campus_unique_index": "0;0;1", + "aff_campus_unique": "Los Angeles;Tempe", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25316", + "title": "Channel 
Regeneration: Improving Channel Utilization for Compact DNNs", + "track": "main", + "status": "Technical", + "abstract": "Overparameterized deep neural networks have redundant neurons that do not contribute to the network's accuracy. In this paper, we introduce a novel channel regeneration technique that reinvigorates these redundant channels by re-initializing its batch normalization scaling factor gamma. This re-initialization of BN gamma promotes regular weight updates during training. Furthermore, we show that channel regeneration encourages the channels to contribute equally to the learned representation and further boosts the generalization accuracy. We apply our technique at regular intervals of the training cycle to improve channel utilization. The solutions proposed in previous works either raise the total computational cost or increase the model complexity. Integrating the proposed channel regeneration technique into the training methodology of efficient architectures requires minimal effort and comes at no additional cost in size or memory. Extensive experiments on several image classification and semantic segmentation benchmarks demonstrate the effectiveness of applying the channel regeneration technique to compact architectures.", + "primary_area": "computer vision ii", + "author": "Ankit Sharma; Hassan Foroosh", + "authorids": "", + "aff": "Computational Imaging Lab, Department of Computer Science, University of Central Florida; Computational Imaging Lab, Department of Computer Science, University of Central Florida", + "bibtex": "@article{Sharma_Foroosh_2023, title={Channel Regeneration: Improving Channel Utilization for Compact DNNs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25316}, DOI={10.1609/aaai.v37i2.25316}, abstractNote={Overparameterized deep neural networks have redundant neurons that do not contribute to the network\u2019s accuracy. 
In this paper, we introduce a novel channel regeneration technique that reinvigorates these redundant channels by re-initializing its batch normalization scaling factor gamma. This re-initialization of BN gamma promotes regular weight updates during training. Furthermore, we show that channel regeneration encourages the channels to contribute equally to the learned representation and further boosts the generalization accuracy. We apply our technique at regular intervals of the training cycle to improve channel utilization. The solutions proposed in previous works either raise the total computational cost or increase the model complexity. Integrating the proposed channel regeneration technique into the training methodology of efficient architectures requires minimal effort and comes at no additional cost in size or memory. Extensive experiments on several image classification and semantic segmentation benchmarks demonstrate the effectiveness of applying the channel regeneration technique to compact architectures.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sharma, Ankit and Foroosh, Hassan}, year={2023}, month={Jun.}, pages={2218-2226} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25316/25088", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25316", + "pdf_size": 278349, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:ZoVBnSWmpMIJ:scholar.google.com/&scioq=Channel+Regeneration:+Improving+Channel+Utilization+for+Compact+DNNs&hl=en&as_sdt=0,14", + "gs_version_total": 2, + "aff_domain": "knights.ucf.edu;ucf.edu", + "email": "knights.ucf.edu;ucf.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Central Florida", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ucf.edu", + "aff_unique_abbr": "UCF", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25788", + "title": "Characterizing Structural Hardness of Logic Programs: What Makes Cycles and Reachability Hard for Treewidth?", + "track": "main", + "status": "Technical", + "abstract": "Answer Set Programming (ASP) is a problem modeling and solving framework for several problems in KR with growing industrial applications. Also for studies of computational complexity and deeper insights into the hardness and its sources, ASP has been attracting researchers for many years. These studies resulted in fruitful characterizations in terms of complexity classes, fine-grained insights in form of dichotomy-style results, as well as detailed parameterized complexity landscapes. Recently, this lead to a novel result establishing that for the measure treewidth, which captures structural density of a program, the evaluation of the well-known class of normal programs is expected to be slightly harder than deciding satisfiability (SAT). However, it is unclear how to utilize this structural power of ASP. This paper deals with a novel reduction from SAT to normal ASP that goes beyond well-known encodings: We explicitly utilize the structural power of ASP, whereby we sublinearly decrease the treewidth, which probably cannot be significantly improved. 
Then, compared to existing results, this characterizes hardness in a fine-grained way by establishing the required functional dependency of the dependency graph\u2019s cycle length (SCC size) on the treewidth.", + "primary_area": "knowledge representation and reasoning", + "author": "Markus Hecher", + "authorids": "", + "aff": "Computer Science and Artificial Intelligence Lab, Massachusetts Institute of Technology, Cambridge, United States", + "bibtex": "@article{Hecher_2023, title={Characterizing Structural Hardness of Logic Programs: What Makes Cycles and Reachability Hard for Treewidth?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25788}, DOI={10.1609/aaai.v37i5.25788}, abstractNote={Answer Set Programming (ASP) is a problem modeling and solving framework for several problems in KR with growing industrial applications. Also for studies of computational complexity and deeper insights into the hardness and its sources, ASP has been attracting researchers for many years. These studies resulted in fruitful characterizations in terms of complexity classes, fine-grained insights in form of dichotomy-style results, as well as detailed parameterized complexity landscapes. Recently, this lead to a novel result establishing that for the measure treewidth, which captures structural density of a program, the evaluation of the well-known class of normal programs is expected to be slightly harder than deciding satisfiability (SAT). However, it is unclear how to utilize this structural power of ASP. This paper deals with a novel reduction from SAT to normal ASP that goes beyond well-known encodings: We explicitly utilize the structural power of ASP, whereby we sublinearly decrease the treewidth, which probably cannot be significantly improved. 
Then, compared to existing results, this characterizes hardness in a fine-grained way by establishing the required functional dependency of the dependency graph\u2019s cycle length (SCC size) on the treewidth.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hecher, Markus}, year={2023}, month={Jun.}, pages={6407-6415} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25788/25560", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25788", + "pdf_size": 200679, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8050801446095656440&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mit.edu", + "email": "mit.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "Computer Science and Artificial Intelligence Lab", + "aff_unique_url": "https://web.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25524", + "title": "Circuit Minimization with QBF-Based Exact Synthesis", + "track": "main", + "status": "Technical", + "abstract": "This paper presents a rewriting method for Boolean circuits that minimizes small subcircuits with exact synthesis. 
Individual synthesis tasks are encoded as Quantified Boolean Formulas (QBFs) that capture the full flexibility for implementing multi-output subcircuits.\nThis is in contrast to SAT-based resynthesis, where \"don't cares\" are computed for an individual gate, and replacements are confined to the circuitry used exclusively by that gate.\nAn implementation of our method achieved substantial size reductions compared to state-of-the-art methods across a wide range of benchmark circuits.", + "primary_area": "constraint satisfaction and optimization", + "author": "Franz-Xaver Reichl; Friedrich Slivovsky; Stefan Szeider", + "authorids": "", + "aff": "Algorithms and Complexity Group, TU Wien, Vienna, Austria; Algorithms and Complexity Group, TU Wien, Vienna, Austria; Algorithms and Complexity Group, TU Wien, Vienna, Austria", + "bibtex": "@article{Reichl_Slivovsky_Szeider_2023, title={Circuit Minimization with QBF-Based Exact Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25524}, DOI={10.1609/aaai.v37i4.25524}, abstractNote={This paper presents a rewriting method for Boolean circuits that minimizes small subcircuits with exact synthesis. 
Individual synthesis tasks are encoded as Quantified Boolean Formulas (QBFs) that capture the full flexibility for implementing multi-output subcircuits.\nThis is in contrast to SAT-based resynthesis, where "don\u2019t cares" are computed for an individual gate, and replacements are confined to the circuitry used exclusively by that gate.\nAn implementation of our method achieved substantial size reductions compared to state-of-the-art methods across a wide range of benchmark circuits.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Reichl, Franz-Xaver and Slivovsky, Friedrich and Szeider, Stefan}, year={2023}, month={Jun.}, pages={4087-4094} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25524/25296", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25524", + "pdf_size": 158635, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11175028737002813138&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "ac.tuwien.ac.at;ac.tuwien.ac.at;ac.tuwien.ac.at", + "email": "ac.tuwien.ac.at;ac.tuwien.ac.at;ac.tuwien.ac.at", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "TU Wien", + "aff_unique_dep": "Algorithms and Complexity Group", + "aff_unique_url": "https://www.tuwien.ac.at", + "aff_unique_abbr": "TU Wien", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Vienna", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Austria" + }, + { + "id": "article-26695", + "title": "City-Scale Pollution Aware Traffic Routing by Sampling Max Flows Using MCMC", + "track": "aaai special track", + "status": "Technical", + "abstract": "A significant cause of air pollution in urban areas worldwide is the high volume of road traffic. Long-term exposure to severe pollution can cause serious health issues. 
One approach towards tackling this problem is to design a pollution-aware traffic routing policy that balances multiple objectives of i) avoiding extreme pollution in any area ii) enabling short transit times, and iii) making effective use of the road capacities. We propose a novel sampling-based approach for this problem. We give the first construction of a Markov Chain that can sample integer max flow solutions of a planar graph, with theoretical guarantees that the probabilities depend on the aggregate transit length. We designed a traffic policy using diverse samples and simulated traffic on real-world road maps using the SUMO traffic simulator. We observe a considerable decrease in areas with severe pollution when experimented with maps of large cities across the world compared to other approaches.", + "primary_area": "ai for social impact", + "author": "Shreevignesh Suriyanarayanan; Praveen Paruchuri; Girish Varma", + "authorids": "", + "aff": "Machine Learning Lab, IIIT Hyderabad; Machine Learning Lab, IIIT Hyderabad; Machine Learning Lab, IIIT Hyderabad", + "bibtex": "@article{Suriyanarayanan_Paruchuri_Varma_2023, title={City-Scale Pollution Aware Traffic Routing by Sampling Max Flows Using MCMC}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26695}, DOI={10.1609/aaai.v37i12.26695}, abstractNote={A significant cause of air pollution in urban areas worldwide is the high volume of road traffic. Long-term exposure to severe pollution can cause serious health issues. One approach towards tackling this problem is to design a pollution-aware traffic routing policy that balances multiple objectives of i) avoiding extreme pollution in any area ii) enabling short transit times, and iii) making effective use of the road capacities. We propose a novel sampling-based approach for this problem. 
We give the first construction of a Markov Chain that can sample integer max flow solutions of a planar graph, with theoretical guarantees that the probabilities depend on the aggregate transit length. We designed a traffic policy using diverse samples and simulated traffic on real-world road maps using the SUMO traffic simulator. We observe a considerable decrease in areas with severe pollution when experimented with maps of large cities across the world compared to other approaches.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Suriyanarayanan, Shreevignesh and Paruchuri, Praveen and Varma, Girish}, year={2023}, month={Jun.}, pages={14496-14503} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26695/26467", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26695", + "pdf_size": 4016848, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8073377578149369777&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "research.iiit.ac.in;iiit.ac.in;iiit.ac.in", + "email": "research.iiit.ac.in;iiit.ac.in;iiit.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IIIT Hyderabad", + "aff_unique_dep": "Machine Learning Lab", + "aff_unique_url": "https://www.iiit Hyderabad.ac.in", + "aff_unique_abbr": "IIIT-H", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hyderabad", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25704", + "title": "Class Fairness in Online Matching", + "track": "main", + "status": "Technical", + "abstract": "We initiate the study of fairness among classes of agents in online bipartite matching where there is a given set of offline vertices (aka agents) and another set of vertices (aka items) that arrive online and must be matched irrevocably upon arrival. 
In this setting, agents are partitioned into a set of classes and the matching is required to be fair with respect to the classes. We adopt popular fairness notions (e.g. envy-freeness, proportionality, and maximin share) and their relaxations to this setting and study deterministic and randomized algorithms for matching indivisible items (leading to integral matchings) and for matching divisible items (leading to fractional matchings).\nFor matching indivisible items, we propose an adaptive-priority-based algorithm, MATCH-AND-SHIFT, prove that it achieves (1/2)-approximation of both class envy-freeness up to one item and class maximin share fairness, and show that each guarantee is tight. For matching divisible items, we design a water-filling-based algorithm, EQUAL-FILLING, that achieves (1-1/e)-approximation of class envy-freeness and class proportionality; we prove (1-1/e) to be tight for class proportionality and establish a 3/4 upper bound on class envy-freeness.", + "primary_area": "game theory and economic paradigms", + "author": "Hadi Hosseini; Zhiyi Huang; Ayumi Igarashi; Nisarg Shah", + "authorids": "", + "aff": "Pennsylvania State University; University of Hong Kong; University of Tokyo; University of Toronto", + "bibtex": "@article{Hosseini_Huang_Igarashi_Shah_2023, title={Class Fairness in Online Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25704}, DOI={10.1609/aaai.v37i5.25704}, abstractNote={We initiate the study of fairness among classes of agents in online bipartite matching where there is a given set of offline vertices (aka agents) and another set of vertices (aka items) that arrive online and must be matched irrevocably upon arrival. In this setting, agents are partitioned into a set of classes and the matching is required to be fair with respect to the classes. We adopt popular fairness notions (e.g. 
envy-freeness, proportionality, and maximin share) and their relaxations to this setting and study deterministic and randomized algorithms for matching indivisible items (leading to integral matchings) and for matching divisible items (leading to fractional matchings).\nFor matching indivisible items, we propose an adaptive-priority-based algorithm, MATCH-AND-SHIFT, prove that it achieves (1/2)-approximation of both class envy-freeness up to one item and class maximin share fairness, and show that each guarantee is tight. For matching divisible items, we design a water-filling-based algorithm, EQUAL-FILLING, that achieves (1-1/e)-approximation of class envy-freeness and class proportionality; we prove (1-1/e) to be tight for class proportionality and establish a 3/4 upper bound on class envy-freeness.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hosseini, Hadi and Huang, Zhiyi and Igarashi, Ayumi and Shah, Nisarg}, year={2023}, month={Jun.}, pages={5673-5680} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25704/25476", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25704", + "pdf_size": 228031, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4027795450842012212&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 21, + "aff_domain": "psu.edu;cs.hku.hk;mist.i.u-tokyo.ac.jp;cs.toronto.edu", + "email": "psu.edu;cs.hku.hk;mist.i.u-tokyo.ac.jp;cs.toronto.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Pennsylvania State University;University of Hong Kong;University of Tokyo;University of Toronto", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.psu.edu;https://www.hku.hk;https://www.u-tokyo.ac.jp;https://www.utoronto.ca", + "aff_unique_abbr": "PSU;HKU;UTokyo;U of T", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;3", + 
"aff_country_unique": "United States;China;Japan;Canada" + }, + { + "id": "article-27044", + "title": "Class Incremental Learning for Task-Oriented Dialogue System with Contrastive Distillation on Internal Representations (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The ability to continually learn over time by grasping new knowledge and remembering previously learned experiences is essential for developing an online task-oriented dialogue system (TDS). In this paper, we work on the class incremental learning scenario where the TDS is evaluated without specifying the dialogue domain. We employ contrastive distillation on the intermediate representations of dialogues to learn transferable representations that suffer less from catastrophic forgetting. Besides, we provide a dynamic update mechanism to explicitly preserve the learned experiences by only updating the parameters related to the new task while keeping other parameters fixed. Extensive experiments demonstrate that our method significantly outperforms the strong baselines.", + "primary_area": "", + "author": "Qiancheng Xu; Min Yang; Binzong Geng", + "authorids": "", + "aff": "Georgia Institute of Technology; Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences; University of Science and Technology of China", + "bibtex": "@article{Xu_Yang_Geng_2024, title={Class Incremental Learning for Task-Oriented Dialogue System with Contrastive Distillation on Internal Representations (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27044}, DOI={10.1609/aaai.v37i13.27044}, abstractNote={The ability to continually learn over time by grasping new knowledge and remembering previously learned experiences is essential for developing an online task-oriented dialogue system (TDS). 
In this paper, we work on the class incremental learning scenario where the TDS is evaluated without specifying the dialogue domain. We employ contrastive distillation on the intermediate representations of dialogues to learn transferable representations that suffer less from catastrophic forgetting. Besides, we provide a dynamic update mechanism to explicitly preserve the learned experiences by only updating the parameters related to the new task while keeping other parameters fixed. Extensive experiments demonstrate that our method significantly outperforms the strong baselines.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Qiancheng and Yang, Min and Geng, Binzong}, year={2024}, month={Jul.}, pages={16368-16369} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27044/26816", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27044", + "pdf_size": 105854, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:R-Qu2zv392oJ:scholar.google.com/&scioq=Class+Incremental+Learning+for+Task-Oriented+Dialogue+System+with+Contrastive+Distillation+on+Internal+Representations+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "gatech.edu;siat.ac.cn;mail.ustc.edu.cn", + "email": "gatech.edu;siat.ac.cn;mail.ustc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Georgia Institute of Technology;Chinese Academy of Sciences;University of Science and Technology of China", + "aff_unique_dep": ";Shenzhen Institutes of Advanced Technology;", + "aff_unique_url": "https://www.gatech.edu;http://www.siat.cas.cn;http://www.ustc.edu.cn", + "aff_unique_abbr": "Georgia Tech;SIAT;USTC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-25407", + "title": "Class 
Overwhelms: Mutual Conditional Blended-Target Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Current methods of blended targets domain adaptation (BTDA) usually infer or consider domain label information but underemphasize hybrid categorical feature structures of targets, which yields limited performance, especially under the label distribution shift. We demonstrate that domain labels are not directly necessary for BTDA if categorical distributions of various domains are sufficiently aligned even facing the imbalance of domains and the label distribution shift of classes. However, we observe that the cluster assumption in BTDA does not comprehensively hold. The hybrid categorical feature space hinders the modeling of categorical distributions and the generation of reliable pseudo labels for categorical alignment. To address these, we propose a categorical domain discriminator guided by uncertainty to explicitly model and directly align categorical distributions P(Z|Y). Simultaneously, we utilize the low-level features to augment the single source features with diverse target styles to rectify the biased classifier P(Y|Z) among diverse targets. Such a mutual conditional alignment of P(Z|Y) and P(Y|Z) forms a mutual reinforced mechanism. 
Our approach outperforms the state-of-the-art in BTDA even compared with methods utilizing domain labels, especially under the label distribution shift, and in single target DA on DomainNet.", + "primary_area": "computer vision iii", + "author": "Pengcheng Xu; Boyu Wang; Charles Ling", + "authorids": "", + "aff": "Western University; Western University + Vector Institute; Western University", + "bibtex": "@article{Xu_Wang_Ling_2023, title={Class Overwhelms: Mutual Conditional Blended-Target Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25407}, DOI={10.1609/aaai.v37i3.25407}, abstractNote={Current methods of blended targets domain adaptation (BTDA) usually infer or consider domain label information but underemphasize hybrid categorical feature structures of targets, which yields limited performance, especially under the label distribution shift. We demonstrate that domain labels are not directly necessary for BTDA if categorical distributions of various domains are sufficiently aligned even facing the imbalance of domains and the label distribution shift of classes. However, we observe that the cluster assumption in BTDA does not comprehensively hold. The hybrid categorical feature space hinders the modeling of categorical distributions and the generation of reliable pseudo labels for categorical alignment. To address these, we propose a categorical domain discriminator guided by uncertainty to explicitly model and directly align categorical distributions P(Z|Y). Simultaneously, we utilize the low-level features to augment the single source features with diverse target styles to rectify the biased classifier P(Y|Z) among diverse targets. Such a mutual conditional alignment of P(Z|Y) and P(Y|Z) forms a mutual reinforced mechanism. 
Our approach outperforms the state-of-the-art in BTDA even compared with methods utilizing domain labels, especially under the label distribution shift, and in single target DA on DomainNet.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Pengcheng and Wang, Boyu and Ling, Charles}, year={2023}, month={Jun.}, pages={3036-3044} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25407/25179", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25407", + "pdf_size": 13961944, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15442172981017008316&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "uwo.ca;csd.uwo.ca;uwo.ca", + "email": "uwo.ca;csd.uwo.ca;uwo.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0", + "aff_unique_norm": "Western University;Vector Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uwo.ca;https://vectorinstitute.ai/", + "aff_unique_abbr": "Western;Vector Institute", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25434", + "title": "Class-Independent Regularization for Learning with Noisy Labels", + "track": "main", + "status": "Technical", + "abstract": "Training deep neural networks (DNNs) with noisy labels often leads to poorly generalized models as DNNs tend to memorize the noisy labels in training. Various strategies have been developed for improving sample selection precision and mitigating the noisy label memorization issue. However, most existing works adopt a class-dependent softmax classifier that is vulnerable to noisy labels by entangling the classification of multi-class features. This paper presents a class-independent regularization (CIR) method that can effectively alleviate the negative impact of noisy labels in DNN training. 
CIR regularizes the class-dependent softmax classifier by introducing multi-binary classifiers each of which takes care of one class only. Thanks to its class-independent nature, CIR is tolerant to noisy labels as misclassification by one binary classifier does not affect others. For effective training of CIR, we design a heterogeneous adaptive co-teaching strategy that forces the class-independent and class-dependent classifiers to focus on sample selection and image classification, respectively, in a cooperative manner. Extensive experiments show that CIR achieves superior performance consistently across multiple benchmarks with both synthetic and real images. Code is\navailable at https://github.com/RumengYi/CIR.", + "primary_area": "computer vision iii", + "author": "Rumeng Yi; Dayan Guan; Yaping Huang; Shijian Lu", + "authorids": "", + "aff": "Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, China; Mohamed bin Zayed University of Artificial Intelligence, UAE; Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore", + "bibtex": "@article{Yi_Guan_Huang_Lu_2023, title={Class-Independent Regularization for Learning with Noisy Labels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25434}, DOI={10.1609/aaai.v37i3.25434}, abstractNote={Training deep neural networks (DNNs) with noisy labels often leads to poorly generalized models as DNNs tend to memorize the noisy labels in training. Various strategies have been developed for improving sample selection precision and mitigating the noisy label memorization issue. However, most existing works adopt a class-dependent softmax classifier that is vulnerable to noisy labels by entangling the classification of multi-class features. 
This paper presents a class-independent regularization (CIR) method that can effectively alleviate the negative impact of noisy labels in DNN training. CIR regularizes the class-dependent softmax classifier by introducing multi-binary classifiers each of which takes care of one class only. Thanks to its class-independent nature, CIR is tolerant to noisy labels as misclassification by one binary classifier does not affect others. For effective training of CIR, we design a heterogeneous adaptive co-teaching strategy that forces the class-independent and class-dependent classifiers to focus on sample selection and image classification, respectively, in a cooperative manner. Extensive experiments show that CIR achieves superior performance consistently across multiple benchmarks with both synthetic and real images. Code is\navailable at https://github.com/RumengYi/CIR.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yi, Rumeng and Guan, Dayan and Huang, Yaping and Lu, Shijian}, year={2023}, month={Jun.}, pages={3276-3284} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25434/25206", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25434", + "pdf_size": 2049016, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6684893684991609652&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "bjtu.edu.cn;mbzuai.ac.ae;bjtu.edu.cn;ntu.edu.sg", + "email": "bjtu.edu.cn;mbzuai.ac.ae;bjtu.edu.cn;ntu.edu.sg", + "github": "https://github.com/RumengYi/CIR", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Beijing Jiaotong University;Mohamed bin Zayed University of Artificial Intelligence;Nanyang Technological University", + "aff_unique_dep": "Beijing Key Laboratory of Traffic Data Analysis and Mining;;School of Computer Science and Engineering", + "aff_unique_url": 
"http://www.bjtu.edu.cn;https://mbzuai.ac.ae;https://www.ntu.edu.sg", + "aff_unique_abbr": ";MBZUAI;NTU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Singapore", + "aff_country_unique_index": "0;1;0;2", + "aff_country_unique": "China;United Arab Emirates;Singapore" + }, + { + "id": "article-25171", + "title": "ClassFormer: Exploring Class-Aware Dependency with Transformer for Medical Image Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Vision Transformers have recently shown impressive performances on medical image segmentation. Despite their strong capability of modeling long-range dependencies, the current methods still give rise to two main concerns in a class-level perspective: (1) intra-class problem: the existing methods lacked in extracting class-specific correspondences of different pixels, which may lead to poor object coverage and/or boundary prediction; (2) inter-class problem: the existing methods failed to model explicit category-dependencies among various objects, which may result in inaccurate localization. In light of these two issues, we propose a novel transformer, called ClassFormer, powered by two appealing transformers, i.e., intra-class dynamic transformer and inter-class interactive transformer, to address the challenge of fully exploration on compactness and discrepancy. Technically, the intra-class dynamic transformer is first designed to decouple representations of different categories with an adaptive selection mechanism for compact learning, which optimally highlights the informative features to reflect the salient keys/values from multiple scales. We further introduce the inter-class interactive transformer to capture the category dependency among different objects, and model class tokens as the representative class centers to guide a global semantic reasoning. 
As a consequence, the feature consistency is ensured with the expense of intra-class penalization, while inter-class constraint strengthens the feature discriminability between different categories. Extensive empirical evidence shows that ClassFormer can be easily plugged into any architecture, and yields improvements over the state-of-the-art methods in three public benchmarks.", + "primary_area": "computer vision i", + "author": "Huimin Huang; Shiao Xie; Lanfen Lin; Ruofeng Tong; Yen-Wei Chen; Hong Wang; Yuexiang Li; Yawen Huang; Yefeng Zheng", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University+Zhejiang Lab; Ritsumeikan University; Tencent Jarvis Lab; Tencent Jarvis Lab; Tencent Jarvis Lab; Tencent Jarvis Lab", + "bibtex": "@article{Huang_Xie_Lin_Tong_Chen_Wang_Li_Huang_Zheng_2023, title={ClassFormer: Exploring Class-Aware Dependency with Transformer for Medical Image Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25171}, DOI={10.1609/aaai.v37i1.25171}, abstractNote={Vision Transformers have recently shown impressive performances on medical image segmentation. Despite their strong capability of modeling long-range dependencies, the current methods still give rise to two main concerns in a class-level perspective: (1) intra-class problem: the existing methods lacked in extracting class-specific correspondences of different pixels, which may lead to poor object coverage and/or boundary prediction; (2) inter-class problem: the existing methods failed to model explicit category-dependencies among various objects, which may result in inaccurate localization. In light of these two issues, we propose a novel transformer, called ClassFormer, powered by two appealing transformers, i.e., intra-class dynamic transformer and inter-class interactive transformer, to address the challenge of fully exploration on compactness and discrepancy. 
Technically, the intra-class dynamic transformer is first designed to decouple representations of different categories with an adaptive selection mechanism for compact learning, which optimally highlights the informative features to reflect the salient keys/values from multiple scales. We further introduce the inter-class interactive transformer to capture the category dependency among different objects, and model class tokens as the representative class centers to guide a global semantic reasoning. As a consequence, the feature consistency is ensured with the expense of intra-class penalization, while inter-class constraint strengthens the feature discriminability between different categories. Extensive empirical evidence shows that ClassFormer can be easily plugged into any architecture, and yields improvements over the state-of-the-art methods in three public benchmarks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Huimin and Xie, Shiao and Lin, Lanfen and Tong, Ruofeng and Chen, Yen-Wei and Wang, Hong and Li, Yuexiang and Huang, Yawen and Zheng, Yefeng}, year={2023}, month={Jun.}, pages={917-925} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25171/24943", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25171", + "pdf_size": 9855368, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13246010256095279586&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;ritsumei.ac.jp;tencent.com;tencent.com;tencent.com;tencent.com", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;ritsumei.ac.jp;tencent.com;tencent.com;tencent.com;tencent.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0+1;2;3;3;3;3", + "aff_unique_norm": "Zhejiang University;Zhejiang Lab;Ritsumeikan University;Tencent", + "aff_unique_dep": ";;;Jarvis Lab", + "aff_unique_url": 
"https://www.zju.edu.cn;http://www.zhejianglab.com;https://www.ritsumei.ac.jp;https://www.tencent.com", + "aff_unique_abbr": "ZJU;;Ritsumeikan;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;1;0;0;0;0", + "aff_country_unique": "China;Japan" + }, + { + "id": "article-26285", + "title": "Cluster-Guided Contrastive Graph Clustering Network", + "track": "main", + "status": "Technical", + "abstract": "Benefiting from the intrinsic supervision information exploitation capability, contrastive learning has achieved promising performance in the field of deep graph clustering recently. However, we observe that two drawbacks of the positive and negative sample construction mechanisms limit the performance of existing algorithms from further improvement. 1) The quality of positive samples heavily depends on the carefully designed data augmentations, while inappropriate data augmentations would easily lead to the semantic drift and indiscriminative positive samples. 2) The constructed negative samples are not reliable for ignoring important clustering information. To solve these problems, we propose a Cluster-guided Contrastive deep Graph Clustering network (CCGC) by mining the intrinsic supervision information in the high-confidence clustering results. Specifically, instead of conducting complex node or edge perturbation, we construct two views of the graph by designing special Siamese encoders whose weights are not shared between the sibling sub-networks. Then, guided by the high-confidence clustering information, we carefully select and construct the positive samples from the same high-confidence cluster in two views. Moreover, to construct semantic meaningful negative sample pairs, we regard the centers of different high-confidence clusters as negative samples, thus improving the discriminative capability and reliability of the constructed sample pairs. 
Lastly, we design an objective function to pull close the samples from the same cluster while pushing away those from other clusters by maximizing and minimizing the cross-view cosine similarity between positive and negative samples. Extensive experimental results on six datasets demonstrate the effectiveness of CCGC compared with the existing state-of-the-art algorithms. The code of CCGC is available at https://github.com/xihongyang1999/CCGC on Github.", + "primary_area": "machine learning iv", + "author": "Xihong Yang; Yue Liu; Sihang Zhou; Siwei Wang; Wenxuan Tu; Qun Zheng; Xinwang Liu; Liming Fang; En Zhu", + "authorids": "", + "aff": "College of Computer, National University of Defense Technology; College of Computer, National University of Defense Technology; College of Intelligence Science and Technology, National University of Defense Technology; College of Computer, National University of Defense Technology; College of Computer, National University of Defense Technology; University of Science and Technology of China; College of Computer, National University of Defense Technology+*; Nanjing University of Aeronautics and Astronautics; College of Computer, National University of Defense Technology+\u2020", + "bibtex": "@article{Yang_Liu_Zhou_Wang_Tu_Zheng_Liu_Fang_Zhu_2023, title={Cluster-Guided Contrastive Graph Clustering Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26285}, DOI={10.1609/aaai.v37i9.26285}, abstractNote={Benefiting from the intrinsic supervision information exploitation capability, contrastive learning has achieved promising performance in the field of deep graph clustering recently. However, we observe that two drawbacks of the positive and negative sample construction mechanisms limit the performance of existing algorithms from further improvement. 
1) The quality of positive samples heavily depends on the carefully designed data augmentations, while inappropriate data augmentations would easily lead to the semantic drift and indiscriminative positive samples. 2) The constructed negative samples are not reliable for ignoring important clustering information. To solve these problems, we propose a Cluster-guided Contrastive deep Graph Clustering network (CCGC) by mining the intrinsic supervision information in the high-confidence clustering results. Specifically, instead of conducting complex node or edge perturbation, we construct two views of the graph by designing special Siamese encoders whose weights are not shared between the sibling sub-networks. Then, guided by the high-confidence clustering information, we carefully select and construct the positive samples from the same high-confidence cluster in two views. Moreover, to construct semantic meaningful negative sample pairs, we regard the centers of different high-confidence clusters as negative samples, thus improving the discriminative capability and reliability of the constructed sample pairs. Lastly, we design an objective function to pull close the samples from the same cluster while pushing away those from other clusters by maximizing and minimizing the cross-view cosine similarity between positive and negative samples. Extensive experimental results on six datasets demonstrate the effectiveness of CCGC compared with the existing state-of-the-art algorithms. 
The code of CCGC is available at https://github.com/xihongyang1999/CCGC on Github.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Xihong and Liu, Yue and Zhou, Sihang and Wang, Siwei and Tu, Wenxuan and Zheng, Qun and Liu, Xinwang and Fang, Liming and Zhu, En}, year={2023}, month={Jun.}, pages={10834-10842} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26285/26057", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26285", + "pdf_size": 4463710, + "gs_citation": 115, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10411916681573644280&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "nudt.edu.cn; ; ; ; ; ; ; ; ", + "email": "nudt.edu.cn; ; ; ; ; ; ; ; ", + "github": "https://github.com/xihongyang1999/CCGC", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;1;0;3;0", + "aff_unique_norm": "National University of Defense Technology;University of Science and Technology of China;;Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "College of Computer;;;", + "aff_unique_url": "http://www.nudt.edu.cn/;http://www.ustc.edu.cn;;http://www.nuaa.edu.cn", + "aff_unique_abbr": "NUDT;USTC;;NUAA", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-27049", + "title": "Clustered Federated Learning for Heterogeneous Data (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Federated Learning (FL) aims to achieve a global model via aggregating models from all devices. However, it can diverge when the data on the users\u2019 devices are heterogeneous. To address this issue, we propose a novel clustered FL method (FPFC) based on a nonconvex pairwise fusion penalty. 
FPFC can automatically identify clusters without prior knowledge of the number of clusters and the set of devices in each cluster. Our method is implemented in parallel, updates only a subset of devices at each communication round, and allows each participating device to perform inexact computation. We also provide convergence guarantees of FPFC for general nonconvex losses. Experiment results demonstrate the advantages of FPFC over existing methods.", + "primary_area": "", + "author": "Xue Yu; Ziyi Liu; Yifan Sun; Wu Wang", + "authorids": "", + "aff": "Center for Applied Statistics, School of Statistics, Renmin University of China, China; Center for Applied Statistics, School of Statistics, Renmin University of China, China; Center for Applied Statistics, School of Statistics, Renmin University of China, China; Center for Applied Statistics, School of Statistics, Renmin University of China, China", + "bibtex": "@article{Yu_Liu_Sun_Wang_2024, title={Clustered Federated Learning for Heterogeneous Data (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27049}, DOI={10.1609/aaai.v37i13.27049}, abstractNote={Federated Learning (FL) aims to achieve a global model via aggregating models from all devices. However, it can diverge when the data on the users\u2019 devices are heterogeneous. To address this issue, we propose a novel clustered FL method (FPFC) based on a nonconvex pairwise fusion penalty. FPFC can automatically identify clusters without prior knowledge of the number of clusters and the set of devices in each cluster. Our method is implemented in parallel, updates only a subset of devices at each communication round, and allows each participating device to perform inexact computation. We also provide convergence guarantees of FPFC for general nonconvex losses. 
Experiment results demonstrate the advantages of FPFC over existing methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Xue and Liu, Ziyi and Sun, Yifan and Wang, Wu}, year={2024}, month={Jul.}, pages={16378-16379} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27049/26821", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27049", + "pdf_size": 857092, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2444339741871914299&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Renmin University of China", + "aff_unique_dep": "School of Statistics", + "aff_unique_url": "http://www.ruc.edu.cn", + "aff_unique_abbr": "RUC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25818", + "title": "Clustering What Matters: Optimal Approximation for Clustering with Outliers", + "track": "main", + "status": "Technical", + "abstract": "Clustering with outliers is one of the most fundamental problems in Computer Science. Given a set X of n points and two numbers k and m, the clustering with outliers aims to exclude m points from X, and partition the remaining points into k clusters that minimizes a certain cost function. 
In this paper, we give a general approach for solving clustering with outliers, which results in a fixed-parameter tractable (FPT) algorithm in k and m (i.e., an algorithm with running time of the form f(k, m) * poly(n) for some function f), that almost matches the approximation ratio for its outlier-free counterpart.\n\nAs a corollary, we obtain FPT approximation algorithms with optimal approximation ratios for k-Median and k-Means with outliers in general and Euclidean metrics. We also exhibit more applications of our approach to other variants of the problem that impose additional constraints on the clustering, such as fairness or matroid constraints.", + "primary_area": "machine learning i", + "author": "Akanksha Agrawal; Tanmay Inamdar; Saket Saurabh; Jie Xue", + "authorids": "", + "aff": "Indian Institute of Technology Madras, Chennai, India; University of Bergen, Bergen, Norway; University of Bergen, Bergen, Norway+The Institute of Mathematical Sciences, HBNI, Chennai, India; New York University Shanghai, China", + "bibtex": "@article{Agrawal_Inamdar_Saurabh_Xue_2023, title={Clustering What Matters: Optimal Approximation for Clustering with Outliers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25818}, DOI={10.1609/aaai.v37i6.25818}, abstractNote={Clustering with outliers is one of the most fundamental problems in Computer Science. Given a set X of n points and two numbers k and m, the clustering with outliers aims to exclude m points from X, and partition the remaining points into k clusters that minimizes a certain cost function. In this paper, we give a general approach for solving clustering with outliers, which results in a fixed-parameter tractable (FPT) algorithm in k and m (i.e., an algorithm with running time of the form f(k, m) * poly(n) for some function f), that almost matches the approximation ratio for its outlier-free counterpart. 
As a corollary, we obtain FPT approximation algorithms with optimal approximation ratios for k-Median and k-Means with outliers in general and Euclidean metrics. We also exhibit more applications of our approach to other variants of the problem that impose additional constraints on the clustering, such as fairness or matroid constraints.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Agrawal, Akanksha and Inamdar, Tanmay and Saurabh, Saket and Xue, Jie}, year={2023}, month={Jun.}, pages={6666-6674} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25818/25590", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25818", + "pdf_size": 177516, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12974719857713333959&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 12, + "aff_domain": "cse.iitm.ac.in;uib.no;imsc.res.in;nyu.edu", + "email": "cse.iitm.ac.in;uib.no;imsc.res.in;nyu.edu", + "github": "", + "project": "arXiv:2212.00696", + "author_num": 4, + "aff_unique_index": "0;1;1+2;3", + "aff_unique_norm": "Indian Institute of Technology Madras;University of Bergen;The Institute of Mathematical Sciences;New York University Shanghai", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.iitm.ac.in;https://www.uib.no;;https://shanghai.nyu.edu", + "aff_unique_abbr": "IIT Madras;uib;IMS;NYU Shanghai", + "aff_campus_unique_index": "0;1;1+0;2", + "aff_campus_unique": "Chennai;Bergen;Shanghai", + "aff_country_unique_index": "0;1;1+0;2", + "aff_country_unique": "India;Norway;China" + }, + { + "id": "article-25764", + "title": "Co-imitation: Learning Design and Behaviour by Imitation", + "track": "main", + "status": "Technical", + "abstract": "The co-adaptation of robots has been a long-standing research endeavour with the goal of adapting both body and behaviour of a robot for a given task, inspired by the natural evolution of animals. 
Co-adaptation has the potential to eliminate costly manual hardware engineering as well as improve the performance of systems.\nThe standard approach to co-adaptation is to use a reward function for optimizing behaviour and morphology. However, defining and constructing such reward functions is notoriously difficult and often a significant engineering effort.\nThis paper introduces a new viewpoint on the co-adaptation problem, which we call co-imitation: finding a morphology and a policy that allow an imitator to closely match the behaviour of a demonstrator. To this end we propose a co-imitation methodology for adapting behaviour and morphology by matching state-distributions of the demonstrator. Specifically, we focus on the challenging scenario with mismatched state- and action-spaces between both agents. We find that co-imitation increases behaviour similarity across a variety of tasks and settings, and demonstrate co-imitation by transferring human walking, jogging and kicking skills onto a simulated humanoid.", + "primary_area": "intelligent robotics", + "author": "Chang Rajani; Karol Arndt; David Blanco-Mulero; Kevin Sebastian Luck; Ville Kyrki", + "authorids": "", + "aff": "Department of Computer Science, University of Helsinki, Finland + Department of Electrical Engineering and Automation (EEA), Aalto University, Finland; Department of Electrical Engineering and Automation (EEA), Aalto University, Finland; Department of Electrical Engineering and Automation (EEA), Aalto University, Finland; Department of Electrical Engineering and Automation (EEA), Aalto University, Finland + Finnish Center for Artificial Intelligence, Finland; Department of Electrical Engineering and Automation (EEA), Aalto University, Finland", + "bibtex": "@article{Rajani_Arndt_Blanco-Mulero_Luck_Kyrki_2023, title={Co-imitation: Learning Design and Behaviour by Imitation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25764}, DOI={10.1609/aaai.v37i5.25764}, 
abstractNote={The co-adaptation of robots has been a long-standing research endeavour with the goal of adapting both body and behaviour of a robot for a given task, inspired by the natural evolution of animals. Co-adaptation has the potential to eliminate costly manual hardware engineering as well as improve the performance of systems.\nThe standard approach to co-adaptation is to use a reward function for optimizing behaviour and morphology. However, defining and constructing such reward functions is notoriously difficult and often a significant engineering effort.\nThis paper introduces a new viewpoint on the co-adaptation problem, which we call co-imitation: finding a morphology and a policy that allow an imitator to closely match the behaviour of a demonstrator. To this end we propose a co-imitation methodology for adapting behaviour and morphology by matching state-distributions of the demonstrator. Specifically, we focus on the challenging scenario with mismatched state- and action-spaces between both agents. 
We find that co-imitation increases behaviour similarity across a variety of tasks and settings, and demonstrate co-imitation by transferring human walking, jogging and kicking skills onto a simulated humanoid.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rajani, Chang and Arndt, Karol and Blanco-Mulero, David and Luck, Kevin Sebastian and Kyrki, Ville}, year={2023}, month={Jun.}, pages={6200-6208} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25764/25536", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25764", + "pdf_size": 2929346, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16886507978306894953&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 13, + "aff_domain": "helsinki.fi;aalto.fi;aalto.fi;aalto.fi;aalto.fi", + "email": "helsinki.fi;aalto.fi;aalto.fi;aalto.fi;aalto.fi", + "github": "", + "project": "https://sites.google.com/view/co-imitation", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1+2;1", + "aff_unique_norm": "University of Helsinki;Aalto University;Finnish Center for Artificial Intelligence", + "aff_unique_dep": "Department of Computer Science;Department of Electrical Engineering and Automation;", + "aff_unique_url": "https://www.helsinki.fi;https://www.aalto.fi;", + "aff_unique_abbr": "UH;Aalto;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0;0", + "aff_country_unique": "Finland" + }, + { + "id": "article-25419", + "title": "CoMAE: Single Model Hybrid Pre-training on Small-Scale RGB-D Datasets", + "track": "main", + "status": "Technical", + "abstract": "Current RGB-D scene recognition approaches often train two standalone backbones for RGB and depth modalities with the same Places or ImageNet pre-training. However, the pre-trained depth network is still biased by RGB-based models which may result in a suboptimal solution. 
In this paper, we present a single-model self-supervised hybrid pre-training framework for RGB and depth modalities, termed as CoMAE. Our CoMAE presents a curriculum learning strategy to unify the two popular self-supervised representation learning algorithms: contrastive learning and masked image modeling. Specifically, we first build a patch-level alignment task to pre-train a single encoder shared by two modalities via cross-modal contrastive learning. Then, the pre-trained contrastive encoder is passed to a multi-modal masked autoencoder to capture the finer context features from a generative perspective. In addition, our single-model design without requirement of fusion module is very flexible and robust to generalize to unimodal scenario in both training and testing phases. Extensive experiments on SUN RGB-D and NYUDv2 datasets demonstrate the effectiveness of our CoMAE for RGB and depth representation learning. In addition, our experiment results reveal that CoMAE is a data-efficient representation learner. Although we only use the small-scale and unlabeled training set for pre-training, our CoMAE pre-trained models are still competitive to the state-of-the-art methods with extra large-scale and supervised RGB dataset pre-training. 
Code will be released at https://github.com/MCG-NJU/CoMAE.", + "primary_area": "computer vision iii", + "author": "Jiange Yang; Sheng Guo; Gangshan Wu; Limin Wang", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; MYbank, Ant Group, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Yang_Guo_Wu_Wang_2023, title={CoMAE: Single Model Hybrid Pre-training on Small-Scale RGB-D Datasets}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25419}, DOI={10.1609/aaai.v37i3.25419}, abstractNote={Current RGB-D scene recognition approaches often train two standalone backbones for RGB and depth modalities with the same Places or ImageNet pre-training. However, the pre-trained depth network is still biased by RGB-based models which may result in a suboptimal solution. In this paper, we present a single-model self-supervised hybrid pre-training framework for RGB and depth modalities, termed as CoMAE. Our CoMAE presents a curriculum learning strategy to unify the two popular self-supervised representation learning algorithms: contrastive learning and masked image modeling. Specifically, we first build a patch-level alignment task to pre-train a single encoder shared by two modalities via cross-modal contrastive learning. Then, the pre-trained contrastive encoder is passed to a multi-modal masked autoencoder to capture the finer context features from a generative perspective. In addition, our single-model design without requirement of fusion module is very flexible and robust to generalize to unimodal scenario in both training and testing phases. Extensive experiments on SUN RGB-D and NYUDv2 datasets demonstrate the effectiveness of our CoMAE for RGB and depth representation learning. 
In addition, our experiment results reveal that CoMAE is a data-efficient representation learner. Although we only use the small-scale and unlabeled training set for pre-training, our CoMAE pre-trained models are still competitive to the state-of-the-art methods with extra large-scale and supervised RGB dataset pre-training. Code will be released at https://github.com/MCG-NJU/CoMAE.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Jiange and Guo, Sheng and Wu, Gangshan and Wang, Limin}, year={2023}, month={Jun.}, pages={3145-3154} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25419/25191", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25419", + "pdf_size": 863391, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3269959912821246070&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com;nju.edu.cn;nju.edu.cn", + "email": "gmail.com;gmail.com;nju.edu.cn;nju.edu.cn", + "github": "https://github.com/MCG-NJU/CoMAE", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Nanjing University;MYbank", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;", + "aff_unique_abbr": "Nanjing U;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26589", + "title": "CoP: Factual Inconsistency Detection by Controlling the Preference", + "track": "main", + "status": "Technical", + "abstract": "Abstractive summarization is the process of generating a summary given a document as input. Although significant progress has been made, the factual inconsistency between the document and the generated summary still limits its practical applications. 
Previous work found that the probabilities assigned by the generation model reflect its preferences for the generated summary, including the preference for factual consistency, and the preference for the language or knowledge prior as well. To separate the preference for factual consistency, we propose an unsupervised framework named CoP by controlling the preference of the generation model with the help of prompt. More specifically, the framework performs an extra inference step in which a text prompt is introduced as an additional input. In this way, another preference is described by the generation probability of this extra inference process. The difference between the above two preferences, i.e. the difference between the probabilities, could be used as measurements for detecting factual inconsistencies. Interestingly, we found that with the properly designed prompt, our framework could evaluate specific preferences and serve as measurements for fine-grained categories of inconsistency, such as entity-related inconsistency, coreference-related inconsistency, etc. Moreover, our framework could also be extended to the supervised setting to learn better prompt from the labeled data as well. 
Experiments show that our framework achieves new SOTA results on three factual inconsistency detection tasks.",
+    "primary_area": "speech natural language processing",
+    "author": "Shuaijie She; Xiang Geng; Shujian Huang; Jiajun Chen",
+    "authorids": "",
+    "aff": "National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University",
+    "bibtex": "@article{She_Geng_Huang_Chen_2023, title={CoP: Factual Inconsistency Detection by Controlling the Preference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26589}, DOI={10.1609/aaai.v37i11.26589}, abstractNote={Abstractive summarization is the process of generating a summary given a document as input. Although significant progress has been made, the factual inconsistency between the document and the generated summary still limits its practical applications. Previous work found that the probabilities assigned by the generation model reflect its preferences for the generated summary, including the preference for factual consistency, and the preference for the language or knowledge prior as well. To separate the preference for factual consistency, we propose an unsupervised framework named CoP by controlling the preference of the generation model with the help of prompt. More specifically, the framework performs an extra inference step in which a text prompt is introduced as an additional input. In this way, another preference is described by the generation probability of this extra inference process. The difference between the above two preferences, i.e. the difference between the probabilities, could be used as measurements for detecting factual inconsistencies. 
Interestingly, we found that with the properly designed prompt, our framework could evaluate specific preferences and serve as measurements for fine-grained categories of inconsistency, such as entity-related inconsistency, coreference-related inconsistency, etc. Moreover, our framework could also be extended to the supervised setting to learn better prompt from the labeled data as well. Experiments show that our framework achieves new SOTA results on three factual inconsistency detection tasks.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={She, Shuaijie and Geng, Xiang and Huang, Shujian and Chen, Jiajun}, year={2023}, month={Jun.}, pages={13556-13563} }",
+    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26589/26361",
+    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26589",
+    "pdf_size": 677765,
+    "gs_citation": 4,
+    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3581862160248046326&as_sdt=2005&sciodt=0,5&hl=en",
+    "gs_version_total": 6,
+    "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn",
+    "email": "smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn",
+    "github": "",
+    "project": "",
+    "author_num": 4,
+    "aff_unique_index": "0;0;0;0",
+    "aff_unique_norm": "Nanjing University",
+    "aff_unique_dep": "National Key Laboratory for Novel Software Technology",
+    "aff_unique_url": "http://www.nju.edu.cn",
+    "aff_unique_abbr": "Nanjing University",
+    "aff_campus_unique_index": "",
+    "aff_campus_unique": "",
+    "aff_country_unique_index": "0;0;0;0",
+    "aff_country_unique": "China"
+  },
+  {
+    "id": "article-25292",
+    "title": "Coarse2Fine: Local Consistency Aware Re-prediction for Weakly Supervised Object Localization",
+    "track": "main",
+    "status": "Technical",
+    "abstract": "Weakly supervised object localization aims to localize objects of interest by using only image-level labels. 
Existing methods generally segment activation map by threshold to obtain mask and generate bounding box. However, the activation map is locally inconsistent, i.e., similar neighboring pixels of the same object are not equally activated, which leads to the blurred boundary issue: the localization result is sensitive to the threshold, and the mask obtained directly from the activation map loses the fine contours of the object, making it difficult to obtain a tight bounding box. In this paper, we introduce the Local Consistency Aware Re-prediction (LCAR) framework, which aims to recover the complete fine object mask from locally inconsistent activation map and hence obtain a tight bounding box. To this end, we propose the self-guided re-prediction module (SGRM), which employs a novel superpixel aggregation network to replace the post-processing of threshold segmentation. In order to derive more reliable pseudo label from the activation map to supervise the SGRM, we further design an affinity refinement module (ARM) that utilizes the original image feature to better align the activation map with the image appearance, and design a self-distillation CAM (SD-CAM) to alleviate the locator dependence on saliency. 
Experiments demonstrate that our LCAR outperforms the state-of-the-art on both the CUB-200-2011 and ILSVRC datasets, achieving 95.89% and 70.72% of GT-Know localization accuracy, respectively.", + "primary_area": "computer vision ii", + "author": "Yixuan Pan; Yao Yao; Yichao Cao; Chongjin Chen; Xiaobo Lu", + "authorids": "", + "aff": "School of Automation, Southeast University, Nanjing, China + Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Nanjing, China; Shanghai Institute of Microsystem and Information Technology, Chinese Academy of Sciences Shanghai, China + University of Chinese Academy of Sciences, Beijing, China; School of Automation, Southeast University, Nanjing, China + Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Nanjing, China; School of Automation, Southeast University, Nanjing, China + Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Nanjing, China; School of Automation, Southeast University, Nanjing, China + Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Nanjing, China", + "bibtex": "@article{Pan_Yao_Cao_Chen_Lu_2023, title={Coarse2Fine: Local Consistency Aware Re-prediction for Weakly Supervised Object Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25292}, DOI={10.1609/aaai.v37i2.25292}, abstractNote={Weakly supervised object localization aims to localize objects of interest by using only image-level labels. Existing methods generally segment activation map by threshold to obtain mask and generate bounding box. 
However, the activation map is locally inconsistent, i.e., similar neighboring pixels of the same object are not equally activated, which leads to the blurred boundary issue: the localization result is sensitive to the threshold, and the mask obtained directly from the activation map loses the fine contours of the object, making it difficult to obtain a tight bounding box. In this paper, we introduce the Local Consistency Aware Re-prediction (LCAR) framework, which aims to recover the complete fine object mask from locally inconsistent activation map and hence obtain a tight bounding box. To this end, we propose the self-guided re-prediction module (SGRM), which employs a novel superpixel aggregation network to replace the post-processing of threshold segmentation. In order to derive more reliable pseudo label from the activation map to supervise the SGRM, we further design an affinity refinement module (ARM) that utilizes the original image feature to better align the activation map with the image appearance, and design a self-distillation CAM (SD-CAM) to alleviate the locator dependence on saliency. 
Experiments demonstrate that our LCAR outperforms the state-of-the-art on both the CUB-200-2011 and ILSVRC datasets, achieving 95.89% and 70.72% of GT-Know localization accuracy, respectively.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Yixuan and Yao, Yao and Cao, Yichao and Chen, Chongjin and Lu, Xiaobo}, year={2023}, month={Jun.}, pages={2002-2010} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25292/25064", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25292", + "pdf_size": 15552615, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9200402542578212652&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "seu.edu.cn;mails.ucas.ac.cn;seu.edu.cn;outlook.com;126.com", + "email": "seu.edu.cn;mails.ucas.ac.cn;seu.edu.cn;outlook.com;126.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2+3;0+1;0+1;0+1", + "aff_unique_norm": "Southeast University;Key Laboratory of Measurement and Control of Complex Systems of Engineering;Shanghai Institute of Microsystem and Information Technology;University of Chinese Academy of Sciences", + "aff_unique_dep": "School of Automation;Ministry of Education;;", + "aff_unique_url": "https://www.seu.edu.cn/;;http://www.sim.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "SEU;;SIM;UCAS", + "aff_campus_unique_index": "0;2+3;0;0;0", + "aff_campus_unique": "Nanjing;;Shanghai;Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26226", + "title": "Code-Aware Cross-Program Transfer Hyperparameter Optimization", + "track": "main", + "status": "Technical", + "abstract": "Hyperparameter tuning is an essential task in automatic machine learning and big data management. 
\nTo accelerate tuning, many recent studies focus on augmenting BO, the primary hyperparameter tuning strategy, by transferring information from other tuning tasks.\nHowever, existing studies ignore program similarities in their transfer mechanism, thus they are sub-optimal in cross-program transfer when tuning tasks involve different programs. \nThis paper proposes CaTHPO, a code-aware cross-program transfer hyperparameter optimization framework, which makes three improvements. \n(1) It learns code-aware program representation in a self-supervised manner to give an off-the-shelf estimate of program similarities. \n(2) It adjusts the surrogate and AF in BO based on program similarities, thus the hyperparameter search is guided by accumulated information across similar programs. \n(3) It presents a safe controller to dynamically prune undesirable sample points based on tuning experiences of similar programs. \nExtensive experiments on tuning various recommendation models and Spark applications have demonstrated that CaTHPO can steadily obtain better and more robust hyperparameter performances within fewer samples than state-of-the-art competitors.",
+    "primary_area": "machine learning iv",
+    "author": "Zijia Wang; Xiangyu He; Kehan Chen; Chen Lin; Jinsong Su",
+    "authorids": "",
+    "aff": "School of Informatics, Xiamen University; School of Informatics, Xiamen University; School of Informatics, Xiamen University; School of Informatics, Xiamen University; School of Informatics, Xiamen University",
+    "bibtex": "@article{Wang_He_Chen_Lin_Su_2023, title={Code-Aware Cross-Program Transfer Hyperparameter Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26226}, DOI={10.1609/aaai.v37i9.26226}, abstractNote={Hyperparameter tuning is an essential task in automatic machine learning and big data management. 
To accelerate tuning, many recent studies focus on augmenting BO, the primary hyperparameter tuning strategy, by transferring information from other tuning tasks.\nHowever, existing studies ignore program similarities in their transfer mechanism, thus they are sub-optimal in cross-program transfer when tuning tasks involve different programs. This paper proposes CaTHPO, a code-aware cross-program transfer hyperparameter optimization framework, which makes three improvements. (1) It learns code-aware program representation in a self-supervised manner to give an off-the-shelf estimate of program similarities. (2) It adjusts the surrogate and AF in BO based on program similarities, thus the hyperparameter search is guided by accumulated information across similar programs. (3) It presents a safe controller to dynamically prune undesirable sample points based on tuning experiences of similar programs. Extensive experiments on tuning various recommendation models and Spark applications have demonstrated that CaTHPO can steadily obtain better and more robust hyperparameter performances within fewer samples than state-of-the-art competitors.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zijia and He, Xiangyu and Chen, Kehan and Lin, Chen and Su, Jinsong}, year={2023}, month={Jun.}, pages={10297-10305} }",
+    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26226/25998",
+    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26226",
+    "pdf_size": 2589532,
+    "gs_citation": 0,
+    "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:D_5UuCpBae0J:scholar.google.com/&scioq=Code-Aware+Cross-Program+Transfer+Hyperparameter+Optimization&hl=en&as_sdt=0,5",
+    "gs_version_total": 3,
+    "aff_domain": "xmu.edu.cn; ; ; ; ",
+    "email": "xmu.edu.cn; ; ; ; ",
+    "github": "",
+    "project": "",
+    "author_num": 5,
+    "aff_unique_index": "0;0;0;0;0",
+    "aff_unique_norm": "Xiamen University",
+    "aff_unique_dep": "School 
of Informatics", + "aff_unique_url": "https://www.xmu.edu.cn", + "aff_unique_abbr": "XMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26739", + "title": "CodeAttack: Code-Based Adversarial Attacks for Pre-trained Programming Language Models", + "track": "aaai special track", + "status": "Technical", + "abstract": "Pre-trained programming language (PL) models (such as CodeT5, CodeBERT, GraphCodeBERT, etc.,) have the potential to automate software engineering tasks involving code understanding and code generation. However, these models operate in the natural channel of code, i.e., primarily concerned with the human understanding of code. They are not robust to changes in the input and thus, are potentially susceptible to adversarial attacks in the natural channel. We propose, Code Attack, a simple yet effective black-box attack model that uses code structure to generate effective, efficient, and imperceptible adversarial code samples and demonstrates the vulnerabilities of the state-of-the-art PL models to code-specific adversarial attacks. We evaluate the transferability of CodeAttack on several code-code (translation and repair) and code-NL (summarization) tasks across different programming languages. Code Attack outperforms state-of-the-art adversarial NLP attack models to achieve the best overall drop in performance while being more efficient, imperceptible, consistent, and fluent. The code can be found at https://github.com/reddy-lab-code-research/CodeAttack.", + "primary_area": "safe and robust ai", + "author": "Akshita Jha; Chandan K. 
Reddy", + "authorids": "", + "aff": "Department of Computer Science, Virginia Tech, Arlington V A - 22203; Department of Computer Science, Virginia Tech, Arlington V A - 22203", + "bibtex": "@article{Jha_Reddy_2023, title={CodeAttack: Code-Based Adversarial Attacks for Pre-trained Programming Language Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26739}, DOI={10.1609/aaai.v37i12.26739}, abstractNote={Pre-trained programming language (PL) models (such as CodeT5, CodeBERT, GraphCodeBERT, etc.,) have the potential to automate software engineering tasks involving code understanding and code generation. However, these models operate in the natural channel of code, i.e., primarily concerned with the human understanding of code. They are not robust to changes in the input and thus, are potentially susceptible to adversarial attacks in the natural channel. We propose, Code Attack, a simple yet effective black-box attack model that uses code structure to generate effective, efficient, and imperceptible adversarial code samples and demonstrates the vulnerabilities of the state-of-the-art PL models to code-specific adversarial attacks. We evaluate the transferability of CodeAttack on several code-code (translation and repair) and code-NL (summarization) tasks across different programming languages. Code Attack outperforms state-of-the-art adversarial NLP attack models to achieve the best overall drop in performance while being more efficient, imperceptible, consistent, and fluent. 
The code can be found at https://github.com/reddy-lab-code-research/CodeAttack.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jha, Akshita and Reddy, Chandan K.}, year={2023}, month={Jun.}, pages={14892-14900} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26739/26511", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26739", + "pdf_size": 1115862, + "gs_citation": 75, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5734784623228882692&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "vt.edu;cs.vt.edu", + "email": "vt.edu;cs.vt.edu", + "github": "https://github.com/reddy-lab-code-research/CodeAttack", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Virginia Tech", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.vt.edu", + "aff_unique_abbr": "VT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Arlington", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27087", + "title": "CodeStylist: A System for Performing Code Style Transfer Using Neural Networks", + "track": "demonstrations", + "status": "Technical", + "abstract": "Code style refers to attributes of computer programs that affect their readability, maintainability, and performance. Enterprises consider code style as important and enforce style requirements during code commits. Tools that assist in coding style compliance and transformations are highly valuable. However, many key aspects of programming style transfer are difficult to automate, as it can be challenging to specify the patterns required to perform the transfer algorithmically. 
In this paper, we describe a system called CodeStylist which uses neural methods to perform style transfer on code.", + "primary_area": "", + "author": "Chih-Kai Ting; Karl Munson; Serenity Wade; Anish Savla; Kiran Kate; Kavitha Srinivas", + "authorids": "", + "aff": "University of California Santa Cruz; University of California Santa Cruz; University of California Santa Cruz; University of California Santa Cruz; IBM Research; IBM Research", + "bibtex": "@article{Ting_Munson_Wade_Savla_Kate_Srinivas_2024, title={CodeStylist: A System for Performing Code Style Transfer Using Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27087}, DOI={10.1609/aaai.v37i13.27087}, abstractNote={Code style refers to attributes of computer programs that affect their readability, maintainability, and performance. Enterprises consider code style as important and enforce style requirements during code commits. Tools that assist in coding style compliance and transformations are highly valuable. However, many key aspects of programming style transfer are difficult to automate, as it can be challenging to specify the patterns required to perform the transfer algorithmically. 
In this paper, we describe a system called CodeStylist which uses neural methods to perform style transfer on code.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ting, Chih-Kai and Munson, Karl and Wade, Serenity and Savla, Anish and Kate, Kiran and Srinivas, Kavitha}, year={2024}, month={Jul.}, pages={16485-16487} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27087/26859", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27087", + "pdf_size": 209347, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1461902928099759202&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "ucsc.edu;ucsc.edu;ucsc.edu;ucsc.edu;us.ibm.com;ibm.com", + "email": "ucsc.edu;ucsc.edu;ucsc.edu;ucsc.edu;us.ibm.com;ibm.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;1", + "aff_unique_norm": "University of California, Santa Cruz;IBM", + "aff_unique_dep": ";IBM Research", + "aff_unique_url": "https://www.ucsc.edu;https://www.ibm.com/research", + "aff_unique_abbr": "UCSC;IBM", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Santa Cruz;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26503", + "title": "Cogito Ergo Summ: Abstractive Summarization of Biomedical Papers via Semantic Parsing Graphs and Consistency Rewards", + "track": "main", + "status": "Technical", + "abstract": "The automatic synthesis of biomedical publications catalyzes a profound research interest elicited by literature congestion. Current sequence-to-sequence models mainly rely on the lexical surface and seldom consider the deep semantic interconnections between the entities mentioned in the source document. 
Such superficiality translates into fabricated, poorly informative, redundant, and near-extractive summaries that severely restrict their real-world application in biomedicine, where the specialized jargon and the convoluted facts further emphasize task complexity. To fill this gap, we argue that the summarizer should acquire semantic interpretation over input, exploiting structured and unambiguous representations to capture and conserve the most relevant parts of the text content. This paper presents CogitoErgoSumm, the first framework for biomedical abstractive summarization equipping large pre-trained language models with rich semantic graphs. Precisely, we infuse graphs from two complementary semantic parsing techniques with different goals and granularities\u2014Event Extraction and Abstract Meaning Representation, also designing a reward signal to maximize information content preservation through reinforcement learning. Extensive quantitative and qualitative evaluations on the CDSR dataset show that our solution achieves competitive performance according to multiple metrics, despite using 2.5x fewer parameters. Results and ablation studies indicate that our joint text-graph model generates more enlightening, readable, and consistent summaries. 
Code available at: https://github.com/disi-unibo-nlp/cogito-ergo-summ.", + "primary_area": "speech natural language processing", + "author": "Giacomo Frisoni; Paolo Italiani; Stefano Salvatori; Gianluca Moro", + "authorids": "", + "aff": "Department of Computer Science and Engineering, University of Bologna, Cesena Campus; Department of Computer Science and Engineering, University of Bologna, Cesena Campus; Department of Computer Science and Engineering, University of Bologna, Cesena Campus; Department of Computer Science and Engineering, University of Bologna, Cesena Campus", + "bibtex": "@article{Frisoni_Italiani_Salvatori_Moro_2023, title={Cogito Ergo Summ: Abstractive Summarization of Biomedical Papers via Semantic Parsing Graphs and Consistency Rewards}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26503}, DOI={10.1609/aaai.v37i11.26503}, abstractNote={The automatic synthesis of biomedical publications catalyzes a profound research interest elicited by literature congestion. Current sequence-to-sequence models mainly rely on the lexical surface and seldom consider the deep semantic interconnections between the entities mentioned in the source document. Such superficiality translates into fabricated, poorly informative, redundant, and near-extractive summaries that severely restrict their real-world application in biomedicine, where the specialized jargon and the convoluted facts further emphasize task complexity. To fill this gap, we argue that the summarizer should acquire semantic interpretation over input, exploiting structured and unambiguous representations to capture and conserve the most relevant parts of the text content. This paper presents CogitoErgoSumm, the first framework for biomedical abstractive summarization equipping large pre-trained language models with rich semantic graphs. 
Precisely, we infuse graphs from two complementary semantic parsing techniques with different goals and granularities\u2014Event Extraction and Abstract Meaning Representation, also designing a reward signal to maximize information content preservation through reinforcement learning. Extensive quantitative and qualitative evaluations on the CDSR dataset show that our solution achieves competitive performance according to multiple metrics, despite using 2.5x fewer parameters. Results and ablation studies indicate that our joint text-graph model generates more enlightening, readable, and consistent summaries. Code available at: https://github.com/disi-unibo-nlp/cogito-ergo-summ.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Frisoni, Giacomo and Italiani, Paolo and Salvatori, Stefano and Moro, Gianluca}, year={2023}, month={Jun.}, pages={12781-12789} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26503/26275", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26503", + "pdf_size": 2114752, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8973031833927907787&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "unibo.it;unibo.it;unibo.it;unibo.it", + "email": "unibo.it;unibo.it;unibo.it;unibo.it", + "github": "https://github.com/disi-unibo-nlp/cogito-ergo-summ", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Bologna", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.unibo.it", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Cesena", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25755", + "title": "Collective Intelligence in Human-AI Teams: A Bayesian Theory of Mind Approach", + "track": "main", + "status": "Technical", + "abstract": "We 
develop a network of Bayesian agents that collectively model the mental states of teammates from the observed communication. Using a generative computational approach to cognition, we make two contributions. First, we show that our agent could generate interventions that improve the collective intelligence of a human-AI team beyond what humans alone would achieve. Second, we develop a real-time measure of human's theory of mind ability and test theories about human cognition. We use data collected from an online experiment in which 145 individuals in 29 human-only teams of five communicate through a chat-based system to solve a cognitive task. We find that humans (a) struggle to fully integrate information from teammates into their decisions, especially when communication load is high, and (b) have cognitive biases which lead them to underweight certain useful, but ambiguous, information. Our theory of mind ability measure predicts both individual- and team-level performance. Observing teams' first 25% of messages explains about 8% of the variation in final team performance, a 170% improvement compared to the current state of the art.", + "primary_area": "humans and ai", + "author": "Samuel Westby; Christoph Riedl", + "authorids": "", + "aff": "Network Science Institute, Northeastern University, Boston, MA; Khoury College of Computer Sciences, Northeastern University, Boston, MA", + "bibtex": "@article{Westby_Riedl_2023, title={Collective Intelligence in Human-AI Teams: A Bayesian Theory of Mind Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25755}, DOI={10.1609/aaai.v37i5.25755}, abstractNote={We develop a network of Bayesian agents that collectively model the mental states of teammates from the observed communication. Using a generative computational approach to cognition, we make two contributions. 
First, we show that our agent could generate interventions that improve the collective intelligence of a human-AI team beyond what humans alone would achieve. Second, we develop a real-time measure of human\u2019s theory of mind ability and test theories about human cognition. We use data collected from an online experiment in which 145 individuals in 29 human-only teams of five communicate through a chat-based system to solve a cognitive task. We find that humans (a) struggle to fully integrate information from teammates into their decisions, especially when communication load is high, and (b) have cognitive biases which lead them to underweight certain useful, but ambiguous, information. Our theory of mind ability measure predicts both individual- and team-level performance. Observing teams\u2019 first 25% of messages explains about 8% of the variation in final team performance, a 170% improvement compared to the current state of the art.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Westby, Samuel and Riedl, Christoph}, year={2023}, month={Jun.}, pages={6119-6127} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25755/25527", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25755", + "pdf_size": 432278, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17762146868407633085&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "northeastern.edu;northeastern.edu", + "email": "northeastern.edu;northeastern.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Northeastern University", + "aff_unique_dep": "Network Science Institute", + "aff_unique_url": "https://www.northeastern.edu", + "aff_unique_abbr": "NU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Boston", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"article-25730", + "title": "Collusion-Proof and Sybil-Proof Reward Mechanisms for Query Incentive Networks", + "track": "main", + "status": "Technical", + "abstract": "This paper explores reward mechanisms for a query incentive network in which agents seek information from social networks. In a query tree issued by the task owner, each agent is rewarded by the owner for contributing to the solution, for instance, solving the task or inviting others to solve it. The reward mechanism determines the reward for each agent and motivates all agents to propagate and report their information truthfully. In particular, the reward cannot exceed the budget set by the task owner. However, our impossibility results demonstrate that a reward mechanism cannot simultaneously achieve Sybil-proof (agents benefit from manipulating multiple fake identities), collusion-proof (multiple agents pretend as a single agent to improve the reward), and other essential properties. In order to address these issues, we propose two novel reward mechanisms. The first mechanism achieves Sybil-proof and collusion-proof, respectively; the second mechanism sacrifices Sybil-proof to achieve the approximate versions of Sybil-proof and collusion-proof. Additionally, we show experimentally that our second reward mechanism outperforms the existing ones.", + "primary_area": "game theory and economic paradigms", + "author": "Youjia Zhang; Pingzhong Tang", + "authorids": "", + "aff": "IIIS, Tsinghua University; IIIS, Tsinghua University", + "bibtex": "@article{Zhang_Tang_2023, title={Collusion-Proof and Sybil-Proof Reward Mechanisms for Query Incentive Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25730}, DOI={10.1609/aaai.v37i5.25730}, abstractNote={This paper explores reward mechanisms for a query incentive network in which agents seek information from social networks. 
In a query tree issued by the task owner, each agent is rewarded by the owner for contributing to the solution, for instance, solving the task or inviting others to solve it. The reward mechanism determines the reward for each agent and motivates all agents to propagate and report their information truthfully. In particular, the reward cannot exceed the budget set by the task owner. However, our impossibility results demonstrate that a reward mechanism cannot simultaneously achieve Sybil-proof (agents benefit from manipulating multiple fake identities), collusion-proof (multiple agents pretend as a single agent to improve the reward), and other essential properties. In order to address these issues, we propose two novel reward mechanisms. The first mechanism achieves Sybil-proof and collusion-proof, respectively; the second mechanism sacrifices Sybil-proof to achieve the approximate versions of Sybil-proof and collusion-proof. Additionally, we show experimentally that our second reward mechanism outperforms the existing ones.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Youjia and Tang, Pingzhong}, year={2023}, month={Jun.}, pages={5892-5899} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25730/25502", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25730", + "pdf_size": 531551, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14538173962524076584&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;gmail.com", + "email": "mails.tsinghua.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Institute for Interdisciplinary Information Sciences", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26821", + "title": "Combating Disinformation on Social Media and Its Challenges: A Computational Perspective", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "The use of social media has accelerated information sharing and instantaneous communications. The low barrier to entering social media enables more users to participate and keeps them engaged longer, incentivizing individuals with a hidden agenda to spread disinformation online to manipulate information and sway opinion. Disinformation, such as fake news, hoaxes, and conspiracy theories, has increasingly become a hindrance to the functioning of online social media as an effective channel for trustworthy information. Therefore, it is imperative to understand disinformation and systematically investigate how to improve resistance against it. This article highlights relevant theories and recent advancements of detecting disinformation from a computational perspective, and urges the need for future interdisciplinary research.", + "primary_area": "", + "author": "Kai Shu", + "authorids": "", + "aff": "Department of Computer Science, Illinois Institute of Technology", + "bibtex": "@article{Shu_2024, title={Combating Disinformation on Social Media and Its Challenges: A Computational Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26821}, DOI={10.1609/aaai.v37i13.26821}, abstractNote={The use of social media has accelerated information sharing and instantaneous communications. The low barrier to entering social media enables more users to participate and keeps them engaged longer, incentivizing individuals with a hidden agenda to spread disinformation online to manipulate information and sway opinion. 
Disinformation, such as fake news, hoaxes, and conspiracy theories, has increasingly become a hindrance to the functioning of online social media as an effective channel for trustworthy information. Therefore, it is imperative to understand disinformation and systematically investigate how to improve resistance against it. This article highlights relevant theories and recent advancements of detecting disinformation from a computational perspective, and urges the need for future interdisciplinary research.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shu, Kai}, year={2024}, month={Jul.}, pages={15454-15454} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26821/26593", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26821", + "pdf_size": 43673, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3276542977308846007&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "iit.edu", + "email": "iit.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Illinois Institute of Technology", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.iit.edu", + "aff_unique_abbr": "IIT", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26062", + "title": "Combating Mode Collapse via Offline Manifold Entropy Estimation", + "track": "main", + "status": "Technical", + "abstract": "Generative Adversarial Networks (GANs) have shown compelling results in various tasks and applications in recent years. However, mode collapse remains a critical problem in GANs. In this paper, we propose a novel training pipeline to address the mode collapse issue of GANs. 
Different from existing methods, we propose to generalize the discriminator as feature embedding and maximize the entropy of distributions in the embedding space learned by the discriminator. Specifically, two regularization terms, i.e., Deep Local Linear Embedding (DLLE) and Deep Isometric feature Mapping (DIsoMap), are introduced to encourage the discriminator to learn the structural information embedded in the data, such that the embedding space learned by the discriminator can be well-formed. Based on the well-learned embedding space supported by the discriminator, a non-parametric entropy estimator is designed to efficiently maximize the entropy of embedding vectors, playing as an approximation of maximizing the entropy of the generated distribution. By improving the discriminator and maximizing the distance of the most similar samples in the embedding space, our pipeline effectively reduces the mode collapse without sacrificing the quality of generated samples. Extensive experimental results show the effectiveness of our method which outperforms the GAN baseline, MaF-GAN on CelebA (9.13 vs. 12.43 in FID) and surpasses the recent state-of-the-art energy-based model on the ANIMEFACE dataset (2.80 vs. 
2.26 in Inception score).", + "primary_area": "machine learning ii", + "author": "Haozhe Liu; Bing Li; Haoqian Wu; Hanbang Liang; Yawen Huang; Yuexiang Li; Bernard Ghanem; Yefeng Zheng", + "authorids": "", + "aff": "AI Initiative, King Abdullah University of Science and Technology (KAUST), Thuwal 23955-6900, Saudi Arabia+Jarvis Lab, Tencent, Shenzhen 518057, China; AI Initiative, King Abdullah University of Science and Technology (KAUST), Thuwal 23955-6900, Saudi Arabia; YouTu Lab, Tencent, Shenzhen 518057, China+Shenzhen University, Shenzhen 518060, China; Shenzhen University, Shenzhen 518060, China; Jarvis Lab, Tencent, Shenzhen 518057, China; Jarvis Lab, Tencent, Shenzhen 518057, China; AI Initiative, King Abdullah University of Science and Technology (KAUST), Thuwal 23955-6900, Saudi Arabia; Jarvis Lab, Tencent, Shenzhen 518057, China", + "bibtex": "@article{Liu_Li_Wu_Liang_Huang_Li_Ghanem_Zheng_2023, title={Combating Mode Collapse via Offline Manifold Entropy Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26062}, DOI={10.1609/aaai.v37i7.26062}, abstractNote={Generative Adversarial Networks (GANs) have shown compelling results in various tasks and applications in recent years. However, mode collapse remains a critical problem in GANs. In this paper, we propose a novel training pipeline to address the mode collapse issue of GANs. Different from existing methods, we propose to generalize the discriminator as feature embedding and maximize the entropy of distributions in the embedding space learned by the discriminator. Specifically, two regularization terms, i.e., Deep Local Linear Embedding (DLLE) and Deep Isometric feature Mapping (DIsoMap), are introduced to encourage the discriminator to learn the structural information embedded in the data, such that the embedding space learned by the discriminator can be well-formed. 
Based on the well-learned embedding space supported by the discriminator, a non-parametric entropy estimator is designed to efficiently maximize the entropy of embedding vectors, playing as an approximation of maximizing the entropy of the generated distribution. By improving the discriminator and maximizing the distance of the most similar samples in the embedding space, our pipeline effectively reduces the mode collapse without sacrificing the quality of generated samples. Extensive experimental results show the effectiveness of our method which outperforms the GAN baseline, MaF-GAN on CelebA (9.13 vs. 12.43 in FID) and surpasses the recent state-of-the-art energy-based model on the ANIMEFACE dataset (2.80 vs. 2.26 in Inception score).}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Haozhe and Li, Bing and Wu, Haoqian and Liang, Hanbang and Huang, Yawen and Li, Yuexiang and Ghanem, Bernard and Zheng, Yefeng}, year={2023}, month={Jun.}, pages={8834-8842} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26062/25834", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26062", + "pdf_size": 3183648, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8073654153307818129&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kaust.edu.sa;kaust.edu.sa;email.szu.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;kaust.edu.sa", + "email": "kaust.edu.sa;kaust.edu.sa;email.szu.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;kaust.edu.sa", + "github": "https://github.com/HaozheLiu-ST/MEE", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0;1+2;2;1;1;0;1", + "aff_unique_norm": "King Abdullah University of Science and Technology;Tencent;Shenzhen University", + "aff_unique_dep": "AI Initiative;Jarvis Lab;", + "aff_unique_url": "https://www.kaust.edu.sa;https://www.tencent.com;https://www.szu.edu.cn", + "aff_unique_abbr": 
"KAUST;Tencent;SZU", + "aff_campus_unique_index": "0+1;0;1+1;1;1;1;0;1", + "aff_campus_unique": "Thuwal;Shenzhen", + "aff_country_unique_index": "0+1;0;1+1;1;1;1;0;1", + "aff_country_unique": "Saudi Arabia;China" + }, + { + "id": "article-25466", + "title": "Combating Unknown Bias with Effective Bias-Conflicting Scoring and Gradient Alignment", + "track": "main", + "status": "Technical", + "abstract": "Models notoriously suffer from dataset biases which are detrimental to robustness and generalization. The identify-emphasize paradigm shows a promising effect in dealing with unknown biases. However, we find that it is still plagued by two challenges: A, the quality of the identified bias-conflicting samples is far from satisfactory; B, the emphasizing strategies just yield suboptimal performance. In this work, for challenge A, we propose an effective bias-conflicting scoring method to boost the identification accuracy with two practical strategies --- peer-picking and epoch-ensemble. For challenge B, we point out that the gradient contribution statistics can be a reliable indicator to inspect whether the optimization is dominated by bias-aligned samples. Then, we propose gradient alignment, which employs gradient statistics to balance the contributions of the mined bias-aligned and bias-conflicting samples dynamically throughout the learning process, forcing models to leverage intrinsic features to make fair decisions. 
Experiments are conducted on multiple datasets in various settings, demonstrating that the proposed solution can alleviate the impact of unknown biases and achieve state-of-the-art performance.", + "primary_area": "computer vision iii", + "author": "Bowen Zhao; Chen Chen; Qian-Wei Wang; Anfeng He; Shu-Tao Xia", + "authorids": "", + "aff": "Tsinghua University; Tencent TEG AI; Tsinghua University + Peng Cheng Laboratory; Tencent TEG AI; Tsinghua University + Peng Cheng Laboratory", + "bibtex": "@article{Zhao_Chen_Wang_He_Xia_2023, title={Combating Unknown Bias with Effective Bias-Conflicting Scoring and Gradient Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25466}, DOI={10.1609/aaai.v37i3.25466}, abstractNote={Models notoriously suffer from dataset biases which are detrimental to robustness and generalization. The identify-emphasize paradigm shows a promising effect in dealing with unknown biases. However, we find that it is still plagued by two challenges: A, the quality of the identified bias-conflicting samples is far from satisfactory; B, the emphasizing strategies just yield suboptimal performance. In this work, for challenge A, we propose an effective bias-conflicting scoring method to boost the identification accuracy with two practical strategies --- peer-picking and epoch-ensemble. For challenge B, we point out that the gradient contribution statistics can be a reliable indicator to inspect whether the optimization is dominated by bias-aligned samples. Then, we propose gradient alignment, which employs gradient statistics to balance the contributions of the mined bias-aligned and bias-conflicting samples dynamically throughout the learning process, forcing models to leverage intrinsic features to make fair decisions. 
Experiments are conducted on multiple datasets in various settings, demonstrating that the proposed solution can alleviate the impact of unknown biases and achieve state-of-the-art performance.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Bowen and Chen, Chen and Wang, Qian-Wei and He, Anfeng and Xia, Shu-Tao}, year={2023}, month={Jun.}, pages={3561-3569} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25466/25238", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25466", + "pdf_size": 2525270, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15450053848979622467&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;gmail.com;sz.tsinghua.edu.cn; ; ", + "email": "mails.tsinghua.edu.cn;gmail.com;sz.tsinghua.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0+2;1;0+2", + "aff_unique_norm": "Tsinghua University;Tencent;Peng Cheng Laboratory", + "aff_unique_dep": ";Tencent TEG AI;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://ai.tencent.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "THU;Tencent AI;PCL", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25917", + "title": "Combinatorial Causal Bandits", + "track": "main", + "status": "Technical", + "abstract": "In combinatorial causal bandits (CCB), the learning agent chooses at most K variables in each round to intervene, collects feedback from the observed variables, with the goal of minimizing expected regret on the target variable Y. We study under the context of binary generalized linear models (BGLMs) with a succinct parametric representation of the causal models. 
We present the algorithm BGLM-OFU for Markovian BGLMs (i.e., no hidden variables) based on the maximum likelihood estimation method and give regret analysis for it. For the special case of linear models with hidden variables, we apply causal inference techniques such as the do calculus to convert the original model into a Markovian model, and then show that our BGLM-OFU algorithm and another algorithm based on the linear regression both solve such linear models with hidden variables. Our novelty includes (a) considering the combinatorial intervention action space and the general causal graph structures including ones with hidden variables, (b) integrating and adapting techniques from diverse studies such as generalized linear bandits and online influence maximization, and (c) avoiding unrealistic assumptions (such as knowing the joint distribution of the parents of Y under all interventions) and regret factors exponential to causal graph size in prior studies.", + "primary_area": "machine learning i", + "author": "Shi Feng; Wei Chen", + "authorids": "", + "aff": "Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China; Microsoft Research, Beijing, China", + "bibtex": "@article{Feng_Chen_2023, title={Combinatorial Causal Bandits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25917}, DOI={10.1609/aaai.v37i6.25917}, abstractNote={In combinatorial causal bandits (CCB), the learning agent chooses at most K variables in each round to intervene, collects feedback from the observed variables, with the goal of minimizing expected regret on the target variable Y. We study under the context of binary generalized linear models (BGLMs) with a succinct parametric representation of the causal models. We present the algorithm BGLM-OFU for Markovian BGLMs (i.e., no hidden variables) based on the maximum likelihood estimation method and give regret analysis for it. 
For the special case of linear models with hidden variables, we apply causal inference techniques such as the do calculus to convert the original model into a Markovian model, and then show that our BGLM-OFU algorithm and another algorithm based on the linear regression both solve such linear models with hidden variables. Our novelty includes (a) considering the combinatorial intervention action space and the general causal graph structures including ones with hidden variables, (b) integrating and adapting techniques from diverse studies such as generalized linear bandits and online influence maximization, and (c) avoiding unrealistic assumptions (such as knowing the joint distribution of the parents of Y under all interventions) and regret factors exponential to causal graph size in prior studies.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Feng, Shi and Chen, Wei}, year={2023}, month={Jun.}, pages={7550-7558} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25917/25689", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25917", + "pdf_size": 246980, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4305446196912835663&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "outlook.com;microsoft.com", + "email": "outlook.com;microsoft.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Tsinghua University;Microsoft Research", + "aff_unique_dep": "Institute for Interdisciplinary Information Sciences;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.microsoft.com/en-us/research/group/microsoft-research-asia", + "aff_unique_abbr": "Tsinghua;MSR", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25693", + "title": "Combinatorial Civic Crowdfunding with 
Budgeted Agents: Welfare Optimality at Equilibrium and Optimal Deviation", + "track": "main", + "status": "Technical", + "abstract": "Civic Crowdfunding (CC) uses the ``power of the crowd\" to garner contributions towards public projects. As these projects are non-excludable, agents may prefer to ``free-ride,\" resulting in the project not being funded. Researchers introduce refunds for single project CC to incentivize agents to contribute, guaranteeing the project's funding. These funding guarantees are applicable only when agents have an unlimited budget. This paper focuses on a combinatorial setting, where multiple projects are available for CC and agents have a limited budget. We study specific conditions where funding can be guaranteed. Naturally, funding the optimal social welfare subset of projects is desirable when every available project cannot be funded due to budget restrictions. We prove the impossibility of achieving optimal welfare at equilibrium for any monotone refund scheme. Further, given the contributions of other agents, we prove that it is NP-Hard for an agent to determine its optimal strategy. That is, while profitable deviations may exist for agents instead of funding the optimal welfare subset, it is computationally hard for an agent to find its optimal deviation. Consequently, we study different heuristics agents can use to contribute to the projects in practice. 
We demonstrate the heuristics' performance as the average-case trade-off between the welfare obtained and an agent's utility through simulations.", + "primary_area": "game theory and economic paradigms", + "author": "Sankarshan Damle; Manisha Padala; Sujit Gujar", + "authorids": "", + "aff": "Machine Learning Lab, International Institute of Information Technology, Hyderabad; Machine Learning Lab, International Institute of Information Technology, Hyderabad; Machine Learning Lab, International Institute of Information Technology, Hyderabad", + "bibtex": "@article{Damle_Padala_Gujar_2023, title={Combinatorial Civic Crowdfunding with Budgeted Agents: Welfare Optimality at Equilibrium and Optimal Deviation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25693}, DOI={10.1609/aaai.v37i5.25693}, abstractNote={Civic Crowdfunding (CC) uses the ``power of the crowd" to garner contributions towards public projects. As these projects are non-excludable, agents may prefer to ``free-ride," resulting in the project not being funded. Researchers introduce refunds for single project CC to incentivize agents to contribute, guaranteeing the project\u2019s funding. These funding guarantees are applicable only when agents have an unlimited budget. This paper focuses on a combinatorial setting, where multiple projects are available for CC and agents have a limited budget. We study specific conditions where funding can be guaranteed. Naturally, funding the optimal social welfare subset of projects is desirable when every available project cannot be funded due to budget restrictions. We prove the impossibility of achieving optimal welfare at equilibrium for any monotone refund scheme. Further, given the contributions of other agents, we prove that it is NP-Hard for an agent to determine its optimal strategy. 
That is, while profitable deviations may exist for agents instead of funding the optimal welfare subset, it is computationally hard for an agent to find its optimal deviation. Consequently, we study different heuristics agents can use to contribute to the projects in practice. We demonstrate the heuristics\u2019 performance as the average-case trade-off between the welfare obtained and an agent\u2019s utility through simulations.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Damle, Sankarshan and Padala, Manisha and Gujar, Sujit}, year={2023}, month={Jun.}, pages={5582-5590} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25693/25465", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25693", + "pdf_size": 1294804, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15184617025036393691&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "research.iiit.ac.in;research.iiit.ac.in;iiit.ac.in", + "email": "research.iiit.ac.in;research.iiit.ac.in;iiit.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "International Institute of Information Technology", + "aff_unique_dep": "Machine Learning Lab", + "aff_unique_url": "https://www.iiit.ac.in", + "aff_unique_abbr": "IIIT Hyderabad", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hyderabad", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26352", + "title": "Combining Adversaries with Anti-adversaries in Training", + "track": "main", + "status": "Technical", + "abstract": "Adversarial training is an effective learning technique to improve the robustness of deep neural networks. 
In this study, the influence of adversarial training on deep learning models in terms of fairness, robustness, and generalization is theoretically investigated under more general perturbation scope that different samples can have different perturbation directions (the adversarial and anti-adversarial directions) and varied perturbation bounds. Our theoretical explorations suggest that the combination of adversaries and anti-adversaries (samples with anti-adversarial perturbations) in training can be more effective in achieving better fairness between classes and a better tradeoff between robustness and generalization in some typical learning scenarios (e.g., noisy label learning and imbalance learning) compared with standard adversarial training. On the basis of our theoretical findings, a more general learning objective that combines adversaries and anti-adversaries with varied bounds on each training sample is presented. Meta learning is utilized to optimize the combination weights. Experiments on benchmark datasets under different learning scenarios verify our theoretical findings and the effectiveness of the proposed methodology.", + "primary_area": "machine learning iv", + "author": "Xiaoling Zhou; Nan Yang; Ou Wu", + "authorids": "", + "aff": "Center for Applied Mathematics, Tianjin University, China; Center for Applied Mathematics, Tianjin University, China; Center for Applied Mathematics, Tianjin University, China", + "bibtex": "@article{Zhou_Yang_Wu_2023, title={Combining Adversaries with Anti-adversaries in Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26352}, DOI={10.1609/aaai.v37i9.26352}, abstractNote={Adversarial training is an effective learning technique to improve the robustness of deep neural networks. 
In this study, the influence of adversarial training on deep learning models in terms of fairness, robustness, and generalization is theoretically investigated under more general perturbation scope that different samples can have different perturbation directions (the adversarial and anti-adversarial directions) and varied perturbation bounds. Our theoretical explorations suggest that the combination of adversaries and anti-adversaries (samples with anti-adversarial perturbations) in training can be more effective in achieving better fairness between classes and a better tradeoff between robustness and generalization in some typical learning scenarios (e.g., noisy label learning and imbalance learning) compared with standard adversarial training. On the basis of our theoretical findings, a more general learning objective that combines adversaries and anti-adversaries with varied bounds on each training sample is presented. Meta learning is utilized to optimize the combination weights. Experiments on benchmark datasets under different learning scenarios verify our theoretical findings and the effectiveness of the proposed methodology.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Xiaoling and Yang, Nan and Wu, Ou}, year={2023}, month={Jun.}, pages={11435-11442} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26352/26124", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26352", + "pdf_size": 3137566, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14984211400672312840&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tianjin University", + "aff_unique_dep": "Center for Applied Mathematics", + "aff_unique_url": "http://www.tju.edu.cn", + 
"aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26815", + "title": "Combining Runtime Monitoring and Machine Learning with Human Feedback", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "State-of-the-art machine-learned controllers for autonomous systems demonstrate unbeatable performance in scenarios known from training. However, in evolving environments---changing weather or unexpected anomalies---, safety and interpretability remain the greatest challenges for autonomous systems to be reliable and are the urgent scientific challenges.\n\nExisting machine-learning approaches focus on recovering lost performance but leave the system open to potential safety violations. Formal methods address this problem by rigorously analysing a smaller representation of the system but they rarely prioritize performance of the controller. \n\nWe propose to combine insights from formal verification and runtime monitoring with interpretable machine-learning design for guaranteeing reliability of autonomous systems.", + "primary_area": "", + "author": "Anna Lukina", + "authorids": "", + "aff": "Delft University of Technology, The Netherlands", + "bibtex": "@article{Lukina_2024, title={Combining Runtime Monitoring and Machine Learning with Human Feedback}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26815}, DOI={10.1609/aaai.v37i13.26815}, abstractNote={State-of-the-art machine-learned controllers for autonomous systems demonstrate unbeatable performance in scenarios known from training. However, in evolving environments---changing weather or unexpected anomalies---, safety and interpretability remain the greatest challenges for autonomous systems to be reliable and are the urgent scientific challenges. 
Existing machine-learning approaches focus on recovering lost performance but leave the system open to potential safety violations. Formal methods address this problem by rigorously analysing a smaller representation of the system but they rarely prioritize performance of the controller. We propose to combine insights from formal verification and runtime monitoring with interpretable machine-learning design for guaranteeing reliability of autonomous systems.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lukina, Anna}, year={2024}, month={Jul.}, pages={15448-15448} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26815/26587", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26815", + "pdf_size": 95188, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:euMA-bECy-AJ:scholar.google.com/&scioq=Combining+Runtime+Monitoring+and+Machine+Learning+with+Human+Feedback&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "tudelft.nl", + "email": "tudelft.nl", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Delft University of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tudelft.nl", + "aff_unique_abbr": "TUDelft", + "aff_country_unique_index": "0", + "aff_country_unique": "The Netherlands" + }, + { + "id": "article-25909", + "title": "Combining Slow and Fast: Complementary Filtering for Dynamics Learning", + "track": "main", + "status": "Technical", + "abstract": "Modeling an unknown dynamical system is crucial in order to predict the future behavior of the system. A standard approach is training recurrent models on measurement data. While these models typically provide exact short-term predictions, accumulating errors yield deteriorated long-term behavior. 
In contrast, models with reliable long-term predictions can often be obtained, either by training a robust but less detailed model, or by leveraging physics-based simulations. In both cases, inaccuracies in the models yield a lack of short-time details. Thus, different models with contrastive properties on different time horizons are available. This observation immediately raises the question: Can we obtain predictions that combine the best of both worlds? Inspired by sensor fusion tasks, we interpret the problem in the frequency domain and leverage classical methods from signal processing, in particular complementary filters. This filtering technique combines two signals by applying a high-pass filter to one signal, and low-pass filtering the other. Essentially, the high-pass filter extracts high-frequencies, whereas the low-pass filter extracts low frequencies. Applying this concept to dynamics model learning enables the construction of models that yield accurate long- and short-term predictions. 
Here, we propose two methods, one being purely learning-based and the other one being a hybrid model that requires an additional physics-based simulator.", + "primary_area": "machine learning i", + "author": "Katharina Ensinger; Sebastian Ziesche; Barbara Rakitsch; Michael Tiemann; Sebastian Trimpe", + "authorids": "", + "aff": "Bosch Center for Artificial Intelligence, Renningen, Germany+Institute for Data Science in Mechanical Engineering, RWTH Aachen University; Bosch Center for Artificial Intelligence, Renningen, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany; Institute for Data Science in Mechanical Engineering, RWTH Aachen University", + "bibtex": "@article{Ensinger_Ziesche_Rakitsch_Tiemann_Trimpe_2023, title={Combining Slow and Fast: Complementary Filtering for Dynamics Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25909}, DOI={10.1609/aaai.v37i6.25909}, abstractNote={Modeling an unknown dynamical system is crucial in order to predict the future behavior of the system. A standard approach is training recurrent models on measurement data. While these models typically provide exact short-term predictions, accumulating errors yield deteriorated long-term behavior. In contrast, models with reliable long-term predictions can often be obtained, either by training a robust but less detailed model, or by leveraging physics-based simulations. In both cases, inaccuracies in the models yield a lack of short-time details. Thus, different models with contrastive properties on different time horizons are available. This observation immediately raises the question: Can we obtain predictions that combine the best of both worlds? Inspired by sensor fusion tasks, we interpret the problem in the frequency domain and leverage classical methods from signal processing, in particular complementary filters. 
This filtering technique combines two signals by applying a high-pass filter to one signal, and low-pass filtering the other. Essentially, the high-pass filter extracts high-frequencies, whereas the low-pass filter extracts low frequencies. Applying this concept to dynamics model learning enables the construction of models that yield accurate long- and short-term predictions. Here, we propose two methods, one being purely learning-based and the other one being a hybrid model that requires an additional physics-based simulator.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ensinger, Katharina and Ziesche, Sebastian and Rakitsch, Barbara and Tiemann, Michael and Trimpe, Sebastian}, year={2023}, month={Jun.}, pages={7476-7484} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25909/25681", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25909", + "pdf_size": 3858935, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12138471640147698692&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "bosch.com;de.bosch.com;de.bosch.com;de.bosch.com;dsme.rwth-aachen.de", + "email": "bosch.com;de.bosch.com;de.bosch.com;de.bosch.com;dsme.rwth-aachen.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;0;1", + "aff_unique_norm": "Bosch Center for Artificial Intelligence;RWTH Aachen University", + "aff_unique_dep": "Artificial Intelligence;Institute for Data Science in Mechanical Engineering", + "aff_unique_url": "https://www.bosch-ai.com;https://www.rwth-aachen.de", + "aff_unique_abbr": "BCAI;RWTH", + "aff_campus_unique_index": "0+1;0;0;0;1", + "aff_campus_unique": "Renningen;Aachen", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25697", + "title": "Commitment Games with Conditional Information Disclosure", + "track": "main", + "status": "Technical", + "abstract": 
"The conditional commitment abilities of mutually transparent computer agents have been studied in previous work on commitment games and program equilibrium. This literature has shown how these abilities can help resolve Prisoner\u2019s Dilemmas and other failures of cooperation in complete information settings. But inefficiencies due to private information have been neglected thus far in this literature, despite the fact that these problems are pervasive and might also be addressed by greater mutual transparency. In this work, we introduce a framework for commitment games with a new kind of conditional commitment device, which agents can use to conditionally disclose private information. We prove a folk theorem for this setting that provides sufficient conditions for ex post efficiency, and thus represents a model of ideal cooperation between agents without a third-party mediator. Further, extending previous work on program equilibrium, we develop an implementation of conditional information disclosure. We show that this implementation forms program \u03b5-Bayesian Nash equilibria corresponding to the Bayesian Nash equilibria of these commitment games.", + "primary_area": "game theory and economic paradigms", + "author": "Anthony DiGiovanni; Jesse Clifton", + "authorids": "", + "aff": "Center on Long-Term Risk, London, UK; Center on Long-Term Risk, London, UK", + "bibtex": "@article{DiGiovanni_Clifton_2023, title={Commitment Games with Conditional Information Disclosure}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25697}, DOI={10.1609/aaai.v37i5.25697}, abstractNote={The conditional commitment abilities of mutually transparent computer agents have been studied in previous work on commitment games and program equilibrium. This literature has shown how these abilities can help resolve Prisoner\u2019s Dilemmas and other failures of cooperation in complete information settings. 
But inefficiencies due to private information have been neglected thus far in this literature, despite the fact that these problems are pervasive and might also be addressed by greater mutual transparency. In this work, we introduce a framework for commitment games with a new kind of conditional commitment device, which agents can use to conditionally disclose private information. We prove a folk theorem for this setting that provides sufficient conditions for ex post efficiency, and thus represents a model of ideal cooperation between agents without a third-party mediator. Further, extending previous work on program equilibrium, we develop an implementation of conditional information disclosure. We show that this implementation forms program \u03b5-Bayesian Nash equilibria corresponding to the Bayesian Nash equilibria of these commitment games.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={DiGiovanni, Anthony and Clifton, Jesse}, year={2023}, month={Jun.}, pages={5616-5623} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25697/25469", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25697", + "pdf_size": 165921, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1419705110298729446&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "longtermrisk.org;longtermrisk.org", + "email": "longtermrisk.org;longtermrisk.org", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Center on Long-Term Risk", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25791", + "title": "Common Knowledge of Abstract Groups", + "track": "main", + "status": "Technical", + "abstract": "Epistemic logics typically talk about 
knowledge of individual agents or groups of explicitly listed agents. Often, however, one wishes to express knowledge of groups of agents specified by a given property, as in \u2018it is common knowledge among economists\u2019. We introduce such a logic of common knowledge, which we term abstract-group epistemic logic (AGEL). That is, AGEL features a common knowledge operator for groups of agents given by concepts in a separate agent logic that we keep generic, with one possible agent logic being ALC. We show that AGEL is EXPTIME-complete, with the lower bound established by reduction from standard group epistemic logic, and the upper bound by a satisfiability-preserving embedding into the full \u00b5-calculus. Further main results include a finite model property (not enjoyed by the full \u00b5-calculus) and a complete axiomatization.", + "primary_area": "knowledge representation and reasoning", + "author": "Merlin Humml; Lutz Schr\u00f6der", + "authorids": "", + "aff": "Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg, Erlangen, Germany; Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg, Erlangen, Germany", + "bibtex": "@article{Humml_Schr\u00f6der_2023, title={Common Knowledge of Abstract Groups}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25791}, DOI={10.1609/aaai.v37i5.25791}, abstractNote={Epistemic logics typically talk about knowledge of individual agents or groups of explicitly listed agents. Often, however, one wishes to express knowledge of groups of agents specified by a given property, as in \u2018it is common knowledge among economists\u2019. We introduce such a logic of common knowledge, which we term abstract-group epistemic logic (AGEL). That is, AGEL features a common knowledge operator for groups of agents given by concepts in a separate agent logic that we keep generic, with one possible agent logic being ALC. 
We show that AGEL is EXPTIME-complete, with the lower bound established by reduction from standard group epistemic logic, and the upper bound by a satisfiability-preserving embedding into the full \u00b5-calculus. Further main results include a finite model property (not enjoyed by the full \u00b5-calculus) and a complete axiomatization.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Humml, Merlin and Schr\u00f6der, Lutz}, year={2023}, month={Jun.}, pages={6434-6441} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25791/25563", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25791", + "pdf_size": 166346, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2595213723726562589&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "fau.de;fau.de", + "email": "fau.de;fau.de", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg", + "aff_unique_dep": "", + "aff_unique_url": "https://www.fau.de", + "aff_unique_abbr": "FAU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Erlangen", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25990", + "title": "Communication-Efficient Collaborative Best Arm Identification", + "track": "main", + "status": "Technical", + "abstract": "We investigate top-m arm identification, a basic problem in bandit theory, in a multi-agent learning model in which agents collaborate to learn an objective function. We are interested in designing collaborative learning algorithms that achieve maximum speedup (compared to single-agent learning algorithms) using minimum communication cost, as communication is frequently the bottleneck in multi-agent learning. 
We give both algorithmic and impossibility results, and conduct a set of experiments to demonstrate the effectiveness of our algorithms.", + "primary_area": "machine learning ii", + "author": "Nikolai Karpov; Qin Zhang", + "authorids": "", + "aff": "Indiana University Bloomington; Indiana University Bloomington", + "bibtex": "@article{Karpov_Zhang_2023, title={Communication-Efficient Collaborative Best Arm Identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25990}, DOI={10.1609/aaai.v37i7.25990}, abstractNote={We investigate top-m arm identification, a basic problem in bandit theory, in a multi-agent learning model in which agents collaborate to learn an objective function. We are interested in designing collaborative learning algorithms that achieve maximum speedup (compared to single-agent learning algorithms) using minimum communication cost, as communication is frequently the bottleneck in multi-agent learning. We give both algorithmic and impossibility results, and conduct a set of experiments to demonstrate the effectiveness of our algorithms.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Karpov, Nikolai and Zhang, Qin}, year={2023}, month={Jun.}, pages={8203-8210} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25990/25762", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25990", + "pdf_size": 336147, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9591656177766343199&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "iu.edu;indiana.edu", + "email": "iu.edu;indiana.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Indiana University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.indiana.edu", + "aff_unique_abbr": "IU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Bloomington", + "aff_country_unique_index": 
"0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25327", + "title": "Compact Transformer Tracker with Correlative Masked Modeling", + "track": "main", + "status": "Technical", + "abstract": "Transformer framework has been showing superior performances in visual object tracking for its great strength in information aggregation across the template and search image with the well-known attention mechanism. Most recent advances focus on exploring attention mechanism variants for better information aggregation. We find these schemes are equivalent to or even just a subset of the basic self-attention mechanism. In this paper, we prove that the vanilla self-attention structure is sufficient for information aggregation, and structural adaption is unnecessary. The key is not the attention structure, but how to extract the discriminative feature for tracking and enhance the communication between the target and search image. Based on this finding, we adopt the basic vision transformer (ViT) architecture as our main tracker and concatenate the template and search image for feature embedding. To guide the encoder to capture the invariant feature for tracking, we attach a lightweight correlative masked decoder which reconstructs the original template and search image from the corresponding masked tokens. The correlative masked decoder serves as a plugin for the compact transformer tracker and is skipped in inference. Our compact tracker uses the most simple structure which only consists of a ViT backbone and a box head, and can run at 40 fps. Extensive experiments show the proposed compact transform tracker outperforms existing approaches, including advanced attention variants, and demonstrates the sufficiency of self-attention in tracking tasks. Our method achieves state-of-the-art performance on five challenging datasets, along with the VOT2020, UAV123, LaSOT, TrackingNet, and GOT-10k benchmarks. 
Our project is available at https://github.com/HUSTDML/CTTrack.", + "primary_area": "computer vision ii", + "author": "Zikai Song; Run Luo; Junqing Yu; Yi-Ping Phoebe Chen; Wei Yang", + "authorids": "", + "aff": "Huazhong University of Science and Technology, China; Huazhong University of Science and Technology, China; Huazhong University of Science and Technology, China; La Trobe University, Australia; Huazhong University of Science and Technology, China", + "bibtex": "@article{Song_Luo_Yu_Chen_Yang_2023, title={Compact Transformer Tracker with Correlative Masked Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25327}, DOI={10.1609/aaai.v37i2.25327}, abstractNote={Transformer framework has been showing superior performances in visual object tracking for its great strength in information aggregation across the template and search image with the well-known attention mechanism. Most recent advances focus on exploring attention mechanism variants for better information aggregation. We find these schemes are equivalent to or even just a subset of the basic self-attention mechanism. In this paper, we prove that the vanilla self-attention structure is sufficient for information aggregation, and structural adaption is unnecessary. The key is not the attention structure, but how to extract the discriminative feature for tracking and enhance the communication between the target and search image. Based on this finding, we adopt the basic vision transformer (ViT) architecture as our main tracker and concatenate the template and search image for feature embedding. To guide the encoder to capture the invariant feature for tracking, we attach a lightweight correlative masked decoder which reconstructs the original template and search image from the corresponding masked tokens. The correlative masked decoder serves as a plugin for the compact transformer tracker and is skipped in inference. 
Our compact tracker uses the most simple structure which only consists of a ViT backbone and a box head, and can run at 40 fps. Extensive experiments show the proposed compact transform tracker outperforms existing approaches, including advanced attention variants, and demonstrates the sufficiency of self-attention in tracking tasks. Our method achieves state-of-the-art performance on five challenging datasets, along with the VOT2020, UAV123, LaSOT, TrackingNet, and GOT-10k benchmarks. Our project is available at https://github.com/HUSTDML/CTTrack.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Zikai and Luo, Run and Yu, Junqing and Chen, Yi-Ping Phoebe and Yang, Wei}, year={2023}, month={Jun.}, pages={2321-2329} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25327/25099", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25327", + "pdf_size": 1471855, + "gs_citation": 71, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7756320265092346449&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;latrobe.edu.au;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;latrobe.edu.au;hust.edu.cn", + "github": "https://github.com/HUSTDML/CTTrack", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Huazhong University of Science and Technology;La Trobe University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hust.edu.cn;https://www.latrobe.edu.au", + "aff_unique_abbr": "HUST;LTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26513", + "title": "Competition or Cooperation? 
Exploring Unlabeled Data via Challenging Minimax Game for Semi-supervised Relation Extraction", + "track": "main", + "status": "Technical", + "abstract": "Semi-Supervised Relation Extraction aims at learning well-performed RE models with limited labeled and large-scale unlabeled data. Existing methods mainly suffer from semantic drift and insufficient supervision, which severely limit the performance. To address these problems, recent work tends to design dual modules to work cooperatively for mutual enhancement. However, the consensus of two modules greatly restricts the model from exploring diverse relation expressions in unlabeled set, which hinders the performance as well as model generalization. To tackle this problem, in this paper, we propose a novel competition-based method AdvSRE. We set up a challenging minimax game on unlabeled data between two modules, Generator and Discriminator, and assign them with conflicting objectives. During the competition game, one module may find any possible chance to beat the other, which develops two modules' abilities until relation expressions cannot be further explored. To exploit label information, Discriminator is further asked to predict specific relation for each sentence. 
Experiment results on two benchmarks show new state-of-the-art performance over baselines, demonstrating the effectiveness of proposed AdvSRE.", + "primary_area": "speech natural language processing", + "author": "Yu Hong; Jiahang Li; Jianchuan Feng; Chenghua Huang; Zhixu Li; JIanfeng Qu; Yanghua Xiao; Wei Wang", + "authorids": "", + "aff": "Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; School of Computer Science and Technology, Soochow University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University + Fudan-Aishu Cognitive Intelligence Joint Research Center, Shanghai, China; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University", + "bibtex": "@article{Hong_Li_Feng_Huang_Li_Qu_Xiao_Wang_2023, title={Competition or Cooperation? Exploring Unlabeled Data via Challenging Minimax Game for Semi-supervised Relation Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26513}, DOI={10.1609/aaai.v37i11.26513}, abstractNote={Semi-Supervised Relation Extraction aims at learning well-performed RE models with limited labeled and large-scale unlabeled data. Existing methods mainly suffer from semantic drift and insufficient supervision, which severely limit the performance. To address these problems, recent work tends to design dual modules to work cooperatively for mutual enhancement. However, the consensus of two modules greatly restricts the model from exploring diverse relation expressions in unlabeled set, which hinders the performance as well as model generalization. 
To tackle this problem, in this paper, we propose a novel competition-based method AdvSRE. We set up a challenging minimax game on unlabeled data between two modules, Generator and Discriminator, and assign them with conflicting objectives. During the competition game, one module may find any possible chance to beat the other, which develops two modules\u2019 abilities until relation expressions cannot be further explored. To exploit label information, Discriminator is further asked to predict specific relation for each sentence. Experiment results on two benchmarks show new state-of-the-art performance over baselines, demonstrating the effectiveness of proposed AdvSRE.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hong, Yu and Li, Jiahang and Feng, Jianchuan and Huang, Chenghua and Li, Zhixu and Qu, JIanfeng and Xiao, Yanghua and Wang, Wei}, year={2023}, month={Jun.}, pages={12872-12880} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26513/26285", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26513", + "pdf_size": 1316753, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1166355168620787838&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 4, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;suda.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;suda.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;1;0+0;0", + "aff_unique_norm": "Fudan University;Soochow University", + "aff_unique_dep": "School of Computer Science;School of Computer Science and Technology", + "aff_unique_url": "https://www.fudan.edu.cn;https://eng.suda.edu.cn/", + "aff_unique_abbr": "Fudan;Soochow U", + "aff_campus_unique_index": "0;0;0;0;0;0+0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": 
"0;0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25706", + "title": "Competition, Alignment, and Equilibria in Digital Marketplaces", + "track": "main", + "status": "Technical", + "abstract": "Competition between traditional platforms is known to improve user utility by aligning the platform's actions with user preferences. But to what extent is alignment exhibited in data-driven marketplaces? To study this question from a theoretical perspective, we introduce a duopoly market where platform actions are bandit algorithms and the two platforms compete for user participation. A salient feature of this market is that the quality of recommendations depends on both the bandit algorithm and the amount of data provided by interactions from users. This interdependency between the algorithm performance and the actions of users complicates the structure of market equilibria and their quality in terms of user utility. Our main finding is that competition in this market does not perfectly align market outcomes with user utility. Interestingly, market outcomes exhibit misalignment not only when the platforms have separate data repositories, but also when the platforms have a shared data repository. Nonetheless, the data sharing assumptions impact what mechanism drives misalignment and also affect the specific form of misalignment (e.g. the quality of the best-case and worst-case market outcomes). More broadly, our work illustrates that competition in digital marketplaces has subtle consequences for user utility that merit further investigation.", + "primary_area": "game theory and economic paradigms", + "author": "Meena Jagadeesan; Michael I. 
Jordan; Nika Haghtalab", + "authorids": "", + "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", + "bibtex": "@article{Jagadeesan_Jordan_Haghtalab_2023, title={Competition, Alignment, and Equilibria in Digital Marketplaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25706}, DOI={10.1609/aaai.v37i5.25706}, abstractNote={Competition between traditional platforms is known to improve user utility by aligning the platform\u2019s actions with user preferences. But to what extent is alignment exhibited in data-driven marketplaces? To study this question from a theoretical perspective, we introduce a duopoly market where platform actions are bandit algorithms and the two platforms compete for user participation. A salient feature of this market is that the quality of recommendations depends on both the bandit algorithm and the amount of data provided by interactions from users. This interdependency between the algorithm performance and the actions of users complicates the structure of market equilibria and their quality in terms of user utility. Our main finding is that competition in this market does not perfectly align market outcomes with user utility. Interestingly, market outcomes exhibit misalignment not only when the platforms have separate data repositories, but also when the platforms have a shared data repository. Nonetheless, the data sharing assumptions impact what mechanism drives misalignment and also affect the specific form of misalignment (e.g. the quality of the best-case and worst-case market outcomes). More broadly, our work illustrates that competition in digital marketplaces has subtle consequences for user utility that merit further investigation.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jagadeesan, Meena and Jordan, Michael I. 
and Haghtalab, Nika}, year={2023}, month={Jun.}, pages={5689-5696} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25706/25478", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25706", + "pdf_size": 162522, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4336128354648010621&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "berkeley.edu;cs.berkeley.edu;berkeley.edu", + "email": "berkeley.edu;cs.berkeley.edu;berkeley.edu", + "github": "", + "project": "https://arxiv.org/abs/2208.14423", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, Berkeley", + "aff_unique_dep": "", + "aff_unique_url": "https://www.berkeley.edu", + "aff_unique_abbr": "UC Berkeley", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Berkeley", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25977", + "title": "Complement Sparsification: Low-Overhead Model Pruning for Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Federated Learning (FL) is a privacy-preserving distributed deep learning paradigm that involves substantial communication and computation effort, which is a problem for resource-constrained mobile and IoT devices. Model pruning/sparsification develops sparse models that could solve this problem, but existing sparsification solutions cannot satisfy at the same time the requirements for low bidirectional communication overhead between the server and the clients, low computation overhead at the clients, and good model accuracy, under the FL assumption that the server does not have access to raw data to fine-tune the pruned models. We propose Complement Sparsification (CS), a pruning mechanism that satisfies all these requirements through a complementary and collaborative pruning done at the server and the clients. 
At each round, CS creates a global sparse model that contains the weights that capture the general data distribution of all clients, while the clients create local sparse models with the weights pruned from the global model to capture the local trends. For improved model performance, these two types of complementary sparse models are aggregated into a dense model in each round, which is subsequently pruned in an iterative process. CS requires little computation overhead on the top of vanilla FL for both the server and the clients. We demonstrate that CS is an approximation of vanilla FL and, thus, its models perform well. We evaluate CS experimentally with two popular FL benchmark datasets. CS achieves substantial reduction in bidirectional communication, while achieving performance comparable with vanilla FL. In addition, CS outperforms baseline pruning mechanisms for FL.", + "primary_area": "machine learning ii", + "author": "Xiaopeng Jiang; Cristian Borcea", + "authorids": "", + "aff": "Department of Computer Science, New Jersey Institute of Technology, Newark, NJ, USA; Department of Computer Science, New Jersey Institute of Technology, Newark, NJ, USA", + "bibtex": "@article{Jiang_Borcea_2023, title={Complement Sparsification: Low-Overhead Model Pruning for Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25977}, DOI={10.1609/aaai.v37i7.25977}, abstractNote={Federated Learning (FL) is a privacy-preserving distributed deep learning paradigm that involves substantial communication and computation effort, which is a problem for resource-constrained mobile and IoT devices. 
Model pruning/sparsification develops sparse models that could solve this problem, but existing sparsification solutions cannot satisfy at the same time the requirements for low bidirectional communication overhead between the server and the clients, low computation overhead at the clients, and good model accuracy, under the FL assumption that the server does not have access to raw data to fine-tune the pruned models. We propose Complement Sparsification (CS), a pruning mechanism that satisfies all these requirements through a complementary and collaborative pruning done at the server and the clients. At each round, CS creates a global sparse model that contains the weights that capture the general data distribution of all clients, while the clients create local sparse models with the weights pruned from the global model to capture the local trends. For improved model performance, these two types of complementary sparse models are aggregated into a dense model in each round, which is subsequently pruned in an iterative process. CS requires little computation overhead on the top of vanilla FL for both the server and the clients. We demonstrate that CS is an approximation of vanilla FL and, thus, its models perform well. We evaluate CS experimentally with two popular FL benchmark datasets. CS achieves substantial reduction in bidirectional communication, while achieving performance comparable with vanilla FL. 
In addition, CS outperforms baseline pruning mechanisms for FL.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Xiaopeng and Borcea, Cristian}, year={2023}, month={Jun.}, pages={8087-8095} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25977/25749", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25977", + "pdf_size": 274580, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17921462182247390894&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "njit.edu;njit.edu", + "email": "njit.edu;njit.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "New Jersey Institute of Technology", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.njit.edu", + "aff_unique_abbr": "NJIT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Newark", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25081", + "title": "Complex Dynamic Neurons Improved Spiking Transformer Network for Efficient Automatic Speech Recognition", + "track": "main", + "status": "Technical", + "abstract": "The spiking neural network (SNN) using leaky-integrated-and-fire (LIF) neurons has been commonly used in automatic speech recognition (ASR) tasks. However, the LIF neuron is still relatively simple compared to that in the biological brain. Further research on more types of neurons with different scales of neuronal dynamics is necessary. Here we introduce four types of neuronal dynamics to post-process the sequential patterns generated from the spiking transformer to get the complex dynamic neuron improved spiking transformer neural network (DyTr-SNN). 
We found that the DyTr-SNN could handle the non-toy automatic speech recognition task well, representing a lower phoneme error rate, lower computational cost, and higher robustness. These results indicate that the further cooperation of SNNs and neural dynamics at the neuron and network scales might have much in store for the future, especially on the ASR tasks.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Qingyu Wang; Tielin Zhang; Minglun Han; Yi Wang; Duzhen Zhang; Bo Xu", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Shanghai, China; Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Shanghai, China; Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Shanghai, China; School of Artificial Intelligence, Jilin University, Changchun, China; Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Shanghai, China; Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Shanghai, China", + 
"bibtex": "@article{Wang_Zhang_Han_Wang_Zhang_Xu_2023, title={Complex Dynamic Neurons Improved Spiking Transformer Network for Efficient Automatic Speech Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25081}, DOI={10.1609/aaai.v37i1.25081}, abstractNote={The spiking neural network (SNN) using leaky-integrated-and-fire (LIF) neurons has been commonly used in automatic speech recognition (ASR) tasks. However, the LIF neuron is still relatively simple compared to that in the biological brain. Further research on more types of neurons with different scales of neuronal dynamics is necessary. Here we introduce four types of neuronal dynamics to post-process the sequential patterns generated from the spiking transformer to get the complex dynamic neuron improved spiking transformer neural network (DyTr-SNN). We found that the DyTr-SNN could handle the non-toy automatic speech recognition task well, representing a lower phoneme error rate, lower computational cost, and higher robustness. 
These results indicate that the further cooperation of SNNs and neural dynamics at the neuron and network scales might have much in store for the future, especially on the ASR tasks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Qingyu and Zhang, Tielin and Han, Minglun and Wang, Yi and Zhang, Duzhen and Xu, Bo}, year={2023}, month={Jun.}, pages={102-109} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25081/24853", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25081", + "pdf_size": 1752231, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15014748921091666279&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "ia.ac.cn;ia.ac.cn; ; ; ; ", + "email": "ia.ac.cn;ia.ac.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+0;0+1+0;0+1+0;2;0+1+0;0+1+0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Jilin University", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;School of Artificial Intelligence", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn;http://www.jlu.edu.cn", + "aff_unique_abbr": "CAS;UCAS;JLU", + "aff_campus_unique_index": "0+0+1;0+0+1;0+0+1;2;0+0+1;0+0+1", + "aff_campus_unique": "Beijing;Shanghai;Changchun", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25692", + "title": "Complexity of Probabilistic Inference in Random Dichotomous Hedonic Games", + "track": "main", + "status": "Technical", + "abstract": "Hedonic games model cooperative games where agents desire to form coalitions, and only care about the composition of the coalitions of which they are members. 
Focusing on various classes of dichotomous hedonic games, where each agent either approves or disapproves a given coalition, we propose the random extension, where players have an independent participation probability. We initiate the research on the computational complexity of computing the probability that coalitions and partitions are optimal or stable. While some cases admit efficient algorithms (e.g., agents approve only few coalitions), they become computationally hard (#P-hard) in their complementary scenario. We then investigate the distribution of coalitions in perfect partitions and their performance in majority games, where an agent approves coalitions in which the agent is friends with the majority of its members. When friendships independently form with a constant probability, we prove that the number of coalitions of size 3 converges in distribution to a Poisson random variable.", + "primary_area": "game theory and economic paradigms", + "author": "Saar Cohen; Noa Agmon", + "authorids": "", + "aff": "Department of Computer Science, Bar-Ilan University, Israel; Department of Computer Science, Bar-Ilan University, Israel", + "bibtex": "@article{Cohen_Agmon_2023, title={Complexity of Probabilistic Inference in Random Dichotomous Hedonic Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25692}, DOI={10.1609/aaai.v37i5.25692}, abstractNote={Hedonic games model cooperative games where agents desire to form coalitions, and only care about the composition of the coalitions of which they are members. Focusing on various classes of dichotomous hedonic games, where each agent either approves or disapproves a given coalition, we propose the random extension, where players have an independent participation probability. We initiate the research on the computational complexity of computing the probability that coalitions and partitions are optimal or stable. 
While some cases admit efficient algorithms (e.g., agents approve only few coalitions), they become computationally hard (#P-hard) in their complementary scenario. We then investigate the distribution of coalitions in perfect partitions and their performance in majority games, where an agent approves coalitions in which the agent is friends with the majority of its members. When friendships independently form with a constant probability, we prove that the number of coalitions of size 3 converges in distribution to a Poisson random variable.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cohen, Saar and Agmon, Noa}, year={2023}, month={Jun.}, pages={5573-5581} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25692/25464", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25692", + "pdf_size": 178758, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14333146087356433075&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;cs.biu.ac.il", + "email": "gmail.com;cs.biu.ac.il", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Bar-Ilan University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.biu.ac.il", + "aff_unique_abbr": "BIU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25507", + "title": "Complexity of Reasoning with Cardinality Minimality Conditions", + "track": "main", + "status": "Technical", + "abstract": "Many AI-related reasoning problems are based on the problem of satisfiability of propositional formulas with some cardinality-minimality condition. 
While the complexity of the satisfiability problem (SAT) is well understood when considering systematically all fragments of propositional logic within Schaefer\u2019s framework, this is not the case when such minimality condition is added. We consider the CardMinSat problem, which asks, given a formula \u03c6 and an atom x, whether x is true in some cardinality-minimal model of \u03c6. We completely classify the computational complexity of the CardMinSat problem within Schaefer\u2019s framework, thus paving the way for a better understanding of the tractability frontier of many AI-related reasoning problems. To this end we use advanced algebraic tools.", + "primary_area": "constraint satisfaction and optimization", + "author": "Nadia Creignou; Fr\u00e9d\u00e9ric Olive; Johannes Schmidt", + "authorids": "", + "aff": "Aix Marseille Univ, CNRS, LIS, Marseille, France; Aix Marseille Univ, CNRS, LIS, Marseille, France; J\u00f6nk\u00f6ping University, Department of Computer Science and Informatics, School of Engineering, Sweden", + "bibtex": "@article{Creignou_Olive_Schmidt_2023, title={Complexity of Reasoning with Cardinality Minimality Conditions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25507}, DOI={10.1609/aaai.v37i4.25507}, abstractNote={Many AI-related reasoning problems are based on the problem of satisfiability of propositional formulas with some cardinality-minimality condition. While the complexity of the satisfiability problem (SAT) is well understood when considering systematically all fragments of propositional logic within Schaefer\u2019s framework, this is not the case when such minimality condition is added. We consider the CardMinSat problem, which asks, given a formula \u03c6 and an atom x, whether x is true in some cardinality-minimal model of \u03c6. 
We completely classify the computational complexity of the CardMinSat problem within Schaefer\u2019s framework, thus paving the way for a better understanding of the tractability frontier of many AI-related reasoning problems. To this end we use advanced algebraic tools.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Creignou, Nadia and Olive, Fr\u00e9d\u00e9ric and Schmidt, Johannes}, year={2023}, month={Jun.}, pages={3932-3940} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25507/25279", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25507", + "pdf_size": 207459, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:o8ZBhnYfsIMJ:scholar.google.com/&scioq=Complexity+of+Reasoning+with+Cardinality+Minimality+Conditions&hl=en&as_sdt=0,44", + "gs_version_total": 6, + "aff_domain": "lis-lab.fr;lis-lab.fr;ju.se", + "email": "lis-lab.fr;lis-lab.fr;ju.se", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Aix Marseille University;J\u00f6nk\u00f6ping University", + "aff_unique_dep": "Laboratoire d'Informatique et Syst\u00e8mes;Department of Computer Science and Informatics", + "aff_unique_url": "https://www.univ-amu.fr;https://www.ju.se", + "aff_unique_abbr": "AMU;JU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Marseille;", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "France;Sweden" + }, + { + "id": "article-25768", + "title": "Complexity of Safety and coSafety Fragments of Linear Temporal Logic", + "track": "main", + "status": "Technical", + "abstract": "Linear Temporal Logic (LTL) is the de-facto standard temporal logic for system specification, whose foundational properties have been studied for over five decades. 
Safety and cosafety properties of LTL define notable fragments of LTL, where a prefix of a trace suffices to establish whether a formula is true or not over that trace. In this paper, we study the complexity of the problems of satisfiability, validity, and realizability over infinite and finite traces for the safety and cosafety fragments of LTL. As for satisfiability and validity over infinite traces, we prove that the majority of the fragments have the same complexity as full LTL, that is, they are PSPACE-complete. The picture is radically different for realizability: we find fragments with the same expressive power whose complexity varies from 2EXPTIME-complete (as full LTL) to EXPTIME-complete. Notably, for all cosafety fragments, the complexity of the three problems does not change passing from infinite to finite traces, while for all safety fragments the complexity of satisfiability (resp., realizability) over finite traces drops to NP-complete (resp., \u03a0\u1d3e\u2082- complete).", + "primary_area": "knowledge representation and reasoning", + "author": "Alessandro Artale; Luca Geatti; Nicola Gigante; Andrea Mazzullo; Angelo Montanari", + "authorids": "", + "aff": "Free University of Bozen-Bolzano; University of Udine; Free University of Bozen-Bolzano; Free University of Bozen-Bolzano; University of Udine", + "bibtex": "@article{Artale_Geatti_Gigante_Mazzullo_Montanari_2023, title={Complexity of Safety and coSafety Fragments of Linear Temporal Logic}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25768}, DOI={10.1609/aaai.v37i5.25768}, abstractNote={Linear Temporal Logic (LTL) is the de-facto standard temporal logic for system specification, whose foundational properties have been studied for over five decades. Safety and cosafety properties of LTL define notable fragments of LTL, where a prefix of a trace suffices to establish whether a formula is true or not over that trace. 
In this paper, we study the complexity of the problems of satisfiability, validity, and realizability over infinite and finite traces for the safety and cosafety fragments of LTL. As for satisfiability and validity over infinite traces, we prove that the majority of the fragments have the same complexity as full LTL, that is, they are PSPACE-complete. The picture is radically different for realizability: we find fragments with the same expressive power whose complexity varies from 2EXPTIME-complete (as full LTL) to EXPTIME-complete. Notably, for all cosafety fragments, the complexity of the three problems does not change passing from infinite to finite traces, while for all safety fragments the complexity of satisfiability (resp., realizability) over finite traces drops to NP-complete (resp., \u03a0\u1d3e\u2082- complete).}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Artale, Alessandro and Geatti, Luca and Gigante, Nicola and Mazzullo, Andrea and Montanari, Angelo}, year={2023}, month={Jun.}, pages={6236-6244} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25768/25540", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25768", + "pdf_size": 149831, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13239227514011751692&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "inf.unibz.it;uniud.it;inf.unibz.it;inf.unibz.it;uniud.it", + "email": "inf.unibz.it;uniud.it;inf.unibz.it;inf.unibz.it;uniud.it", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;1", + "aff_unique_norm": "Free University of Bozen-Bolzano;University of Udine", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unibz.it;https://www.unidue.it", + "aff_unique_abbr": "UNIBZ;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": 
"article-26082", + "title": "Compositional Prototypical Networks for Few-Shot Classification", + "track": "main", + "status": "Technical", + "abstract": "It is assumed that pre-training provides the feature extractor with strong class transferability and that high novel class generalization can be achieved by simply reusing the transferable feature extractor. In this work, our motivation is to explicitly learn some fine-grained and transferable meta-knowledge so that feature reusability can be further improved. Concretely, inspired by the fact that humans can use learned concepts or components to help them recognize novel classes, we propose Compositional Prototypical Networks (CPN) to learn a transferable prototype for each human-annotated attribute, which we call a component prototype. We empirically demonstrate that the learned component prototypes have good class transferability and can be reused to construct compositional prototypes for novel classes. Then a learnable weight generator is utilized to adaptively fuse the compositional and visual prototypes. Extensive experiments demonstrate that our method can achieve state-of-the-art results on different datasets and settings. The performance gains are especially remarkable in the 5-way 1-shot setting. 
The code is available at https://github.com/fikry102/CPN.", + "primary_area": "machine learning ii", + "author": "Qiang Lyu; Weiqiang Wang", + "authorids": "", + "aff": "School of Computer Science and Technology, University of Chinese Academy of Sciences; School of Computer Science and Technology, University of Chinese Academy of Sciences", + "bibtex": "@article{Lyu_Wang_2023, title={Compositional Prototypical Networks for Few-Shot Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26082}, DOI={10.1609/aaai.v37i7.26082}, abstractNote={It is assumed that pre-training provides the feature extractor with strong class transferability and that high novel class generalization can be achieved by simply reusing the transferable feature extractor. In this work, our motivation is to explicitly learn some fine-grained and transferable meta-knowledge so that feature reusability can be further improved. Concretely, inspired by the fact that humans can use learned concepts or components to help them recognize novel classes, we propose Compositional Prototypical Networks (CPN) to learn a transferable prototype for each human-annotated attribute, which we call a component prototype. We empirically demonstrate that the learned component prototypes have good class transferability and can be reused to construct compositional prototypes for novel classes. Then a learnable weight generator is utilized to adaptively fuse the compositional and visual prototypes. Extensive experiments demonstrate that our method can achieve state-of-the-art results on different datasets and settings. The performance gains are especially remarkable in the 5-way 1-shot setting. 
The code is available at https://github.com/fikry102/CPN.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lyu, Qiang and Wang, Weiqiang}, year={2023}, month={Jun.}, pages={9011-9019} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26082/25854", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26082", + "pdf_size": 1055407, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15119194831375469105&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.ucas.ac.cn;ucas.ac.cn", + "email": "mails.ucas.ac.cn;ucas.ac.cn", + "github": "https://github.com/fikry102/CPN", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Chinese Academy of Sciences", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ucas.ac.cn", + "aff_unique_abbr": "UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25956", + "title": "Compressed Decentralized Learning of Conditional Mean Embedding Operators in Reproducing Kernel Hilbert Spaces", + "track": "main", + "status": "Technical", + "abstract": "Conditional mean embedding (CME) operators encode conditional probability densities within Reproducing Kernel Hilbert Space (RKHS). In this paper, we present a decentralized algorithm for a collection of agents to cooperatively approximate CME over a network. Communication constraints limit the agents from sending all data to their neighbors; we only allow sparse representations of covariance operators to be exchanged among agents, compositions of which defines CME. Using a coherence-based compression scheme, we present a consensus-type algorithm that preserves the average of the approximations of the covariance operators across the network. 
We theoretically prove that the iterative dynamics in RKHS is stable. We then empirically study our algorithm to estimate CMEs to learn spectra of Koopman operators for Markovian dynamical systems and to execute approximate value iteration for Markov decision processes (MDPs).", + "primary_area": "machine learning ii", + "author": "Boya Hou; Sina Sanjari; Nathan Dahlin; Subhonmesh Bose", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, University of Illinois Urbana-Champaign, Urbana, IL 61801; Department of Electrical and Computer Engineering, University of Illinois Urbana-Champaign, Urbana, IL 61801; Department of Electrical and Computer Engineering, University of Illinois Urbana-Champaign, Urbana, IL 61801; Department of Electrical and Computer Engineering, University of Illinois Urbana-Champaign, Urbana, IL 61801", + "bibtex": "@article{Hou_Sanjari_Dahlin_Bose_2023, title={Compressed Decentralized Learning of Conditional Mean Embedding Operators in Reproducing Kernel Hilbert Spaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25956}, DOI={10.1609/aaai.v37i7.25956}, abstractNote={Conditional mean embedding (CME) operators encode conditional probability densities within Reproducing Kernel Hilbert Space (RKHS). In this paper, we present a decentralized algorithm for a collection of agents to cooperatively approximate CME over a network. Communication constraints limit the agents from sending all data to their neighbors; we only allow sparse representations of covariance operators to be exchanged among agents, compositions of which defines CME. Using a coherence-based compression scheme, we present a consensus-type algorithm that preserves the average of the approximations of the covariance operators across the network. We theoretically prove that the iterative dynamics in RKHS is stable. 
We then empirically study our algorithm to estimate CMEs to learn spectra of Koopman operators for Markovian dynamical systems and to execute approximate value iteration for Markov decision processes (MDPs).}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hou, Boya and Sanjari, Sina and Dahlin, Nathan and Bose, Subhonmesh}, year={2023}, month={Jun.}, pages={7902-7909} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25956/25728", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25956", + "pdf_size": 1861393, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1423865981874568439&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Illinois Urbana-Champaign", + "aff_unique_dep": "Department of Electrical and Computer Engineering", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Urbana", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26537", + "title": "Compressed Heterogeneous Graph for Abstractive Multi-Document Summarization", + "track": "main", + "status": "Technical", + "abstract": "Multi-document summarization (MDS) aims to generate a summary for a number of related documents. We propose HGSum \u2014 an MDS model that extends an encoder-decoder architecture to incorporate a heterogeneous graph to represent different semantic units (e.g., words and sentences) of the documents. This contrasts with existing MDS models which do not consider different edge types of graphs and as such do not capture the diversity of relationships in the documents. 
To preserve only key information and relationships of the documents in the heterogeneous graph, HGSum uses graph pooling to compress the input graph. And to guide HGSum to learn the compression, we introduce an additional objective that maximizes the similarity between the compressed graph and the graph constructed from the ground-truth summary during training. HGSum is trained end-to-end with the graph similarity and standard cross-entropy objectives. Experimental results over Multi-News, WCEP-100, and Arxiv show that HGSum outperforms state-of-the-art MDS models. The code for our model and experiments is available at: https://github.com/oaimli/HGSum.", + "primary_area": "speech natural language processing", + "author": "Miao Li; Jianzhong Qi; Jey Han Lau", + "authorids": "", + "aff": "School of Computing and Information Systems, The University of Melbourne; School of Computing and Information Systems, The University of Melbourne; School of Computing and Information Systems, The University of Melbourne", + "bibtex": "@article{Li_Qi_Lau_2023, title={Compressed Heterogeneous Graph for Abstractive Multi-Document Summarization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26537}, DOI={10.1609/aaai.v37i11.26537}, abstractNote={Multi-document summarization (MDS) aims to generate a summary for a number of related documents. We propose HGSum \u2014 an MDS model that extends an encoder-decoder architecture to incorporate a heterogeneous graph to represent different semantic units (e.g., words and sentences) of the documents. This contrasts with existing MDS models which do not consider different edge types of graphs and as such do not capture the diversity of relationships in the documents. To preserve only key information and relationships of the documents in the heterogeneous graph, HGSum uses graph pooling to compress the input graph. 
And to guide HGSum to learn the compression, we introduce an additional objective that maximizes the similarity between the compressed graph and the graph constructed from the ground-truth summary during training. HGSum is trained end-to-end with the graph similarity and standard cross-entropy objectives. Experimental results over Multi-News, WCEP-100, and Arxiv show that HGSum outperforms state-of-the-art MDS models. The code for our model and experiments is available at: https://github.com/oaimli/HGSum.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Miao and Qi, Jianzhong and Lau, Jey Han}, year={2023}, month={Jun.}, pages={13085-13093} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26537/26309", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26537", + "pdf_size": 1337255, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3177395121768495820&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "student.unimelb.edu.au;unimelb.edu.au;unimelb.edu.au", + "email": "student.unimelb.edu.au;unimelb.edu.au;unimelb.edu.au", + "github": "https://github.com/oaimli/HGSum", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The University of Melbourne", + "aff_unique_dep": "School of Computing and Information Systems", + "aff_unique_url": "https://www.unimelb.edu.au", + "aff_unique_abbr": "UniMelb", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Melbourne", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26857", + "title": "Compressing Cross-Lingual Multi-Task Models at Qualtrics", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Experience management is an emerging business area where organizations focus on understanding the feedback of customers and employees in order to improve their end-to-end 
experiences.\nThis results in a unique set of machine learning problems to help understand how people feel, discover issues they care about, and find which actions need to be taken on data that are different in content and distribution from traditional NLP domains.\nIn this paper, we present a case study of building text analysis applications that perform multiple classification tasks efficiently in 12 languages in the nascent business area of experience management.\nIn order to scale up modern ML methods on experience data, we leverage cross lingual and multi-task modeling techniques to consolidate our models into a single deployment to avoid overhead.\nWe also make use of model compression and model distillation to reduce overall inference latency and hardware cost to the level acceptable for business needs while maintaining model prediction quality.\nOur findings show that multi-task modeling improves task performance for a subset of experience management tasks in both XLM-R and mBert architectures.\nAmong the compressed architectures we explored, we found that MiniLM achieved the best compression/performance tradeoff.\nOur case study demonstrates a speedup of up to 15.61x with 2.60% average task degradation (or 3.29x speedup with 1.71% degradation) and estimated savings of 44% over using the original full-size model.\nThese results demonstrate a successful scaling up of text classification for the challenging new area of ML for experience management.", + "primary_area": "emerging applications of ai", + "author": "Daniel Campos; Daniel Perry; Samir Joshi; Yashmeet Gambhir; Wei Du; Zhengzheng Xing; Aaron Colak", + "authorids": "", + "aff": "Qualtrics*; Qualtrics; Qualtrics; Qualtrics; Qualtrics; Qualtrics; Qualtrics", + "bibtex": "@article{Campos_Perry_Joshi_Gambhir_Du_Xing_Colak_2024, title={Compressing Cross-Lingual Multi-Task Models at Qualtrics}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26857}, DOI={10.1609/aaai.v37i13.26857}, 
abstractNote={Experience management is an emerging business area where organizations focus on understanding the feedback of customers and employees in order to improve their end-to-end experiences.\nThis results in a unique set of machine learning problems to help understand how people feel, discover issues they care about, and find which actions need to be taken on data that are different in content and distribution from traditional NLP domains.\nIn this paper, we present a case study of building text analysis applications that perform multiple classification tasks efficiently in 12 languages in the nascent business area of experience management.\nIn order to scale up modern ML methods on experience data, we leverage cross lingual and multi-task modeling techniques to consolidate our models into a single deployment to avoid overhead.\nWe also make use of model compression and model distillation to reduce overall inference latency and hardware cost to the level acceptable for business needs while maintaining model prediction quality.\nOur findings show that multi-task modeling improves task performance for a subset of experience management tasks in both XLM-R and mBert architectures.\nAmong the compressed architectures we explored, we found that MiniLM achieved the best compression/performance tradeoff.\nOur case study demonstrates a speedup of up to 15.61x with 2.60% average task degradation (or 3.29x speedup with 1.71% degradation) and estimated savings of 44% over using the original full-size model.\nThese results demonstrate a successful scaling up of text classification for the challenging new area of ML for experience management.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Campos, Daniel and Perry, Daniel and Joshi, Samir and Gambhir, Yashmeet and Du, Wei and Xing, Zhengzheng and Colak, Aaron}, year={2024}, month={Jul.}, pages={15661-15667} }", + "pdf": 
"https://ojs.aaai.org/index.php/AAAI/article/view/26857/26629", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26857", + "pdf_size": 107965, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3474970017414628173&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "illinois.edu;qualtrics.com;qualtrics.com;qualtrics.com;qualtrics.com;qualtrics.com;qualtrics.com", + "email": "illinois.edu;qualtrics.com;qualtrics.com;qualtrics.com;qualtrics.com;qualtrics.com;qualtrics.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Qualtrics", + "aff_unique_dep": "", + "aff_unique_url": "https://www.qualtrics.com", + "aff_unique_abbr": "Qualtrics", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26304", + "title": "Compressing Transformers: Features Are Low-Rank, but Weights Are Not!", + "track": "main", + "status": "Technical", + "abstract": "Transformer and its variants achieve excellent results in various computer vision and natural language processing tasks, but high computational costs and reliance on large training datasets restrict their deployment in resource-constrained settings. Low-rank approximation of model weights has been effective in compressing CNN models, but its application to transformers has been less explored and is less effective. Existing methods require the complete dataset to fine-tune compressed models, which are both time-consuming and data-hungry. This paper reveals that the features (i.e., activations) are low-rank, but model weights are surprisingly not low-rank. Hence, AAFM is proposed, which adaptively determines the compressed model structure and locally compresses each linear layer's output features rather than the model weights. 
A second stage, GFM, optimizes the entire compressed network holistically. Both AAFM and GFM only use few training samples without labels, that is, they are few-shot, unsupervised, fast and effective. For example, with only 2K images without labels, 33% of the parameters are removed in DeiT-B with 18.8% relative throughput increase, but only a 0.23% accuracy loss for ImageNet recognition. The proposed methods are successfully applied to the language modeling task in NLP, too. Besides, the few-shot compressed models generalize well in downstream tasks.", + "primary_area": "machine learning iv", + "author": "Hao Yu; Jianxin Wu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Yu_Wu_2023, title={Compressing Transformers: Features Are Low-Rank, but Weights Are Not!}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26304}, DOI={10.1609/aaai.v37i9.26304}, abstractNote={Transformer and its variants achieve excellent results in various computer vision and natural language processing tasks, but high computational costs and reliance on large training datasets restrict their deployment in resource-constrained settings. Low-rank approximation of model weights has been effective in compressing CNN models, but its application to transformers has been less explored and is less effective. Existing methods require the complete dataset to fine-tune compressed models, which are both time-consuming and data-hungry. This paper reveals that the features (i.e., activations) are low-rank, but model weights are surprisingly not low-rank. Hence, AAFM is proposed, which adaptively determines the compressed model structure and locally compresses each linear layer\u2019s output features rather than the model weights. A second stage, GFM, optimizes the entire compressed network holistically. 
Both AAFM and GFM only use few training samples without labels, that is, they are few-shot, unsupervised, fast and effective. For example, with only 2K images without labels, 33% of the parameters are removed in DeiT-B with 18.8% relative throughput increase, but only a 0.23% accuracy loss for ImageNet recognition. The proposed methods are successfully applied to the language modeling task in NLP, too. Besides, the few-shot compressed models generalize well in downstream tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Hao and Wu, Jianxin}, year={2023}, month={Jun.}, pages={11007-11015} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26304/26076", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26304", + "pdf_size": 154024, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6208020967070454591&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff_domain": "lamda.nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26273", + "title": "Computably Continuous Reinforcement-Learning Objectives Are PAC-Learnable", + "track": "main", + "status": "Technical", + "abstract": "In reinforcement learning, the classic objectives of maximizing discounted and finite-horizon cumulative rewards are PAC-learnable: There are algorithms that learn a near-optimal policy with high probability using a finite amount of samples and computation. 
\nIn recent years, researchers have introduced objectives and corresponding reinforcement-learning algorithms beyond the classic cumulative rewards, such as objectives specified as linear temporal logic formulas. \nHowever, questions about the PAC-learnability of these new objectives have remained open.\n\n\nThis work demonstrates the PAC-learnability of general reinforcement-learning objectives through sufficient conditions for PAC-learnability in two analysis settings. \nIn particular, for the analysis that considers only sample complexity, we prove that if an objective given as an oracle is uniformly continuous, then it is PAC-learnable.\nFurther, for the analysis that considers computational complexity, we prove that if an objective is computable, then it is PAC-learnable. \nIn other words, if a procedure computes successive approximations of the objective's value, then the objective is PAC-learnable.\n\n\nWe give three applications of our condition on objectives from the literature with previously unknown PAC-learnability and prove that these objectives are PAC-learnable. \nOverall, our result helps verify existing objectives' PAC-learnability. 
\nAlso, as some studied objectives that are not uniformly continuous have been shown to be not PAC-learnable, our results could guide the design of new PAC-learnable objectives.", + "primary_area": "machine learning iv", + "author": "Cambridge Yang; Michael Littman; Michael Carbin", + "authorids": "", + "aff": "MIT; Brown University; MIT", + "bibtex": "@article{Yang_Littman_Carbin_2023, title={Computably Continuous Reinforcement-Learning Objectives Are PAC-Learnable}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26273}, DOI={10.1609/aaai.v37i9.26273}, abstractNote={In reinforcement learning, the classic objectives of maximizing discounted and finite-horizon cumulative rewards are PAC-learnable: There are algorithms that learn a near-optimal policy with high probability using a finite amount of samples and computation. In recent years, researchers have introduced objectives and corresponding reinforcement-learning algorithms beyond the classic cumulative rewards, such as objectives specified as linear temporal logic formulas. However, questions about the PAC-learnability of these new objectives have remained open. This work demonstrates the PAC-learnability of general reinforcement-learning objectives through sufficient conditions for PAC-learnability in two analysis settings. In particular, for the analysis that considers only sample complexity, we prove that if an objective given as an oracle is uniformly continuous, then it is PAC-learnable.\nFurther, for the analysis that considers computational complexity, we prove that if an objective is computable, then it is PAC-learnable. In other words, if a procedure computes successive approximations of the objective\u2019s value, then the objective is PAC-learnable. We give three applications of our condition on objectives from the literature with previously unknown PAC-learnability and prove that these objectives are PAC-learnable. 
Overall, our result helps verify existing objectives\u2019 PAC-learnability. Also, as some studied objectives that are not uniformly continuous have been shown to be not PAC-learnable, our results could guide the design of new PAC-learnable objectives.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Cambridge and Littman, Michael and Carbin, Michael}, year={2023}, month={Jun.}, pages={10729-10736} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26273/26045", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26273", + "pdf_size": 210001, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:vteErzVhwGYJ:scholar.google.com/&scioq=Computably+Continuous+Reinforcement-Learning+Objectives+Are+PAC-Learnable&hl=en&as_sdt=0,5", + "gs_version_total": 5, + "aff_domain": "csail.mit.edu;cs.brown.edu;csail.mit.edu", + "email": "csail.mit.edu;cs.brown.edu;csail.mit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Brown University", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.brown.edu", + "aff_unique_abbr": "MIT;Brown", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26443", + "title": "Computing Divergences between Discrete Decomposable Models", + "track": "main", + "status": "Technical", + "abstract": "There are many applications that benefit from computing the exact divergence between 2 discrete probability measures, including machine learning. Unfortunately, in the absence of any assumptions on the structure or independencies within these distributions, computing the divergence between them is an intractable problem in high dimensions. 
We show that we are able to compute a wide family of functionals and divergences, such as the alpha-beta divergence, between two decomposable models, i.e. chordal Markov networks, in time exponential to the treewidth of these models. The alpha-beta divergence is a family of divergences that include popular divergences such as the Kullback-Leibler divergence, the Hellinger distance, and the chi-squared divergence. Thus, we can accurately compute the exact values of any of this broad class of divergences to the extent to which we can accurately model the two distributions using decomposable models.", + "primary_area": "reasoning under uncertainty", + "author": "Loong Kuan Lee; Nico Piatkowski; Fran\u00e7ois Petitjean; Geoffrey I. Webb", + "authorids": "", + "aff": "Department of Data Science and AI, Monash University, Melbourne, Australia; Fraunhofer IAIS, Schloss Birlinghoven, 53757 Sankt Augustin, Germany; Department of Data Science and AI, Monash University, Melbourne, Australia; Department of Data Science and AI, Monash University, Melbourne, Australia", + "bibtex": "@article{Lee_Piatkowski_Petitjean_Webb_2023, title={Computing Divergences between Discrete Decomposable Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26443}, DOI={10.1609/aaai.v37i10.26443}, abstractNote={There are many applications that benefit from computing the exact divergence between 2 discrete probability measures, including machine learning. Unfortunately, in the absence of any assumptions on the structure or independencies within these distributions, computing the divergence between them is an intractable problem in high dimensions. We show that we are able to compute a wide family of functionals and divergences, such as the alpha-beta divergence, between two decomposable models, i.e. chordal Markov networks, in time exponential to the treewidth of these models. 
The alpha-beta divergence is a family of divergences that include popular divergences such as the Kullback-Leibler divergence, the Hellinger distance, and the chi-squared divergence. Thus, we can accurately compute the exact values of any of this broad class of divergences to the extent to which we can accurately model the two distributions using decomposable models.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Loong Kuan and Piatkowski, Nico and Petitjean, Fran\u00e7ois and Webb, Geoffrey I.}, year={2023}, month={Jun.}, pages={12243-12251} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26443/26215", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26443", + "pdf_size": 173861, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11365222646250058318&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "lklee.dev; ; ; ", + "email": "lklee.dev; ; ; ", + "github": "", + "project": "https://arxiv.org/abs/2112.04583", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Monash University;Fraunhofer Institute for Applied Information Technology", + "aff_unique_dep": "Department of Data Science and AI;", + "aff_unique_url": "https://www.monash.edu;https://www.iais.fraunhofer.de/", + "aff_unique_abbr": "Monash;Fraunhofer IAIS", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Melbourne;", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Australia;Germany" + }, + { + "id": "article-25598", + "title": "ConTextual Masked Auto-Encoder for Dense Passage Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Dense passage retrieval aims to retrieve the relevant passages of a query from a large corpus based on dense representations (i.e., vectors) of the query and the passages. Recent studies have explored improving pre-trained language models to boost dense retrieval performance. 
This paper proposes CoT-MAE (ConTextual Masked Auto-Encoder), a simple yet effective generative pre-training method for dense passage retrieval. CoT-MAE employs an asymmetric encoder-decoder architecture that learns to compress the sentence semantics into a dense vector through self-supervised and context-supervised masked auto-encoding. Precisely, self-supervised masked auto-encoding learns to model the semantics of the tokens inside a text span, and context-supervised masked auto-encoding learns to model the semantical correlation between the text spans. We conduct experiments on large-scale passage retrieval benchmarks and show considerable improvements over strong baselines, demonstrating the high efficiency of CoT-MAE. Our code is available at https://github.com/caskcsg/ir/tree/main/cotmae.", + "primary_area": "data mining and knowledge management", + "author": "Xing Wu; Guangyuan Ma; Meng Lin; Zijia Lin; Zhongyuan Wang; Songlin Hu", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences+Kuaishou Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences+Kuaishou Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Kuaishou Technology; Kuaishou Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences", + "bibtex": "@article{Wu_Ma_Lin_Lin_Wang_Hu_2023, title={ConTextual Masked Auto-Encoder for Dense Passage Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25598}, DOI={10.1609/aaai.v37i4.25598}, abstractNote={Dense passage retrieval aims to retrieve the relevant passages of a query from a large corpus based on dense representations (i.e., vectors) of the query 
and the passages. Recent studies have explored improving pre-trained language models to boost dense retrieval performance. This paper proposes CoT-MAE (ConTextual Masked Auto-Encoder), a simple yet effective generative pre-training method for dense passage retrieval. CoT-MAE employs an asymmetric encoder-decoder architecture that learns to compress the sentence semantics into a dense vector through self-supervised and context-supervised masked auto-encoding. Precisely, self-supervised masked auto-encoding learns to model the semantics of the tokens inside a text span, and context-supervised masked auto-encoding learns to model the semantical correlation between the text spans. We conduct experiments on large-scale passage retrieval benchmarks and show considerable improvements over strong baselines, demonstrating the high efficiency of CoT-MAE. Our code is available at https://github.com/caskcsg/ir/tree/main/cotmae.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Xing and Ma, Guangyuan and Lin, Meng and Lin, Zijia and Wang, Zhongyuan and Hu, Songlin}, year={2023}, month={Jun.}, pages={4738-4746} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25598/25370", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25598", + "pdf_size": 631267, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10192904752819550023&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "iie.ac.cn;iie.ac.cn;iie.ac.cn;tsinghua.org.cn;kuaishou.com;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;iie.ac.cn;tsinghua.org.cn;kuaishou.com;iie.ac.cn", + "github": "https://github.com/caskcsg/ir/tree/main/cotmae", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0+1+2;0+1;2;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Kuaishou Technology", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber 
Security;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.kuaishou.com", + "aff_unique_abbr": "CAS;UCAS;Kuaishou", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27057", + "title": "ConceptX: A Framework for Latent Concept Analysis", + "track": "demonstrations", + "status": "Technical", + "abstract": "The opacity of deep neural networks remains a challenge in deploying solutions where explanation is as important as precision. We present ConceptX, a human-in-the-loop framework for interpreting and annotating latent representational space in pre-trained Language Models (pLMs). We use an unsupervised method to discover concepts learned in these models and enable a graphical interface for humans to generate explanations for the concepts. To facilitate the process, we provide auto-annotations of the concepts (based on traditional linguistic ontologies). Such annotations enable development of a linguistic resource that directly represents latent concepts learned within deep NLP models. These include not just traditional linguistic concepts, but also task-specific or sensitive concepts (words grouped based on gender or religious connotation) that helps the annotators to mark bias in the model. 
The framework consists of two parts (i) concept discovery and (ii) annotation platform.", + "primary_area": "", + "author": "Firoj Alam; Fahim Dalvi; Nadir Durrani; Hassan Sajjad; Abdul Rafae Khan; Jia Xu", + "authorids": "", + "aff": "Qatar Computing Research Institute, HBKU Research Complex, Qatar; Qatar Computing Research Institute, HBKU Research Complex, Qatar; Qatar Computing Research Institute, HBKU Research Complex, Qatar; Faculty of Computer Science, Dalhousie University, Canada; School of Engineering and Science, Stevens Institute of Technology, USA; School of Engineering and Science, Stevens Institute of Technology, USA", + "bibtex": "@article{Alam_Dalvi_Durrani_Sajjad_Khan_Xu_2024, title={ConceptX: A Framework for Latent Concept Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27057}, DOI={10.1609/aaai.v37i13.27057}, abstractNote={The opacity of deep neural networks remains a challenge in deploying solutions where explanation is as important as precision. We present ConceptX, a human-in-the-loop framework for interpreting and annotating latent representational space in pre-trained Language Models (pLMs). We use an unsupervised method to discover concepts learned in these models and enable a graphical interface for humans to generate explanations for the concepts. To facilitate the process, we provide auto-annotations of the concepts (based on traditional linguistic ontologies). Such annotations enable development of a linguistic resource that directly represents latent concepts learned within deep NLP models. These include not just traditional linguistic concepts, but also task-specific or sensitive concepts (words grouped based on gender or religious connotation) that helps the annotators to mark bias in the model. 
The framework consists of two parts (i) concept discovery and (ii) annotation platform.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Alam, Firoj and Dalvi, Fahim and Durrani, Nadir and Sajjad, Hassan and Khan, Abdul Rafae and Xu, Jia}, year={2024}, month={Jul.}, pages={16395-16397} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27057/26829", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27057", + "pdf_size": 1203126, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13300577687471059024&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "hbku.edu.qa;hbku.edu.qa;hbku.edu.qa;dal.ca;stevens.edu;stevens.edu", + "email": "hbku.edu.qa;hbku.edu.qa;hbku.edu.qa;dal.ca;stevens.edu;stevens.edu", + "github": "https://github.com/hsajjad/ConceptX", + "project": "https://micromappers.qcri.org/", + "author_num": 6, + "aff_unique_index": "0;0;0;1;2;2", + "aff_unique_norm": "Qatar Computing Research Institute;Dalhousie University;Stevens Institute of Technology", + "aff_unique_dep": ";Faculty of Computer Science;School of Engineering and Science", + "aff_unique_url": "https://www.qcri.org;https://www.dal.ca;https://www.stevens.edu", + "aff_unique_abbr": "QCRI;Dal;SIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "HBKU Research Complex;", + "aff_country_unique_index": "0;0;0;1;2;2", + "aff_country_unique": "Qatar;Canada;United States" + }, + { + "id": "article-26129", + "title": "Conceptual Reinforcement Learning for Language-Conditioned Tasks", + "track": "main", + "status": "Technical", + "abstract": "Despite the broad application of deep reinforcement learning (RL), transferring and adapting the policy to unseen but similar environments is still a significant challenge. 
Recently, the language-conditioned policy is proposed to facilitate policy transfer through learning the joint representation of observation and text that catches the compact and invariant information across various environments. Existing studies of language-conditioned RL methods often learn the joint representation as a simple latent layer for the given instances (episode-specific observation and text), which inevitably includes noisy or irrelevant information and cause spurious correlations that are dependent on instances, thus hurting generalization performance and training efficiency. To address the above issue, we propose a conceptual reinforcement learning (CRL) framework to learn the concept-like joint representation for language-conditioned policy. The key insight is that concepts are compact and invariant representations in human cognition through extracting similarities from numerous instances in real-world. In CRL, we propose a multi-level attention encoder and two mutual information constraints for learning compact and invariant concepts. 
Verified in two challenging environments, RTFM and Messenger, CRL significantly improves the training efficiency (up to 70%) and generalization ability (up to 30%) to the new environment dynamics.", + "primary_area": "machine learning iii", + "author": "Shaohui Peng; Xing Hu; Rui Zhang; Jiaming Guo; Qi Yi; Ruizhi Chen; Zidong Du; Ling Li; Qi Guo; Yunji Chen", + "authorids": "", + "aff": "SKL of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences + Cambricon Technologies; SKL of Processors, Institute of Computing Technology, CAS; SKL of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences + Cambricon Technologies; SKL of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences + Cambricon Technologies + University of Science and Technology of China; SKL of Processors, Institute of Computing Technology, CAS + Cambricon Technologies + University of Science and Technology of China; University of Chinese Academy of Sciences + SKL of Computer Science, Institute of Software, CAS; SKL of Processors, Institute of Computing Technology, CAS + Cambricon Technologies; University of Chinese Academy of Sciences + SKL of Computer Science, Institute of Software, CAS; SKL of Processors, Institute of Computing Technology, CAS; SKL of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences", + "bibtex": "@article{Peng_Hu_Zhang_Guo_Yi_Chen_Du_Li_Guo_Chen_2023, title={Conceptual Reinforcement Learning for Language-Conditioned Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26129}, DOI={10.1609/aaai.v37i8.26129}, abstractNote={Despite the broad application of deep reinforcement learning (RL), transferring and adapting the policy to unseen but similar environments is still a significant challenge. 
Recently, the language-conditioned policy is proposed to facilitate policy transfer through learning the joint representation of observation and text that catches the compact and invariant information across various environments. Existing studies of language-conditioned RL methods often learn the joint representation as a simple latent layer for the given instances (episode-specific observation and text), which inevitably includes noisy or irrelevant information and cause spurious correlations that are dependent on instances, thus hurting generalization performance and training efficiency. To address the above issue, we propose a conceptual reinforcement learning (CRL) framework to learn the concept-like joint representation for language-conditioned policy. The key insight is that concepts are compact and invariant representations in human cognition through extracting similarities from numerous instances in real-world. In CRL, we propose a multi-level attention encoder and two mutual information constraints for learning compact and invariant concepts. 
Verified in two challenging environments, RTFM and Messenger, CRL significantly improves the training efficiency (up to 70%) and generalization ability (up to 30%) to the new environment dynamics.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Shaohui and Hu, Xing and Zhang, Rui and Guo, Jiaming and Yi, Qi and Chen, Ruizhi and Du, Zidong and Li, Ling and Guo, Qi and Chen, Yunji}, year={2023}, month={Jun.}, pages={9426-9434} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26129/25901", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26129", + "pdf_size": 2540676, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=614655736585767146&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;mail.ustc.edu.cn;iscas.ac.cn;iscas.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;mail.ustc.edu.cn;iscas.ac.cn;iscas.ac.cn", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1+2;0;0+1+2;0+1+2+3;0+2+3;1+4;0+2;1+4;0;0+1", + "aff_unique_norm": "Institute of Computing Technology;University of Chinese Academy of Sciences;Cambricon Technologies;University of Science and Technology of China;Chinese Academy of Sciences", + "aff_unique_dep": "SKL of Processors;;;;SKL of Computer Science", + "aff_unique_url": "http://www.ict.ac.cn;http://www.ucas.ac.cn;https://www.cambricon.com;http://www.ustc.edu.cn;http://www.ios.ac.cn", + "aff_unique_abbr": "ICT;UCAS;;USTC;CAS", + "aff_campus_unique_index": ";;;;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0;0+0+0;0+0+0+0;0+0+0;0+0;0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26172", + "title": "Concurrent Multi-Label Prediction in Event Streams", + "track": "main", + "status": "Technical", + "abstract": "Streams of irregularly 
occurring events are commonly modeled as a marked temporal point process. Many real-world datasets such as e-commerce transactions and electronic health records often involve events where multiple event types co-occur, e.g. multiple items purchased or multiple diseases diagnosed simultaneously. In this paper, we tackle multi-label prediction in such a problem setting, and propose a novel Transformer-based Conditional Mixture of Bernoulli Network (TCMBN) that leverages neural density estimation to capture complex temporal dependence as well as probabilistic dependence between concurrent event types. We also propose potentially incorporating domain knowledge in the objective by regularizing the predicted probability. To represent probabilistic dependence of concurrent event types graphically, we design a two-step approach that first learns the mixture of Bernoulli network and then solves a least-squares semi-definite constrained program to numerically approximate the sparse precision matrix from a learned covariance matrix. This approach proves to be effective for event prediction while also providing an interpretable and possibly non-stationary structure for insights into event co-occurrence. We demonstrate the superior performance of our approach compared to existing baselines on multiple synthetic and real benchmarks.", + "primary_area": "machine learning iii", + "author": "Xiao Shou; Tian Gao; Dharmashankar Subramanian; Debarun Bhattacharjya; Kristin P. Bennett", + "authorids": "", + "aff": "Department of Mathematical Sciences, Rensselaer Polytechnic Institute + Department of Computer Science, Rensselaer Polytechnic Institute; Research AI, IBM T. J. Watson Research Center, Yorktown Heights, NY , USA; Research AI, IBM T. J. Watson Research Center, Yorktown Heights, NY , USA; Research AI, IBM T. J. 
Watson Research Center, Yorktown Heights, NY , USA; Department of Mathematical Sciences, Rensselaer Polytechnic Institute + Department of Computer Science, Rensselaer Polytechnic Institute", + "bibtex": "@article{Shou_Gao_Subramanian_Bhattacharjya_Bennett_2023, title={Concurrent Multi-Label Prediction in Event Streams}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26172}, DOI={10.1609/aaai.v37i8.26172}, abstractNote={Streams of irregularly occurring events are commonly modeled as a marked temporal point process. Many real-world datasets such as e-commerce transactions and electronic health records often involve events where multiple event types co-occur, e.g. multiple items purchased or multiple diseases diagnosed simultaneously. In this paper, we tackle multi-label prediction in such a problem setting, and propose a novel Transformer-based Conditional Mixture of Bernoulli Network (TCMBN) that leverages neural density estimation to capture complex temporal dependence as well as probabilistic dependence between concurrent event types. We also propose potentially incorporating domain knowledge in the objective by regularizing the predicted probability. To represent probabilistic dependence of concurrent event types graphically, we design a two-step approach that first learns the mixture of Bernoulli network and then solves a least-squares semi-definite constrained program to numerically approximate the sparse precision matrix from a learned covariance matrix. This approach proves to be effective for event prediction while also providing an interpretable and possibly non-stationary structure for insights into event co-occurrence. 
We demonstrate the superior performance of our approach compared to existing baselines on multiple synthetic and real benchmarks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shou, Xiao and Gao, Tian and Subramanian, Dharmashankar and Bhattacharjya, Debarun and Bennett, Kristin P.}, year={2023}, month={Jun.}, pages={9820-9828} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26172/25944", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26172", + "pdf_size": 584946, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=743360099405134944&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;1;1;0+0", + "aff_unique_norm": "Rensselaer Polytechnic Institute;IBM T. J. Watson Research Center", + "aff_unique_dep": "Department of Mathematical Sciences;Research AI", + "aff_unique_url": "https://www.rpi.edu;https://www.ibm.com/research/watson", + "aff_unique_abbr": "RPI;IBM Watson", + "aff_campus_unique_index": ";1;1;1;", + "aff_campus_unique": ";Yorktown Heights", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25549", + "title": "Conditional Diffusion Based on Discrete Graph Structures for Molecular Graph Generation", + "track": "main", + "status": "Technical", + "abstract": "Learning the underlying distribution of molecular graphs and generating high-fidelity samples is a fundamental research problem in drug discovery and material science. However, accurately modeling distribution and rapidly generating novel molecular graphs remain crucial and challenging goals. To accomplish these goals, we propose a novel Conditional Diffusion model based on discrete Graph Structures (CDGS) for molecular graph generation. 
Specifically, we construct a forward graph diffusion process on both graph structures and inherent features through stochastic differential equations (SDE) and derive discrete graph structures as the condition for reverse generative processes. We present a specialized hybrid graph noise prediction model that extracts the global context and the local node-edge dependency from intermediate graph states. We further utilize ordinary differential equation (ODE) solvers for efficient graph sampling, based on the semi-linear structure of the probability flow ODE. We also combine the solvers with gradient guidance from the molecule property predictor for similarity-constrained molecule optimization. Experiments on diverse datasets validate the effectiveness of our framework. Particularly, the proposed method still generates high-quality molecular graphs in a limited number of steps.", + "primary_area": "data mining and knowledge management", + "author": "Han Huang; Leilei Sun; Bowen Du; Weifeng Lv", + "authorids": "", + "aff": "State Key Laboratory of Software Development Environment, Beihang University, China; State Key Laboratory of Software Development Environment, Beihang University, China; State Key Laboratory of Software Development Environment, Beihang University, China; State Key Laboratory of Software Development Environment, Beihang University, China", + "bibtex": "@article{Huang_Sun_Du_Lv_2023, title={Conditional Diffusion Based on Discrete Graph Structures for Molecular Graph Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25549}, DOI={10.1609/aaai.v37i4.25549}, abstractNote={Learning the underlying distribution of molecular graphs and generating high-fidelity samples is a fundamental research problem in drug discovery and material science. However, accurately modeling distribution and rapidly generating novel molecular graphs remain crucial and challenging goals. 
To accomplish these goals, we propose a novel Conditional Diffusion model based on discrete Graph Structures (CDGS) for molecular graph generation. Specifically, we construct a forward graph diffusion process on both graph structures and inherent features through stochastic differential equations (SDE) and derive discrete graph structures as the condition for reverse generative processes. We present a specialized hybrid graph noise prediction model that extracts the global context and the local node-edge dependency from intermediate graph states. We further utilize ordinary differential equation (ODE) solvers for efficient graph sampling, based on the semi-linear structure of the probability flow ODE. We also combine the solvers with gradient guidance from the molecule property predictor for similarity-constrained molecule optimization. Experiments on diverse datasets validate the effectiveness of our framework. Particularly, the proposed method still generates high-quality molecular graphs in a limited number of steps.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Han and Sun, Leilei and Du, Bowen and Lv, Weifeng}, year={2023}, month={Jun.}, pages={4302-4311} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25549/25321", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25549", + "pdf_size": 1931729, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5364445277106171680&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "State Key Laboratory of Software Development Environment", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "Beihang", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25789", + "title": "Conditional Syntax Splitting for Non-monotonic Inference Operators", + "track": "main", + "status": "Technical", + "abstract": "Syntax splitting is a property of inductive inference operators that ensures we can restrict our attention to parts of the conditional belief base that share atoms with a given query. To apply syntax splitting, a conditional belief base needs to consist of syntactically disjoint conditionals. This requirement is often too strong in practice, as conditionals might share atoms. In this paper we introduce the concept of conditional syntax splitting, inspired by the notion of conditional independence as known from probability theory. We show that lexicographic inference and system W satisfy conditional syntax splitting, and connect conditional syntax splitting to several known properties from the literature on non-monotonic reasoning, including the drowning effect.", + "primary_area": "knowledge representation and reasoning", + "author": "Jesse Heyninck; Gabriele Kern-Isberner; Thomas Meyer; Jonas Philipp Haldimann; Christoph Beierle", + "authorids": "", + "aff": "Open Universiteit, the Netherlands; Technische Universit\u00e4t Dortmund, Germany; University of Cape Town and CAIR, South-Africa; FernUniversit\u00e4t in Hagen, Germany; FernUniversit\u00e4t in Hagen, Germany", + "bibtex": "@article{Heyninck_Kern-Isberner_Meyer_Haldimann_Beierle_2023, title={Conditional Syntax Splitting for Non-monotonic Inference Operators}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25789}, DOI={10.1609/aaai.v37i5.25789}, abstractNote={Syntax splitting is a property of inductive inference operators that ensures we can restrict our attention to parts of the conditional belief base that share atoms with a given query. 
To apply syntax splitting, a conditional belief base needs to consist of syntactically disjoint conditionals. This requirement is often too strong in practice, as conditionals might share atoms. In this paper we introduce the concept of conditional syntax splitting, inspired by the notion of conditional independence as known from probability theory. We show that lexicographic inference and system W satisfy conditional syntax splitting, and connect conditional syntax splitting to several known properties from the literature on non-monotonic reasoning, including the drowning effect.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Heyninck, Jesse and Kern-Isberner, Gabriele and Meyer, Thomas and Haldimann, Jonas Philipp and Beierle, Christoph}, year={2023}, month={Jun.}, pages={6416-6424} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25789/25561", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25789", + "pdf_size": 161613, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12547435014237327141&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ou.nl; ; ; ; ", + "email": "ou.nl; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;3", + "aff_unique_norm": "Open Universiteit;Technische Universit\u00e4t Dortmund;University of Cape Town;FernUniversit\u00e4t in Hagen", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.openuniversiteit.nl/;https://www.tu-dortmund.de;https://www.uct.ac.za;https://www.fernuni-hagen.de", + "aff_unique_abbr": "OU;TUDo;UCT;FUH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;1;1", + "aff_country_unique": "the Netherlands;Germany;South Africa" + }, + { + "id": "article-25968", + "title": "Confidence-Aware Training of Smoothed Classifiers for Certified Robustness", + "track": "main", + "status": "Technical", + 
"abstract": "Any classifier can be \"smoothed out\" under Gaussian noise to build a new classifier that is provably robust to l2-adversarial perturbations, viz., by averaging its predictions over the noise via randomized smoothing. Under the smoothed classifiers, the fundamental trade-off between accuracy and (adversarial) robustness has been well evidenced in the literature: i.e., increasing the robustness of a classifier for an input can be at the expense of decreased accuracy for some other inputs. In this paper, we propose a simple training method leveraging this trade-off to obtain robust smoothed classifiers, in particular, through a sample-wise control of robustness over the training samples. We make this control feasible by using \"accuracy under Gaussian noise\" as an easy-to-compute proxy of adversarial robustness for an input. Specifically, we differentiate the training objective depending on this proxy to filter out samples that are unlikely to benefit from the worst-case (adversarial) objective. Our experiments show that the proposed method, despite its simplicity, consistently exhibits improved certified robustness upon state-of-the-art training methods. Somewhat surprisingly, we find these improvements persist even for other notions of robustness, e.g., to various types of common corruptions. 
Code is available at https://github.com/alinlab/smoothing-catrs.", + "primary_area": "machine learning ii", + "author": "Jongheon Jeong; Seojin Kim; Jinwoo Shin", + "authorids": "", + "aff": "Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST)", + "bibtex": "@article{Jeong_Kim_Shin_2023, title={Confidence-Aware Training of Smoothed Classifiers for Certified Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25968}, DOI={10.1609/aaai.v37i7.25968}, abstractNote={Any classifier can be "smoothed out" under Gaussian noise to build a new classifier that is provably robust to l2-adversarial perturbations, viz., by averaging its predictions over the noise via randomized smoothing. Under the smoothed classifiers, the fundamental trade-off between accuracy and (adversarial) robustness has been well evidenced in the literature: i.e., increasing the robustness of a classifier for an input can be at the expense of decreased accuracy for some other inputs. In this paper, we propose a simple training method leveraging this trade-off to obtain robust smoothed classifiers, in particular, through a sample-wise control of robustness over the training samples. We make this control feasible by using "accuracy under Gaussian noise" as an easy-to-compute proxy of adversarial robustness for an input. Specifically, we differentiate the training objective depending on this proxy to filter out samples that are unlikely to benefit from the worst-case (adversarial) objective. Our experiments show that the proposed method, despite its simplicity, consistently exhibits improved certified robustness upon state-of-the-art training methods. Somewhat surprisingly, we find these improvements persist even for other notions of robustness, e.g., to various types of common corruptions. 
Code is available at https://github.com/alinlab/smoothing-catrs.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jeong, Jongheon and Kim, Seojin and Shin, Jinwoo}, year={2023}, month={Jun.}, pages={8005-8013} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25968/25740", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25968", + "pdf_size": 262516, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15446203807550726615&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "https://github.com/alinlab/smoothing-catrs", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26771", + "title": "Conflicting Interactions among Protection Mechanisms for Machine Learning Models", + "track": "aaai special track", + "status": "Technical", + "abstract": "Nowadays, systems based on machine learning (ML) are widely used in different domains.\nGiven their popularity, ML models have become targets for various attacks.\nAs a result, research at the intersection of security/privacy and ML has flourished. Typically such work has focused on individual types of security/privacy concerns and mitigations thereof.\n\nHowever, in real-life deployments, an ML model will need to be protected against several concerns simultaneously.\nA protection mechanism optimal for a specific security or privacy concern may interact negatively with mechanisms intended to address other concerns. 
Despite its practical relevance, the potential for such conflicts has not been studied adequately.\n\nIn this work, we first provide a framework for analyzing such conflicting interactions.\nWe then focus on systematically analyzing pairwise interactions between protection mechanisms for one concern, model and data ownership verification, with two other classes of ML protection mechanisms: differentially private training, and robustness against model evasion.\nWe find that several pairwise interactions result in conflicts.\n\nWe also explore potential approaches for avoiding such conflicts. First, we study the effect of hyperparameter relaxations, finding that there is no sweet spot balancing the performance of both protection mechanisms.\nSecond, we explore whether modifying one type of protection mechanism (ownership verification) so as to decouple it from factors that may be impacted by a conflicting mechanism (differentially private training or robustness to model evasion) can avoid conflict.\nWe show that this approach can indeed avoid the conflict between ownership verification mechanisms when combined with differentially private training, but has no effect on robustness to model evasion. We conclude by identifying the gaps in the landscape of studying interactions between other types of ML protection mechanisms.", + "primary_area": "safe and robust ai", + "author": "Sebastian Szyller; N. 
Asokan", + "authorids": "", + "aff": "Aalto University; University of Waterloo + Aalto University", + "bibtex": "@article{Szyller_Asokan_2023, title={Conflicting Interactions among Protection Mechanisms for Machine Learning Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26771}, DOI={10.1609/aaai.v37i12.26771}, abstractNote={Nowadays, systems based on machine learning (ML) are widely used in different domains.\nGiven their popularity, ML models have become targets for various attacks.\nAs a result, research at the intersection of security/privacy and ML has flourished. Typically such work has focused on individual types of security/privacy concerns and mitigations thereof. However, in real-life deployments, an ML model will need to be protected against several concerns simultaneously.\nA protection mechanism optimal for a specific security or privacy concern may interact negatively with mechanisms intended to address other concerns. Despite its practical relevance, the potential for such conflicts has not been studied adequately. In this work, we first provide a framework for analyzing such conflicting interactions.\nWe then focus on systematically analyzing pairwise interactions between protection mechanisms for one concern, model and data ownership verification, with two other classes of ML protection mechanisms: differentially private training, and robustness against model evasion.\nWe find that several pairwise interactions result in conflicts. We also explore potential approaches for avoiding such conflicts. 
First, we study the effect of hyperparameter relaxations, finding that there is no sweet spot balancing the performance of both protection mechanisms.\nSecond, we explore whether modifying one type of protection mechanism (ownership verification) so as to decouple it from factors that may be impacted by a conflicting mechanism (differentially private training or robustness to model evasion) can avoid conflict.\nWe show that this approach can indeed avoid the conflict between ownership verification mechanisms when combined with differentially private training, but has no effect on robustness to model evasion. We conclude by identifying the gaps in the landscape of studying interactions between other types of ML protection mechanisms.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Szyller, Sebastian and Asokan, N.}, year={2023}, month={Jun.}, pages={15179-15187} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26771/26543", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26771", + "pdf_size": 162964, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=906438661569928842&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sebszyller.com;acm.org", + "email": "sebszyller.com;acm.org", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+0", + "aff_unique_norm": "Aalto University;University of Waterloo", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.aalto.fi;https://uwaterloo.ca", + "aff_unique_abbr": "Aalto;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+0", + "aff_country_unique": "Finland;Canada" + }, + { + "id": "article-26385", + "title": "Consensus Learning for Cooperative Multi-Agent Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Almost all multi-agent reinforcement learning algorithms without communication follow 
the principle of centralized training with decentralized execution. During the centralized training, agents can be guided by the same signals, such as the global state. However, agents lack the shared signal and choose actions given local observations during execution. Inspired by viewpoint invariance and contrastive learning, we propose consensus learning for cooperative multi-agent reinforcement learning in this study. Although based on local observations, different agents can infer the same consensus in discrete spaces without communication. We feed the inferred one-hot consensus to the network of agents as an explicit input in a decentralized way, thereby fostering their cooperative spirit. With minor model modifications, our suggested framework can be extended to a variety of multi-agent reinforcement learning algorithms. Moreover, we carry out these variants on some fully cooperative tasks and get convincing results.", + "primary_area": "multiagent systems", + "author": "Zhiwei Xu; Bin Zhang; Dapeng Li; Zeren Zhang; Guangchong Zhou; Hao Chen; Guoliang Fan", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences; School of Artificial Intelligence, University of Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences; School of Artificial Intelligence, University of Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences; School of Artificial Intelligence, University of Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences", + "bibtex": "@article{Xu_Zhang_Li_Zhang_Zhou_Chen_Fan_2023, title={Consensus Learning for Cooperative Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26385}, DOI={10.1609/aaai.v37i10.26385}, abstractNote={Almost all multi-agent reinforcement learning algorithms without communication follow the principle of centralized training with decentralized execution. 
During the centralized training, agents can be guided by the same signals, such as the global state. However, agents lack the shared signal and choose actions given local observations during execution. Inspired by viewpoint invariance and contrastive learning, we propose consensus learning for cooperative multi-agent reinforcement learning in this study. Although based on local observations, different agents can infer the same consensus in discrete spaces without communication. We feed the inferred one-hot consensus to the network of agents as an explicit input in a decentralized way, thereby fostering their cooperative spirit. With minor model modifications, our suggested framework can be extended to a variety of multi-agent reinforcement learning algorithms. Moreover, we carry out these variants on some fully cooperative tasks and get convincing results.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Zhiwei and Zhang, Bin and Li, Dapeng and Zhang, Zeren and Zhou, Guangchong and Chen, Hao and Fan, Guoliang}, year={2023}, month={Jun.}, pages={11726-11734} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26385/26157", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26385", + "pdf_size": 4240635, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14568163160915471279&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;1;0;1;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25552", + "title": "Constrained Market Share Maximization by Signal-Guided Optimization", + "track": "main", + "status": "Technical", + "abstract": "With the rapid development of the airline\nindustry, maximizing the market share with a\nconstrained budget is an urgent econometric problem for an airline. We investigate the problem by adjusting flight frequencies on different flight routes. Owing to the large search space of solutions and the difficulty of predicting the market, this problem is in general daunting to solve. This paper proposes a novel two-stage optimization method to address the challenges. On the higher level, we use a signal to guide the optimization process toward a constrained satisfying solution. On the lower level, we consider the consecutive itineraries in real scenarios and model the unseen correlations between routes in itineraries for market share prediction. In theory, we prove the convergence of our optimization approach. In the experiment, we empirically verify the superiority of both our prediction model and optimization approach over existing works with large-scale real-world data. 
Our code has been released at: https://github.com/codingAndBS/AirlineMarket.", + "primary_area": "data mining and knowledge management", + "author": "Bo Hui; Yuchen Fang; Tian Xia; Sarp Aykent; Wei-Shinn Ku", + "authorids": "", + "aff": "Auburn University; Beijing University of Posts and Telecommunications; Auburn University; Auburn University; Auburn University", + "bibtex": "@article{Hui_Fang_Xia_Aykent_Ku_2023, title={Constrained Market Share Maximization by Signal-Guided Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25552}, DOI={10.1609/aaai.v37i4.25552}, abstractNote={With the rapid development of the airline\nindustry, maximizing the market share with a\nconstrained budget is an urgent econometric problem for an airline. We investigate the problem by adjusting flight frequencies on different flight routes. Owing to the large search space of solutions and the difficulty of predicting the market, this problem is in general daunting to solve. This paper proposes a novel two-stage optimization method to address the challenges. On the higher level, we use a signal to guide the optimization process toward a constrained satisfying solution. On the lower level, we consider the consecutive itineraries in real scenarios and model the unseen correlations between routes in itineraries for market share prediction. In theory, we prove the convergence of our optimization approach. In the experiment, we empirically verify the superiority of both our prediction model and optimization approach over existing works with large-scale real-world data. 
Our code has been released at: https://github.com/codingAndBS/AirlineMarket.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hui, Bo and Fang, Yuchen and Xia, Tian and Aykent, Sarp and Ku, Wei-Shinn}, year={2023}, month={Jun.}, pages={4330-4338} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25552/25324", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25552", + "pdf_size": 337803, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12950594431772873159&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "auburn.edu;bupt.edu.cn;auburn.edu;auburn.edu;auburn.edu", + "email": "auburn.edu;bupt.edu.cn;auburn.edu;auburn.edu;auburn.edu", + "github": "https://github.com/codingAndBS/AirlineMarket", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Auburn University;Beijing University of Posts and Telecommunications", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.auburn.edu;http://www.bupt.edu.cn/", + "aff_unique_abbr": "Auburn;BUPT", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26757", + "title": "Constrained Reinforcement Learning in Hard Exploration Problems", + "track": "aaai special track", + "status": "Technical", + "abstract": "One approach to guaranteeing safety in Reinforcement Learning is through cost constraints that are dependent on the policy. Recent works in constrained RL have developed methods that ensure constraints are enforced even at learning time while maximizing the overall value of the policy. Unfortunately, as demonstrated in our experimental results, such approaches do not perform well on complex multi-level tasks, with longer episode lengths or sparse rewards. 
To that end, we propose a scalable hierarchical approach for constrained RL problems that employs backward cost value functions in the context of task hierarchy and a novel intrinsic reward function in lower levels of the hierarchy to enable cost constraint enforcement. One of our key contributions is in proving that backward value functions are theoretically viable even when there are multiple levels of decision making. We also show that our new approach, referred to as Hierarchically Limited consTraint Enforcement (HiLiTE) significantly improves on state of the art Constrained RL approaches for many benchmark problems from literature. We further demonstrate that this performance (on value and constraint enforcement) clearly outperforms existing best approaches for constrained RL and hierarchical RL.", + "primary_area": "safe and robust ai", + "author": "Pathmanathan Pankayaraj; Pradeep Varakantham", + "authorids": "", + "aff": "Singapore Management University, Singapore; Singapore Management University, Singapore", + "bibtex": "@article{Pankayaraj_Varakantham_2023, title={Constrained Reinforcement Learning in Hard Exploration Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26757}, DOI={10.1609/aaai.v37i12.26757}, abstractNote={One approach to guaranteeing safety in Reinforcement Learning is through cost constraints that are dependent on the policy. Recent works in constrained RL have developed methods that ensure constraints are enforced even at learning time while maximizing the overall value of the policy. Unfortunately, as demonstrated in our experimental results, such approaches do not perform well on complex multi-level tasks, with longer episode lengths or sparse rewards. 
To that end, we propose a scalable hierarchical approach for constrained RL problems that employs backward cost value functions in the context of task hierarchy and a novel intrinsic reward function in lower levels of the hierarchy to enable cost constraint enforcement. One of our key contributions is in proving that backward value functions are theoretically viable even when there are multiple levels of decision making. We also show that our new approach, referred to as Hierarchically Limited consTraint Enforcement (HiLiTE) significantly improves on state of the art Constrained RL approaches for many benchmark problems from literature. We further demonstrate that this performance (on value and constraint enforcement) clearly outperforms existing best approaches for constrained RL and hierarchical RL.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pankayaraj, Pathmanathan and Varakantham, Pradeep}, year={2023}, month={Jun.}, pages={15055-15063} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26757/26529", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26757", + "pdf_size": 379435, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=153175105261392507&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "smu.edu.sg;smu.edu.sg", + "email": "smu.edu.sg;smu.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Singapore Management University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.smu.edu.sg", + "aff_unique_abbr": "SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25632", + "title": "Constrained Submodular Optimization for Vaccine Design", + "track": "main", + "status": "Technical", + "abstract": "Advances in machine learning have enabled the prediction 
of immune system responses to prophylactic and therapeutic vaccines. However, the engineering task of designing vaccines remains a challenge. In particular, the genetic variability of the human immune system makes it difficult to design peptide vaccines that provide widespread immunity in vaccinated populations. We introduce a framework for evaluating and designing peptide vaccines that uses probabilistic machine learning models, and demonstrate its ability to produce designs for a SARS-CoV-2 vaccine that outperform previous designs. We provide a theoretical analysis of the approximability, scalability, and complexity of our framework.", + "primary_area": "domain s of application", + "author": "Zheng Dai; David K. Gifford", + "authorids": "", + "aff": "Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology", + "bibtex": "@article{Dai_Gifford_2023, title={Constrained Submodular Optimization for Vaccine Design}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25632}, DOI={10.1609/aaai.v37i4.25632}, abstractNote={Advances in machine learning have enabled the prediction of immune system responses to prophylactic and therapeutic vaccines. However, the engineering task of designing vaccines remains a challenge. In particular, the genetic variability of the human immune system makes it difficult to design peptide vaccines that provide widespread immunity in vaccinated populations. We introduce a framework for evaluating and designing peptide vaccines that uses probabilistic machine learning models, and demonstrate its ability to produce designs for a SARS-CoV-2 vaccine that outperform previous designs. 
We provide a theoretical analysis of the approximability, scalability, and complexity of our framework.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dai, Zheng and Gifford, David K.}, year={2023}, month={Jun.}, pages={5045-5053} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25632/25404", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25632", + "pdf_size": 1023658, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8903859874455521759&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mit.edu;mit.edu", + "email": "mit.edu;mit.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", + "aff_unique_url": "https://www.csail.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25522", + "title": "Constraint Optimization over Semirings", + "track": "main", + "status": "Technical", + "abstract": "Interpretations of logical formulas over semirings (other than the Boolean semiring) have applications in various areas of computer science including logic, AI, databases, and security. Such interpretations provide richer information beyond the truth or falsity of a statement. Examples of such semirings include Viterbi semiring, min-max or access control semiring, tropical semiring, and fuzzy semiring. \n \n The present work investigates the complexity of constraint optimization problems over semirings. The generic optimization problem we study is the following: Given a propositional formula phi over n variable and a semiring (K,+, . ,0,1), find the maximum value over all possible interpretations of phi over K. 
This can be seen as a generalization of the well-known satisfiability problem (a propositional formula is satisfiable if and only if the maximum value over all interpretations/assignments over the Boolean semiring is 1). A related problem is to find an interpretation that achieves the maximum value. In this work, we first focus on these optimization problems over the Viterbi semiring, which we call optConfVal and optConf. \n \n We first show that for general propositional formulas in negation normal form, optConfVal and optConf are in FP^NP. We then investigate optConf when the input formula phi is represented in the conjunctive normal form. For CNF formulae, we first derive an upper bound on the value of optConf as a function of the number of maximum satisfiable clauses. In particular, we show that if r is the maximum number of satisfiable clauses in a CNF formula with m clauses, then its optConf value is at most 1/4^(m-r). Building on this we establish that optConf for CNF formulae is hard for the complexity class FP^NP[log]. We also design polynomial-time approximation algorithms and establish an inapproximability for optConfVal. We establish similar complexity results for these optimization problems over other semirings including tropical, fuzzy, and access control semirings.", + "primary_area": "constraint satisfaction and optimization", + "author": "A. Pavan; Kuldeep S. Meel; N. V. 
Vinodchandran; Arnab Bhattacharyya", + "authorids": "", + "aff": "Iowa State University; National University of Singapore; University of Nebraska-Lincoln; National University of Singapore", + "bibtex": "@article{Pavan_Meel_Vinodchandran_Bhattacharyya_2023, title={Constraint Optimization over Semirings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25522}, DOI={10.1609/aaai.v37i4.25522}, abstractNote={Interpretations of logical formulas over semirings (other than the Boolean semiring) have applications in various areas of computer science including logic, AI, databases, and security. Such interpretations provide richer information beyond the truth or falsity of a statement. Examples of such semirings include Viterbi semiring, min-max or access control semiring, tropical semiring, and fuzzy semiring. The present work investigates the complexity of constraint optimization problems over semirings. The generic optimization problem we study is the following: Given a propositional formula phi over n variable and a semiring (K,+, . ,0,1), find the maximum value over all possible interpretations of phi over K. This can be seen as a generalization of the well-known satisfiability problem (a propositional formula is satisfiable if and only if the maximum value over all interpretations/assignments over the Boolean semiring is 1). A related problem is to find an interpretation that achieves the maximum value. In this work, we first focus on these optimization problems over the Viterbi semiring, which we call optConfVal and optConf. We first show that for general propositional formulas in negation normal form, optConfVal and optConf are in FP^NP. We then investigate optConf when the input formula phi is represented in the conjunctive normal form. For CNF formulae, we first derive an upper bound on the value of optConf as a function of the number of maximum satisfiable clauses. 
In particular, we show that if r is the maximum number of satisfiable clauses in a CNF formula with m clauses, then its optConf value is at most 1/4^(m-r). Building on this we establish that optConf for CNF formulae is hard for the complexity class FP^NP[log]. We also design polynomial-time approximation algorithms and establish an inapproximability for optConfVal. We establish similar complexity results for these optimization problems over other semirings including tropical, fuzzy, and access control semirings.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pavan, A. and Meel, Kuldeep S. and Vinodchandran, N. V. and Bhattacharyya, Arnab}, year={2023}, month={Jun.}, pages={4070-4077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25522/25294", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25522", + "pdf_size": 164399, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5149227874854187844&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Iowa State University;National University of Singapore;University of Nebraska-Lincoln", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.iastate.edu;https://www.nus.edu.sg;https://www.unl.edu", + "aff_unique_abbr": "ISU;NUS;UNL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1", + "aff_country_unique": "United States;Singapore" + }, + { + "id": "article-26892", + "title": "Context-Aware Analysis of Group Submissions for Group Anomaly Detection and Performance Prediction", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "Learning exercises that activate students\u2019 additional cognitive understanding of course concepts facilitate contextualizing the content knowledge and 
developing higher-order thinking and problem-solving skills. Student-generated instructional materials such as course summaries and problem sets are amongst the instructional strategies that reflect active learning and constructivist philosophy.\n\nThe contributions of this work are twofold: 1) We introduce a practical implementation of inside-outside learning strategy in an undergraduate deep learning course and will share our experiences in incorporating student-generated instructional materials learning strategy in course design, and 2) We develop a context-aware deep learning framework to draw insights from the student-generated materials for (i) Detecting anomalies in group activities and (ii) Predicting the median quiz performance of students in each group. This work opens up an avenue for effectively implementing a constructivism learning strategy in large-scale and online courses to build a sense of community between learners while providing an automated tool for instructors to identify at-risk groups.", + "primary_area": "", + "author": "Narges Norouzi; Amir Mazaheri", + "authorids": "", + "aff": "University of California, Berkeley + University of California, Santa Cruz; University of Central Florida", + "bibtex": "@article{Norouzi_Mazaheri_2024, title={Context-Aware Analysis of Group Submissions for Group Anomaly Detection and Performance Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26892}, DOI={10.1609/aaai.v37i13.26892}, abstractNote={Learning exercises that activate students\u2019 additional cognitive understanding of course concepts facilitate contextualizing the content knowledge and developing higher-order thinking and problem-solving skills. Student-generated instructional materials such as course summaries and problem sets are amongst the instructional strategies that reflect active learning and constructivist philosophy. 
The contributions of this work are twofold: 1) We introduce a practical implementation of inside-outside learning strategy in an undergraduate deep learning course and will share our experiences in incorporating student-generated instructional materials learning strategy in course design, and 2) We develop a context-aware deep learning framework to draw insights from the student-generated materials for (i) Detecting anomalies in group activities and (ii) Predicting the median quiz performance of students in each group. This work opens up an avenue for effectively implementing a constructivism learning strategy in large-scale and online courses to build a sense of community between learners while providing an automated tool for instructors to identify at-risk groups.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Norouzi, Narges and Mazaheri, Amir}, year={2024}, month={Jul.}, pages={15938-15946} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26892/26664", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26892", + "pdf_size": 589449, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8927558130327161345&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "berkeley.edu;knights.ucf.edu", + "email": "berkeley.edu;knights.ucf.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "University of California, Berkeley;University of California, Santa Cruz;University of Central Florida", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.berkeley.edu;https://www.ucsc.edu;https://www.ucf.edu", + "aff_unique_abbr": "UC Berkeley;UCSC;UCF", + "aff_campus_unique_index": "0+1", + "aff_campus_unique": "Berkeley;Santa Cruz;", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25861", + "title": "Context-Aware Safe Medication Recommendations 
with Molecular Graph and DDI Graph Embedding", + "track": "main", + "status": "Technical", + "abstract": "Molecular structures and Drug-Drug Interactions (DDI) are recognized as important knowledge to guide medication recommendation (MR) tasks, and medical concept embedding has been applied to boost their performance. Though promising performance has been achieved by leveraging Graph Neural Network (GNN) models to encode the molecular structures of medications or/and DDI, we observe that existing models are still defective: 1) to differentiate medications with similar molecules but different functionality; or/and 2) to properly capture the unintended reactions between drugs in the embedding space. To alleviate this limitation, we propose Carmen, a cautiously designed graph embedding-based MR framework. Carmen consists of four components, including patient representation learning, context information extraction, a context-aware GNN, and DDI encoding. Carmen incorporates the visit history into the representation learning of molecular graphs to distinguish molecules with similar topology but dissimilar activity. Its DDI encoding module is specially devised for the non-transitive interaction DDI graphs. 
The experiments on real-world datasets demonstrate that Carmen achieves remarkable performance improvement over state-of-the-art models and can improve the safety of recommended drugs with a proper DDI graph encoding.", + "primary_area": "machine learning i", + "author": "Qianyu Chen; Xin Li; Kunnan Geng; Mingzhong Wang", + "authorids": "", + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; Beijing Institute of Technology; University of the Sunshine Coast", + "bibtex": "@article{Chen_Li_Geng_Wang_2023, title={Context-Aware Safe Medication Recommendations with Molecular Graph and DDI Graph Embedding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25861}, DOI={10.1609/aaai.v37i6.25861}, abstractNote={Molecular structures and Drug-Drug Interactions (DDI) are recognized as important knowledge to guide medication recommendation (MR) tasks, and medical concept embedding has been applied to boost their performance. Though promising performance has been achieved by leveraging Graph Neural Network (GNN) models to encode the molecular structures of medications or/and DDI, we observe that existing models are still defective: 1) to differentiate medications with similar molecules but different functionality; or/and 2) to properly capture the unintended reactions between drugs in the embedding space. To alleviate this limitation, we propose Carmen, a cautiously designed graph embedding-based MR framework. Carmen consists of four components, including patient representation learning, context information extraction, a context-aware GNN, and DDI encoding. Carmen incorporates the visit history into the representation learning of molecular graphs to distinguish molecules with similar topology but dissimilar activity. Its DDI encoding module is specially devised for the non-transitive interaction DDI graphs. 
The experiments on real-world datasets demonstrate that Carmen achieves remarkable performance improvement over state-of-the-art models and can improve the safety of recommended drugs with a proper DDI graph encoding.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Qianyu and Li, Xin and Geng, Kunnan and Wang, Mingzhong}, year={2023}, month={Jun.}, pages={7053-7060} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25861/25633", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25861", + "pdf_size": 1257497, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17274081953580999616&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn;usc.edu.au", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn;usc.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Beijing Institute of Technology;University of the Sunshine Coast", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.usc.edu.au", + "aff_unique_abbr": "BIT;USC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25301", + "title": "Context-Aware Transformer for 3D Point Cloud Automatic Annotation", + "track": "main", + "status": "Technical", + "abstract": "3D automatic annotation has received increased attention since manually annotating 3D point clouds is laborious. However, existing methods are usually complicated, e.g., pipelined training for 3D foreground/background segmentation, cylindrical object proposals, and point completion. Furthermore, they often overlook the inter-object feature correlation that is particularly informative to hard samples for 3D annotation. 
\nTo this end, we propose a simple yet effective end-to-end Context-Aware Transformer (CAT) as an automated 3D-box labeler to generate precise 3D box annotations from 2D boxes, trained with a small number of human annotations. We adopt the general encoder-decoder architecture, where the CAT encoder consists of an intra-object encoder (local) and an inter-object encoder (global), performing self-attention along the sequence and batch dimensions, respectively. The former models intra-object interactions among points and the latter extracts feature relations among different objects, thus boosting scene-level understanding.\nVia local and global encoders, CAT can generate high-quality 3D box annotations with a streamlined workflow, allowing it to outperform existing state-of-the-arts by up to 1.79% 3D AP on the hard task of the KITTI test set.", + "primary_area": "computer vision ii", + "author": "Xiaoyan Qian; Chang Liu; Xiaojuan Qi; Siew-Chong Tan; Edmund Lam; Ngai Wong", + "authorids": "", + "aff": "The University of Hong Kong, Pokfulam, Hong Kong; The University of Hong Kong, Pokfulam, Hong Kong; The University of Hong Kong, Pokfulam, Hong Kong; The University of Hong Kong, Pokfulam, Hong Kong; The University of Hong Kong, Pokfulam, Hong Kong; The University of Hong Kong, Pokfulam, Hong Kong", + "bibtex": "@article{Qian_Liu_Qi_Tan_Lam_Wong_2023, title={Context-Aware Transformer for 3D Point Cloud Automatic Annotation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25301}, DOI={10.1609/aaai.v37i2.25301}, abstractNote={3D automatic annotation has received increased attention since manually annotating 3D point clouds is laborious. However, existing methods are usually complicated, e.g., pipelined training for 3D foreground/background segmentation, cylindrical object proposals, and point completion. Furthermore, they often overlook the inter-object feature correlation that is particularly informative to hard samples for 3D annotation. 
To this end, we propose a simple yet effective end-to-end Context-Aware Transformer (CAT) as an automated 3D-box labeler to generate precise 3D box annotations from 2D boxes, trained with a small number of human annotations. We adopt the general encoder-decoder architecture, where the CAT encoder consists of an intra-object encoder (local) and an inter-object encoder (global), performing self-attention along the sequence and batch dimensions, respectively. The former models intra-object interactions among points and the latter extracts feature relations among different objects, thus boosting scene-level understanding.\nVia local and global encoders, CAT can generate high-quality 3D box annotations with a streamlined workflow, allowing it to outperform existing state-of-the-arts by up to 1.79% 3D AP on the hard task of the KITTI test set.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qian, Xiaoyan and Liu, Chang and Qi, Xiaojuan and Tan, Siew-Chong and Lam, Edmund and Wong, Ngai}, year={2023}, month={Jun.}, pages={2082-2090} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25301/25073", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25301", + "pdf_size": 4474677, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18339710025958799034&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "connect.hku.hk;connect.hku.hk;eee.hku.hk;eee.hku.hk;eee.hku.hk;eee.hku.hk", + "email": "connect.hku.hk;connect.hku.hk;eee.hku.hk;eee.hku.hk;eee.hku.hk;eee.hku.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "The University of Hong Kong", + "aff_unique_dep": "", + "aff_unique_url": "https://www.hku.hk", + "aff_unique_abbr": "HKU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Pokfulam", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { 
+ "id": "article-26611", + "title": "Continual Graph Convolutional Network for Text Classification", + "track": "main", + "status": "Technical", + "abstract": "Graph convolutional network (GCN) has been successfully applied to capture global non-consecutive and long-distance semantic information for text classification. However, while GCN-based methods have shown promising results in offline evaluations, they commonly follow a seen-token-seen-document paradigm by constructing a fixed document-token graph and cannot make inferences on new documents. It is a challenge to deploy them in online systems to infer steaming text data. In this work, we present a continual GCN model (ContGCN) to generalize inferences from observed documents to unobserved documents. Concretely, we propose a new all-token-any-document paradigm to dynamically update the document-token graph in every batch during both the training and testing phases of an online system. Moreover, we design an occurrence memory module and a self-supervised contrastive learning objective to update ContGCN in a label-free manner. A 3-month A/B test on Huawei public opinion analysis system shows ContGCN achieves 8.86% performance gain compared with state-of-the-art methods. Offline experiments on five public datasets also show ContGCN can improve inference quality. 
The source code will be released at https://github.com/Jyonn/ContGCN.", + "primary_area": "speech natural language processing", + "author": "Tiandeng Wu; Qijiong Liu; Yi Cao; Yao Huang; Xiao-Ming Wu; Jiandong Ding", + "authorids": "", + "aff": "Huawei Technologies Co., Ltd., China; The Hong Kong Polytechnic University, Hong Kong; Huawei Technologies Co., Ltd., China; Huawei Technologies Co., Ltd., China; The Hong Kong Polytechnic University, Hong Kong; Huawei Technologies Co., Ltd., China", + "bibtex": "@article{Wu_Liu_Cao_Huang_Wu_Ding_2023, title={Continual Graph Convolutional Network for Text Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26611}, DOI={10.1609/aaai.v37i11.26611}, abstractNote={Graph convolutional network (GCN) has been successfully applied to capture global non-consecutive and long-distance semantic information for text classification. However, while GCN-based methods have shown promising results in offline evaluations, they commonly follow a seen-token-seen-document paradigm by constructing a fixed document-token graph and cannot make inferences on new documents. It is a challenge to deploy them in online systems to infer steaming text data. In this work, we present a continual GCN model (ContGCN) to generalize inferences from observed documents to unobserved documents. Concretely, we propose a new all-token-any-document paradigm to dynamically update the document-token graph in every batch during both the training and testing phases of an online system. Moreover, we design an occurrence memory module and a self-supervised contrastive learning objective to update ContGCN in a label-free manner. A 3-month A/B test on Huawei public opinion analysis system shows ContGCN achieves 8.86% performance gain compared with state-of-the-art methods. Offline experiments on five public datasets also show ContGCN can improve inference quality. 
The source code will be released at https://github.com/Jyonn/ContGCN.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Tiandeng and Liu, Qijiong and Cao, Yi and Huang, Yao and Wu, Xiao-Ming and Ding, Jiandong}, year={2023}, month={Jun.}, pages={13754-13762} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26611/26383", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26611", + "pdf_size": 3261364, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9256519072154680326&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "huawei.com;connect.polyu.hk;huawei.com;huawei.com;polyu.edu.hk;huawei.com", + "email": "huawei.com;connect.polyu.hk;huawei.com;huawei.com;polyu.edu.hk;huawei.com", + "github": "https://github.com/Jyonn/ContGCN", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;1;0", + "aff_unique_norm": "Huawei Technologies Co., Ltd.;The Hong Kong Polytechnic University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.huawei.com;https://www.polyu.edu.hk", + "aff_unique_abbr": "Huawei;PolyU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26157", + "title": "Continual Learning with Scaled Gradient Projection", + "track": "main", + "status": "Technical", + "abstract": "In neural networks, continual learning results in gradient interference among sequential tasks, leading to catastrophic forgetting of old tasks while learning new ones. This issue is addressed in recent methods by storing the important gradient spaces for old tasks and updating the model orthogonally during new tasks. However, such restrictive orthogonal gradient updates hamper the learning capability of the new tasks resulting in sub-optimal performance. 
To improve new learning while minimizing forgetting, in this paper we propose a Scaled Gradient Projection (SGP) method, where we combine the orthogonal gradient projections with scaled gradient steps along the important gradient spaces for the past tasks. The degree of gradient scaling along these spaces depends on the importance of the bases spanning them. We propose an efficient method for computing and accumulating importance of these bases using the singular value decomposition of the input representations for each task. We conduct extensive experiments ranging from continual image classification to reinforcement learning tasks and report better performance with less training overhead than the state-of-the-art approaches.", + "primary_area": "machine learning iii", + "author": "Gobinda Saha; Kaushik Roy", + "authorids": "", + "aff": "Elmore Family School of Electrical and Computer Engineering, Purdue University, West Lafayette, Indiana, USA; Elmore Family School of Electrical and Computer Engineering, Purdue University, West Lafayette, Indiana, USA", + "bibtex": "@article{Saha_Roy_2023, title={Continual Learning with Scaled Gradient Projection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26157}, DOI={10.1609/aaai.v37i8.26157}, abstractNote={In neural networks, continual learning results in gradient interference among sequential tasks, leading to catastrophic forgetting of old tasks while learning new ones. This issue is addressed in recent methods by storing the important gradient spaces for old tasks and updating the model orthogonally during new tasks. However, such restrictive orthogonal gradient updates hamper the learning capability of the new tasks resulting in sub-optimal performance. 
To improve new learning while minimizing forgetting, in this paper we propose a Scaled Gradient Projection (SGP) method, where we combine the orthogonal gradient projections with scaled gradient steps along the important gradient spaces for the past tasks. The degree of gradient scaling along these spaces depends on the importance of the bases spanning them. We propose an efficient method for computing and accumulating importance of these bases using the singular value decomposition of the input representations for each task. We conduct extensive experiments ranging from continual image classification to reinforcement learning tasks and report better performance with less training overhead than the state-of-the-art approaches.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Saha, Gobinda and Roy, Kaushik}, year={2023}, month={Jun.}, pages={9677-9685} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26157/25929", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26157", + "pdf_size": 747513, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=972069125031492825&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "purdue.edu;purdue.edu", + "email": "purdue.edu;purdue.edu", + "github": "https://github.com/sahagobinda/sgp", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Purdue University", + "aff_unique_dep": "Elmore Family School of Electrical and Computer Engineering", + "aff_unique_url": "https://www.purdue.edu", + "aff_unique_abbr": "Purdue", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "West Lafayette", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26294", + "title": "Continual Variational Autoencoder via Continual Generative Knowledge Distillation", + "track": "main", + "status": "Technical", + "abstract": "Humans and other living 
beings have the ability of short and long-term memorization during their entire lifespan. However, most existing Continual Learning (CL) methods can only account for short-term information when training on infinite streams of data. In this paper, we develop a new unsupervised continual learning framework consisting of two memory systems using Variational Autoencoders (VAEs). We develop a Short-Term Memory (STM), and a parameterised scalable memory implemented by a Teacher model aiming to preserve the long-term information. To incrementally enrich the Teacher's knowledge during training, we propose the Knowledge Incremental Assimilation Mechanism (KIAM), which evaluates the knowledge similarity between the STM and the already accumulated information as signals to expand the Teacher's capacity. Then we train a VAE as a Student module and propose a new Knowledge Distillation (KD) approach that gradually transfers generative knowledge from the Teacher to the Student module. To ensure the quality and diversity of knowledge in KD, we propose a new expert pruning approach that selectively removes the Teacher's redundant parameters, associated with unnecessary experts which have learnt overlapping information with other experts. This mechanism further reduces the complexity of the Teacher's module while ensuring the diversity of knowledge for the KD procedure. We show theoretically and empirically that the proposed framework can train a statistically diversified Teacher module for continual VAE learning which is applicable to learning infinite data streams.", + "primary_area": "machine learning iv", + "author": "Fei Ye; Adrian G. 
Bors", + "authorids": "", + "aff": "Department of Computer Science, University of York, York YO10 5GH, UK; Department of Computer Science, University of York, York YO10 5GH, UK", + "bibtex": "@article{Ye_Bors_2023, title={Continual Variational Autoencoder via Continual Generative Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26294}, DOI={10.1609/aaai.v37i9.26294}, abstractNote={Humans and other living beings have the ability of short and long-term memorization during their entire lifespan. However, most existing Continual Learning (CL) methods can only account for short-term information when training on infinite streams of data. In this paper, we develop a new unsupervised continual learning framework consisting of two memory systems using Variational Autoencoders (VAEs). We develop a Short-Term Memory (STM), and a parameterised scalable memory implemented by a Teacher model aiming to preserve the long-term information. To incrementally enrich the Teacher\u2019s knowledge during training, we propose the Knowledge Incremental Assimilation Mechanism (KIAM), which evaluates the knowledge similarity between the STM and the already accumulated information as signals to expand the Teacher\u2019s capacity. Then we train a VAE as a Student module and propose a new Knowledge Distillation (KD) approach that gradually transfers generative knowledge from the Teacher to the Student module. To ensure the quality and diversity of knowledge in KD, we propose a new expert pruning approach that selectively removes the Teacher\u2019s redundant parameters, associated with unnecessary experts which have learnt overlapping information with other experts. This mechanism further reduces the complexity of the Teacher\u2019s module while ensuring the diversity of knowledge for the KD procedure. 
We show theoretically and empirically that the proposed framework can train a statistically diversified Teacher module for continual VAE learning which is applicable to learning infinite data streams.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Fei and Bors, Adrian G.}, year={2023}, month={Jun.}, pages={10918-10926} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26294/26066", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26294", + "pdf_size": 1192019, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7431144113410706217&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "york.ac.uk;york.ac.uk", + "email": "york.ac.uk;york.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of York", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.york.ac.uk", + "aff_unique_abbr": "York", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "York", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25883", + "title": "Continuous Mixtures of Tractable Probabilistic Models", + "track": "main", + "status": "Technical", + "abstract": "Probabilistic models based on continuous latent spaces, such as variational autoencoders, can be understood as uncountable mixture models where components depend continuously on the latent code. They have proven to be expressive tools for generative and probabilistic modelling, but are at odds with tractable probabilistic inference, that is, computing marginals and conditionals of the represented probability distribution. 
Meanwhile, tractable probabilistic models such as probabilistic circuits (PCs) can be understood as hierarchical discrete mixture models, and thus are capable of performing exact inference efficiently but often show subpar performance in comparison to continuous latent-space models. In this paper, we investigate a hybrid approach, namely continuous mixtures of tractable models with a small latent dimension. While these models are analytically intractable, they are well amenable to numerical integration schemes based on a finite set of integration points. With a large enough number of integration points the approximation becomes de-facto exact. Moreover, for a finite set of integration points, the integration method effectively compiles the continuous mixture into a standard PC. In experiments, we show that this simple scheme proves remarkably effective, as PCs learnt this way set new state of the art for tractable models on many standard density estimation benchmarks.", + "primary_area": "machine learning i", + "author": "Alvaro H.C. Correia; Gennaro Gala; Erik Quaeghebeur; Cassio de Campos; Robert Peharz", + "authorids": "", + "aff": "Eindhoven University of Technology; Eindhoven University of Technology; Eindhoven University of Technology; Eindhoven University of Technology; Eindhoven University of Technology+Graz University of Technology", + "bibtex": "@article{Correia_Gala_Quaeghebeur_de Campos_Peharz_2023, title={Continuous Mixtures of Tractable Probabilistic Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25883}, DOI={10.1609/aaai.v37i6.25883}, abstractNote={Probabilistic models based on continuous latent spaces, such as variational autoencoders, can be understood as uncountable mixture models where components depend continuously on the latent code. 
They have proven to be expressive tools for generative and probabilistic modelling, but are at odds with tractable probabilistic inference, that is, computing marginals and conditionals of the represented probability distribution. Meanwhile, tractable probabilistic models such as probabilistic circuits (PCs) can be understood as hierarchical discrete mixture models, and thus are capable of performing exact inference efficiently but often show subpar performance in comparison to continuous latent-space models. In this paper, we investigate a hybrid approach, namely continuous mixtures of tractable models with a small latent dimension. While these models are analytically intractable, they are well amenable to numerical integration schemes based on a finite set of integration points. With a large enough number of integration points the approximation becomes de-facto exact. Moreover, for a finite set of integration points, the integration method effectively compiles the continuous mixture into a standard PC. In experiments, we show that this simple scheme proves remarkably effective, as PCs learnt this way set new state of the art for tractable models on many standard density estimation benchmarks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Correia, Alvaro H.C. 
and Gala, Gennaro and Quaeghebeur, Erik and de Campos, Cassio and Peharz, Robert}, year={2023}, month={Jun.}, pages={7244-7252} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25883/25655", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25883", + "pdf_size": 1605118, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9367259947421867688&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "tue.nl;tue.nl;tue.nl;tue.nl;tue.nl", + "email": "tue.nl;tue.nl;tue.nl;tue.nl;tue.nl", + "github": "", + "project": "arxiv.org/abs/2209.10584", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+1", + "aff_unique_norm": "Eindhoven University of Technology;Graz University of Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tue.nl;https://www.tugraz.at", + "aff_unique_abbr": "TU/e;TUGraz", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+1", + "aff_country_unique": "Netherlands;Austria" + }, + { + "id": "article-25557", + "title": "Continuous Trajectory Generation Based on Two-Stage GAN", + "track": "main", + "status": "Technical", + "abstract": "Simulating the human mobility and generating large-scale trajectories are of great use in many real-world applications, such as urban planning, epidemic spreading analysis, and geographic privacy protect. Although many previous works have studied the problem of trajectory generation, the continuity of the generated trajectories has been neglected, which makes these methods useless for practical urban simulation scenarios. To solve this problem, we propose a novel two-stage generative adversarial framework to generate the continuous trajectory on the road network, namely TS-TrajGen, which efficiently integrates prior domain knowledge of human mobility with model-free learning paradigm. 
Specifically, we build the generator under the human mobility hypothesis of the A* algorithm to learn the human mobility behavior. For the discriminator, we combine the sequential reward with the mobility yaw reward to enhance the effectiveness of the generator. Finally, we propose a novel two-stage generation process to overcome the weak point of the existing stochastic generation process. Extensive experiments on two real-world datasets and two case studies demonstrate that our framework yields significant improvements over the state-of-the-art methods.", + "primary_area": "data mining and knowledge management", + "author": "Wenjun Jiang; Wayne Xin Zhao; Jingyuan Wang; Jiawei Jiang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University, Beijing, China+Pengcheng Laboratory, Shenzhen, China+School of Economics and Management, Beihang University, Beijing, China; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China+Pengcheng Laboratory, Shenzhen, China+School of Economics and Management, Beihang University, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China", + "bibtex": "@article{Jiang_Zhao_Wang_Jiang_2023, title={Continuous Trajectory Generation Based on Two-Stage GAN}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25557}, DOI={10.1609/aaai.v37i4.25557}, abstractNote={Simulating the human mobility and generating large-scale trajectories are of great use in many real-world applications, such as urban planning, epidemic spreading analysis, and geographic privacy protect. Although many previous works have studied the problem of trajectory generation, the continuity of the generated trajectories has been neglected, which makes these methods useless for practical urban simulation scenarios. 
To solve this problem, we propose a novel two-stage generative adversarial framework to generate the continuous trajectory on the road network, namely TS-TrajGen, which efficiently integrates prior domain knowledge of human mobility with model-free learning paradigm. Specifically, we build the generator under the human mobility hypothesis of the A* algorithm to learn the human mobility behavior. For the discriminator, we combine the sequential reward with the mobility yaw reward to enhance the effectiveness of the generator. Finally, we propose a novel two-stage generation process to overcome the weak point of the existing stochastic generation process. Extensive experiments on two real-world datasets and two case studies demonstrate that our framework yields significant improvements over the state-of-the-art methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Wenjun and Zhao, Wayne Xin and Wang, Jingyuan and Jiang, Jiawei}, year={2023}, month={Jun.}, pages={4374-4382} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25557/25329", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25557", + "pdf_size": 4962664, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11589524967237632862&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;ruc.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;ruc.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+0;2;0+1+0;0", + "aff_unique_norm": "Beihang University;Pengcheng Laboratory;Renmin University of China", + "aff_unique_dep": "School of Computer Science and Engineering;;Gaoling School of Artificial Intelligence", + "aff_unique_url": "http://www.buaa.edu.cn;;http://www.ruc.edu.cn", + "aff_unique_abbr": "BUAA;;RUC", + "aff_campus_unique_index": "0+1+0;0;0+1+0;0", + "aff_campus_unique": "Beijing;Shenzhen", + 
"aff_country_unique_index": "0+0+0;0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26356", + "title": "ContraFeat: Contrasting Deep Features for Semantic Discovery", + "track": "main", + "status": "Technical", + "abstract": "StyleGAN has shown strong potential for disentangled semantic control, thanks to its special design of multi-layer intermediate latent variables. However, existing semantic discovery methods on StyleGAN rely on manual selection of modified latent layers to obtain satisfactory manipulation results, which is tedious and demanding. In this paper, we propose a model that automates this process and achieves state-of-the-art semantic discovery performance. The model consists of an attention-equipped navigator module and losses contrasting deep-feature changes. We propose two model variants, with one contrasting samples in a binary manner, and another one contrasting samples with learned prototype variation patterns. The proposed losses are computed with pretrained deep features, based on our assumption that the features implicitly possess the desired semantic variation structure including consistency and orthogonality. Additionally, we design two metrics to quantitatively evaluate the performance of semantic discovery methods on FFHQ dataset, and also show that disentangled representations can be derived via a simple training process. 
Experimentally, we show that our models achieve state-of-the-art semantic discovery results without relying on layer-wise manual selection, and these discovered semantics can be used to manipulate real-world images.", + "primary_area": "machine learning iv", + "author": "Xinqi Zhu; Chang Xu; Dacheng Tao", + "authorids": "", + "aff": "The University of Sydney, Australia; The University of Sydney, Australia; The University of Sydney, Australia", + "bibtex": "@article{Zhu_Xu_Tao_2023, title={ContraFeat: Contrasting Deep Features for Semantic Discovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26356}, DOI={10.1609/aaai.v37i9.26356}, abstractNote={StyleGAN has shown strong potential for disentangled semantic control, thanks to its special design of multi-layer intermediate latent variables. However, existing semantic discovery methods on StyleGAN rely on manual selection of modified latent layers to obtain satisfactory manipulation results, which is tedious and demanding. In this paper, we propose a model that automates this process and achieves state-of-the-art semantic discovery performance. The model consists of an attention-equipped navigator module and losses contrasting deep-feature changes. We propose two model variants, with one contrasting samples in a binary manner, and another one contrasting samples with learned prototype variation patterns. The proposed losses are computed with pretrained deep features, based on our assumption that the features implicitly possess the desired semantic variation structure including consistency and orthogonality. Additionally, we design two metrics to quantitatively evaluate the performance of semantic discovery methods on FFHQ dataset, and also show that disentangled representations can be derived via a simple training process. 
Experimentally, we show that our models achieve state-of-the-art semantic discovery results without relying on layer-wise manual selection, and these discovered semantics can be used to manipulate real-world images.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Xinqi and Xu, Chang and Tao, Dacheng}, year={2023}, month={Jun.}, pages={11470-11478} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26356/26128", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26356", + "pdf_size": 7129128, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=406563343855131903&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "uni.sydney.edu.au;sydney.edu.au;gmail.com", + "email": "uni.sydney.edu.au;sydney.edu.au;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The University of Sydney", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sydney.edu.au", + "aff_unique_abbr": "USYD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25659", + "title": "Contrastive Attention Networks for Attribution of Early Modern Print", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we develop machine learning techniques to identify unknown printers in early modern (c.~1500--1800) English printed books.\nSpecifically, we focus on matching uniquely damaged character type-imprints in anonymously printed books to works with known printers in order to provide evidence of their origins.\nUntil now, this work has been limited to manual investigations by analytical bibliographers.\nWe present a Contrastive Attention-based Metric Learning approach to identify similar damage across character image pairs, which is sensitive to very subtle differences in glyph shapes, yet 
robust to various confounding sources of noise associated with digitized historical books. \nTo overcome the scarce amount of supervised data, we design a random data synthesis procedure that aims to simulate bends, fractures, and inking variations induced by the early printing process.\nOur method successfully improves downstream damaged type-imprint matching among printed works from this period, as validated by in-domain human experts. The results of our approach on two important philosophical works from the Early Modern period demonstrate potential to extend the extant historical research about the origins and content of these books.", + "primary_area": "domain s of application", + "author": "Nikolai Vogler; Kartik Goyal; Kishore PV Reddy; Elizaveta Pertseva; Samuel V. Lemley; Christopher N. Warren; Max G'Sell; Taylor Berg-Kirkpatrick", + "authorids": "", + "aff": "University of California, San Diego; Toyota Technological Institute at Chicago; University of California, San Diego; University of California, San Diego; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; University of California, San Diego", + "bibtex": "@article{Vogler_Goyal_Reddy_Pertseva_Lemley_Warren_G\u2019Sell_Berg-Kirkpatrick_2023, title={Contrastive Attention Networks for Attribution of Early Modern Print}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25659}, DOI={10.1609/aaai.v37i4.25659}, abstractNote={In this paper, we develop machine learning techniques to identify unknown printers in early modern (c.~1500--1800) English printed books.\nSpecifically, we focus on matching uniquely damaged character type-imprints in anonymously printed books to works with known printers in order to provide evidence of their origins.\nUntil now, this work has been limited to manual investigations by analytical bibliographers.\nWe present a Contrastive Attention-based Metric Learning approach to identify similar damage across character image pairs, which 
is sensitive to very subtle differences in glyph shapes, yet robust to various confounding sources of noise associated with digitized historical books. To overcome the scarce amount of supervised data, we design a random data synthesis procedure that aims to simulate bends, fractures, and inking variations induced by the early printing process.\nOur method successfully improves downstream damaged type-imprint matching among printed works from this period, as validated by in-domain human experts. The results of our approach on two important philosophical works from the Early Modern period demonstrate potential to extend the extant historical research about the origins and content of these books.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vogler, Nikolai and Goyal, Kartik and Reddy, Kishore PV and Pertseva, Elizaveta and Lemley, Samuel V. and Warren, Christopher N. and G\u2019Sell, Max and Berg-Kirkpatrick, Taylor}, year={2023}, month={Jun.}, pages={5285-5293} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25659/25431", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25659", + "pdf_size": 5883356, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7366851783574319394&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ucsd.edu;ttic.edu; ; ;cmu.edu; ; ;ucsd.edu", + "email": "ucsd.edu;ttic.edu; ; ;cmu.edu; ; ;ucsd.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;2;2;2;0", + "aff_unique_norm": "University of California, San Diego;Toyota Technological Institute at Chicago;Carnegie Mellon University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucsd.edu;https://www.tti-chicago.org;https://www.cmu.edu", + "aff_unique_abbr": "UCSD;TTI Chicago;CMU", + "aff_campus_unique_index": "0;1;0;0;0", + "aff_campus_unique": "San Diego;Chicago;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "article-25819", + "title": "Contrastive Classification and Representation Learning with Probabilistic Interpretation", + "track": "main", + "status": "Technical", + "abstract": "Cross entropy loss has served as the main objective function for classification-based tasks. Widely deployed for learning neural network classifiers, it shows both effectiveness and a probabilistic interpretation. Recently, after the success of self supervised contrastive representation learning methods, supervised contrastive methods have been proposed to learn representations and have shown superior and more robust performance, compared to solely training with cross entropy loss. However, cross entropy loss is still needed to train the final classification layer. In this work, we investigate the possibility of learning both the representation and the classifier using one objective function that combines the robustness of contrastive learning and the probabilistic interpretation of cross entropy loss. First, we revisit a previously proposed contrastive-based objective function that approximates cross entropy loss and present a simple extension to learn the classifier jointly. Second, we propose a new version of the supervised contrastive training that learns jointly the parameters of the classifier and the backbone of the network. 
We empirically show that these proposed objective functions demonstrate state-of-the-art performance and show a significant improvement over the standard cross entropy loss with more training stability and robustness in various challenging settings.", + "primary_area": "machine learning i", + "author": "Rahaf Aljundi; Yash Patel; Milan Sulc; Nikolay Chumerin; Daniel Olmeda Reino", + "authorids": "", + "aff": "Toyota Motor Europe; Visual Recognition Group, Czech Technical University in Prague; Visual Recognition Group, Czech Technical University in Prague; Toyota Motor Europe; Toyota Motor Europe", + "bibtex": "@article{Aljundi_Patel_Sulc_Chumerin_Olmeda Reino_2023, title={Contrastive Classification and Representation Learning with Probabilistic Interpretation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25819}, DOI={10.1609/aaai.v37i6.25819}, abstractNote={Cross entropy loss has served as the main objective function for classification-based tasks. Widely deployed for learning neural network classifiers, it shows both effectiveness and a probabilistic interpretation. Recently, after the success of self supervised contrastive representation learning methods, supervised contrastive methods have been proposed to learn representations and have shown superior and more robust performance, compared to solely training with cross entropy loss. However, cross entropy loss is still needed to train the final classification layer. In this work, we investigate the possibility of learning both the representation and the classifier using one objective function that combines the robustness of contrastive learning and the probabilistic interpretation of cross entropy loss. First, we revisit a previously proposed contrastive-based objective function that approximates cross entropy loss and present a simple extension to learn the classifier jointly. 
Second, we propose a new version of the supervised contrastive training that learns jointly the parameters of the classifier and the backbone of the network. We empirically show that these proposed objective functions demonstrate state-of-the-art performance and show a significant improvement over the standard cross entropy loss with more training stability and robustness in various challenging settings.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aljundi, Rahaf and Patel, Yash and Sulc, Milan and Chumerin, Nikolay and Olmeda Reino, Daniel}, year={2023}, month={Jun.}, pages={6675-6683} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25819/25591", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25819", + "pdf_size": 163615, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17835955836763045675&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "toyota-europe.com; ; ; ; ", + "email": "toyota-europe.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;0", + "aff_unique_norm": "Toyota Motor Corporation;Czech Technical University in Prague", + "aff_unique_dep": ";Visual Recognition Group", + "aff_unique_url": "https://www.toyota-europe.com;https://www.cvut.cz", + "aff_unique_abbr": "TME;CTU", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Prague", + "aff_country_unique_index": "0;1;1;0;0", + "aff_country_unique": "Europe;Czech Republic" + }, + { + "id": "article-26370", + "title": "Contrastive Identity-Aware Learning for Multi-Agent Value Decomposition", + "track": "main", + "status": "Technical", + "abstract": "Value Decomposition (VD) aims to deduce the contributions of agents for decentralized policies in the presence of only global rewards, and has recently emerged as a powerful credit assignment paradigm for tackling cooperative Multi-Agent Reinforcement Learning (MARL) 
problems. One of the main challenges in VD is to promote diverse behaviors among agents, while existing methods directly encourage the diversity of learned agent networks with various strategies. However, we argue that these dedicated designs for agent networks are still limited by the indistinguishable VD network, leading to homogeneous agent behaviors and thus downgrading the cooperation capability. In this paper, we propose a novel Contrastive Identity-Aware learning (CIA) method, explicitly boosting the credit-level distinguishability of the VD network to break the bottleneck of multi-agent diversity. Specifically, our approach leverages contrastive learning to maximize the mutual information between the temporal credits and identity representations of different agents, encouraging the full expressiveness of credit assignment and further the emergence of individualities. The algorithm implementation of the proposed CIA module is simple yet effective that can be readily incorporated into various VD architectures. Experiments on the SMAC benchmarks and across different VD backbones demonstrate that the proposed method yields results superior to the state-of-the-art counterparts. 
Our code is available at https://github.com/liushunyu/CIA.", + "primary_area": "multiagent systems", + "author": "Shunyu Liu; Yihe Zhou; Jie Song; Tongya Zheng; Kaixuan Chen; Tongtian Zhu; Zunlei Feng; Mingli Song", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University", + "bibtex": "@article{Liu_Zhou_Song_Zheng_Chen_Zhu_Feng_Song_2023, title={Contrastive Identity-Aware Learning for Multi-Agent Value Decomposition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26370}, DOI={10.1609/aaai.v37i10.26370}, abstractNote={Value Decomposition (VD) aims to deduce the contributions of agents for decentralized policies in the presence of only global rewards, and has recently emerged as a powerful credit assignment paradigm for tackling cooperative Multi-Agent Reinforcement Learning (MARL) problems. One of the main challenges in VD is to promote diverse behaviors among agents, while existing methods directly encourage the diversity of learned agent networks with various strategies. However, we argue that these dedicated designs for agent networks are still limited by the indistinguishable VD network, leading to homogeneous agent behaviors and thus downgrading the cooperation capability. In this paper, we propose a novel Contrastive Identity-Aware learning (CIA) method, explicitly boosting the credit-level distinguishability of the VD network to break the bottleneck of multi-agent diversity. Specifically, our approach leverages contrastive learning to maximize the mutual information between the temporal credits and identity representations of different agents, encouraging the full expressiveness of credit assignment and further the emergence of individualities. The algorithm implementation of the proposed CIA module is simple yet effective that can be readily incorporated into various VD architectures. 
Experiments on the SMAC benchmarks and across different VD backbones demonstrate that the proposed method yields results superior to the state-of-the-art counterparts. Our code is available at https://github.com/liushunyu/CIA.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Shunyu and Zhou, Yihe and Song, Jie and Zheng, Tongya and Chen, Kaixuan and Zhu, Tongtian and Feng, Zunlei and Song, Mingli}, year={2023}, month={Jun.}, pages={11595-11603} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26370/26142", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26370", + "pdf_size": 752953, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4313626416393408815&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "https://github.com/liushunyu/CIA", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26596", + "title": "Contrastive Learning Reduces Hallucination in Conversations", + "track": "main", + "status": "Technical", + "abstract": "Pre-trained language models (LMs) store knowledge in their parameters and can generate informative responses when used in conversational systems. However, LMs suffer from the problem of \u201challucination:\u201d they may generate plausible-looking statements that are irrelevant or factually incorrect. To address this problem, we propose a contrastive learning scheme, named MixCL. 
A novel mixed contrastive objective is proposed to explicitly optimize the implicit knowledge elicitation process of LMs, and thus reduce their hallucination in conversations. We also examine negative sampling strategies of retrieved hard negatives and model-generated negatives. We conduct experiments on Wizard-of-Wikipedia, a public, open-domain knowledge-grounded dialogue benchmark, and assess the effectiveness of MixCL. MixCL effectively reduces the hallucination of LMs in conversations and achieves the highest performance among LM-based dialogue agents in terms of relevancy and factuality. We show that MixCL achieves comparable performance to state-of-the-art KB-based approaches while enjoying notable advantages in terms of efficiency and scalability.", + "primary_area": "speech natural language processing", + "author": "Weiwei Sun; Zhengliang Shi; Shen Gao; Pengjie Ren; Maarten de Rijke; Zhaochun Ren", + "authorids": "", + "aff": "Shandong University; Shandong University; Shandong University; Shandong University; University of Amsterdam; Shandong University", + "bibtex": "@article{Sun_Shi_Gao_Ren_de Rijke_Ren_2023, title={Contrastive Learning Reduces Hallucination in Conversations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26596}, DOI={10.1609/aaai.v37i11.26596}, abstractNote={Pre-trained language models (LMs) store knowledge in their parameters and can generate informative responses when used in conversational systems. However, LMs suffer from the problem of \u201challucination:\u201d they may generate plausible-looking statements that are irrelevant or factually incorrect. To address this problem, we propose a contrastive learning scheme, named MixCL. A novel mixed contrastive objective is proposed to explicitly optimize the implicit knowledge elicitation process of LMs, and thus reduce their hallucination in conversations. We also examine negative sampling strategies of retrieved hard negatives and model-generated negatives. 
We conduct experiments on Wizard-of-Wikipedia, a public, open-domain knowledge-grounded dialogue benchmark, and assess the effectiveness of MixCL. MixCL effectively reduces the hallucination of LMs in conversations and achieves the highest performance among LM-based dialogue agents in terms of relevancy and factuality. We show that MixCL achieves comparable performance to state-of-the-art KB-based approaches while enjoying notable advantages in terms of efficiency and scalability.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Weiwei and Shi, Zhengliang and Gao, Shen and Ren, Pengjie and de Rijke, Maarten and Ren, Zhaochun}, year={2023}, month={Jun.}, pages={13618-13626} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26596/26368", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26596", + "pdf_size": 452989, + "gs_citation": 81, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14720262193267122556&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mail.sdu.edu.cn;mail.sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;uva.nl;sdu.edu.cn", + "email": "mail.sdu.edu.cn;mail.sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;uva.nl;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Shandong University;University of Amsterdam", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.sdu.edu.cn;https://www.uva.nl", + "aff_unique_abbr": "SDU;UvA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "China;Netherlands" + }, + { + "id": "article-25887", + "title": "Contrastive Learning with the Feature Reconstruction Amplifier", + "track": "main", + "status": "Technical", + "abstract": "Contrastive learning has emerged as one of the most promising self-supervised methods. 
It can efficiently learn the transferable representations of samples through the instance-level discrimination task. In general, the performance of the contrastive learning method can be further improved by projecting the transferable high-dimensional representations into the low-dimensional feature space. This is because the model can learn more abstract discriminative information. However, when low-dimensional features cannot provide sufficient discriminative information to the model (e.g., the samples are very similar to each other), the existing contrastive learning method will be limited to a great extent. Therefore, in this paper, we propose a general module called the Feature Reconstruction Amplifier (FRA) for adding additional high-dimensional feature information to the model. Specifically, FRA reconstructs the low-dimensional feature embeddings with Gaussian noise vectors and projects them to a high-dimensional reconstruction space. In this reconstruction space, we can add additional feature information through the designed loss. We have verified the effectiveness of the module itself through exhaustive ablation experiments. 
In addition, we perform linear evaluation and transfer learning on five common visual datasets, the experimental results demonstrate that our method is superior to recent advanced contrastive learning methods.", + "primary_area": "machine learning i", + "author": "Wentao Cui; Liang Bai", + "authorids": "", + "aff": "Key Laboratory of Computational Intelligence and Chinese Information Processing of Ministry of Education, School of Computer and Information Technology, Shanxi University, Taiyuan, Shanxi, China+Institute of Intelligent Information Processing, Shanxi University, Taiyuan, 030006, Shanxi, China; Key Laboratory of Computational Intelligence and Chinese Information Processing of Ministry of Education, School of Computer and Information Technology, Shanxi University, Taiyuan, Shanxi, China+Institute of Intelligent Information Processing, Shanxi University, Taiyuan, 030006, Shanxi, China", + "bibtex": "@article{Cui_Bai_2023, title={Contrastive Learning with the Feature Reconstruction Amplifier}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25887}, DOI={10.1609/aaai.v37i6.25887}, abstractNote={Contrastive learning has emerged as one of the most promising self-supervised methods. It can efficiently learn the transferable representations of samples through the instance-level discrimination task. In general, the performance of the contrastive learning method can be further improved by projecting the transferable high-dimensional representations into the low-dimensional feature space. This is because the model can learn more abstract discriminative information. However, when low-dimensional features cannot provide sufficient discriminative information to the model (e.g., the samples are very similar to each other), the existing contrastive learning method will be limited to a great extent. 
Therefore, in this paper, we propose a general module called the Feature Reconstruction Amplifier (FRA) for adding additional high-dimensional feature information to the model. Specifically, FRA reconstructs the low-dimensional feature embeddings with Gaussian noise vectors and projects them to a high-dimensional reconstruction space. In this reconstruction space, we can add additional feature information through the designed loss. We have verified the effectiveness of the module itself through exhaustive ablation experiments. In addition, we perform linear evaluation and transfer learning on five common visual datasets, the experimental results demonstrate that our method is superior to recent advanced contrastive learning methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Wentao and Bai, Liang}, year={2023}, month={Jun.}, pages={7279-7287} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25887/25659", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25887", + "pdf_size": 1692594, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14118775848077985245&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "qq.com;sxu.edu.cn", + "email": "qq.com;sxu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "Shanxi University", + "aff_unique_dep": "School of Computer and Information Technology", + "aff_unique_url": "http://www.sxu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Taiyuan", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25373", + "title": "Contrastive Masked Autoencoders for Self-Supervised Video Hashing", + "track": "main", + "status": "Technical", + "abstract": "Self-Supervised Video Hashing (SSVH) models learn to generate short binary representations for videos 
without ground-truth supervision, facilitating large-scale video retrieval efficiency and attracting increasing research attention. The success of SSVH lies in the understanding of video content and the ability to capture the semantic relation among unlabeled videos. Typically, state-of-the-art SSVH methods consider these two points in a two-stage training pipeline, where they firstly train an auxiliary network by instance-wise mask-and-predict tasks and secondly train a hashing model to preserve the pseudo-neighborhood structure transferred from the auxiliary network. This consecutive training strategy is inflexible and also unnecessary. In this paper, we propose a simple yet effective one-stage SSVH method called ConMH, which incorporates video semantic information and video similarity relationship understanding in a single stage. To capture video semantic information for better hashing learning, we adopt an encoder-decoder structure to reconstruct the video from its temporal-masked frames. Particularly, we find that a higher masking ratio helps video understanding. Besides, we fully exploit the similarity relationship between videos by maximizing agreement between two augmented views of a video, which contributes to more discriminative and robust hash codes. Extensive experiments on three large-scale video datasets (i.e., FCVID, ActivityNet and YFCC) indicate that ConMH achieves state-of-the-art results. 
Code is available at https://github.com/huangmozhi9527/ConMH.", + "primary_area": "computer vision iii", + "author": "Yuting Wang; Jinpeng Wang; Bin Chen; Ziyun Zeng; Shu-Tao Xia", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University+Research Center of Artificial Intelligence, Peng Cheng Laboratory; Tsinghua Shenzhen International Graduate School, Tsinghua University+Research Center of Artificial Intelligence, Peng Cheng Laboratory; Harbin Institute of Technology, Shenzhen; Tsinghua Shenzhen International Graduate School, Tsinghua University+Research Center of Artificial Intelligence, Peng Cheng Laboratory; Tsinghua Shenzhen International Graduate School, Tsinghua University+Research Center of Artificial Intelligence, Peng Cheng Laboratory", + "bibtex": "@article{Wang_Wang_Chen_Zeng_Xia_2023, title={Contrastive Masked Autoencoders for Self-Supervised Video Hashing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25373}, DOI={10.1609/aaai.v37i3.25373}, abstractNote={Self-Supervised Video Hashing (SSVH) models learn to generate short binary representations for videos without ground-truth supervision, facilitating large-scale video retrieval efficiency and attracting increasing research attention. The success of SSVH lies in the understanding of video content and the ability to capture the semantic relation among unlabeled videos. Typically, state-of-the-art SSVH methods consider these two points in a two-stage training pipeline, where they firstly train an auxiliary network by instance-wise mask-and-predict tasks and secondly train a hashing model to preserve the pseudo-neighborhood structure transferred from the auxiliary network. This consecutive training strategy is inflexible and also unnecessary. In this paper, we propose a simple yet effective one-stage SSVH method called ConMH, which incorporates video semantic information and video similarity relationship understanding in a single stage. 
To capture video semantic information for better hashing learning, we adopt an encoder-decoder structure to reconstruct the video from its temporal-masked frames. Particularly, we find that a higher masking ratio helps video understanding. Besides, we fully exploit the similarity relationship between videos by maximizing agreement between two augmented views of a video, which contributes to more discriminative and robust hash codes. Extensive experiments on three large-scale video datasets (i.e., FCVID, ActivityNet and YFCC) indicate that ConMH achieves state-of-the-art results. Code is available at https://github.com/huangmozhi9527/ConMH.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yuting and Wang, Jinpeng and Chen, Bin and Zeng, Ziyun and Xia, Shu-Tao}, year={2023}, month={Jun.}, pages={2733-2741} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25373/25145", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25373", + "pdf_size": 4489938, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11395361876465878000&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;hit.edu.cn;mails.tsinghua.edu.cn;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;hit.edu.cn;mails.tsinghua.edu.cn;sz.tsinghua.edu.cn", + "github": "https://github.com/huangmozhi9527/ConMH", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;0+1;0+1", + "aff_unique_norm": "Tsinghua University;Peng Cheng Laboratory;Harbin Institute of Technology", + "aff_unique_dep": "International Graduate School;Research Center of Artificial Intelligence;", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.pcl.ac.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "THU;;HIT", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": 
"0+0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25424", + "title": "Contrastive Multi-Task Dense Prediction", + "track": "main", + "status": "Technical", + "abstract": "This paper targets the problem of multi-task dense prediction\nwhich aims to achieve simultaneous learning and inference on\na bunch of multiple dense prediction tasks in a single framework. A core objective in design is how to effectively model\ncross-task interactions to achieve a comprehensive improvement on different tasks based on their inherent complementarity and consistency. Existing works typically design extra\nexpensive distillation modules to perform explicit interaction\ncomputations among different task-specific features in both\ntraining and inference, bringing difficulty in adaptation for\ndifferent task sets, and reducing efficiency due to clearly increased size of multi-task models. In contrast, we introduce\nfeature-wise contrastive consistency into modeling the cross-task interactions for multi-task dense prediction. We propose\na novel multi-task contrastive regularization method based on\nthe consistency to effectively boost the representation learning of the different sub-tasks, which can also be easily generalized to different multi-task dense prediction frameworks,\nand costs no additional computation in the inference. Extensive experiments on two challenging datasets (i.e. 
NYUD-v2\nand Pascal-Context) clearly demonstrate the superiority of the\nproposed multi-task contrastive learning approach for dense\npredictions, establishing new state-of-the-art performances.", + "primary_area": "computer vision iii", + "author": "Siwei Yang; Hanrong Ye; Dan Xu", + "authorids": "", + "aff": "Key Laboratory of Embedded System and Service Computing, Tongji University + Hong Kong University of Science and Technology; Hong Kong University of Science and Technology; Hong Kong University of Science and Technology", + "bibtex": "@article{Yang_Ye_Xu_2023, title={Contrastive Multi-Task Dense Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25424}, DOI={10.1609/aaai.v37i3.25424}, abstractNote={This paper targets the problem of multi-task dense prediction\nwhich aims to achieve simultaneous learning and inference on\na bunch of multiple dense prediction tasks in a single framework. A core objective in design is how to effectively model\ncross-task interactions to achieve a comprehensive improvement on different tasks based on their inherent complementarity and consistency. Existing works typically design extra\nexpensive distillation modules to perform explicit interaction\ncomputations among different task-specific features in both\ntraining and inference, bringing difficulty in adaptation for\ndifferent task sets, and reducing efficiency due to clearly increased size of multi-task models. In contrast, we introduce\nfeature-wise contrastive consistency into modeling the cross-task interactions for multi-task dense prediction. We propose\na novel multi-task contrastive regularization method based on\nthe consistency to effectively boost the representation learning of the different sub-tasks, which can also be easily generalized to different multi-task dense prediction frameworks,\nand costs no additional computation in the inference. Extensive experiments on two challenging datasets (i.e. 
NYUD-v2\nand Pascal-Context) clearly demonstrate the superiority of the\nproposed multi-task contrastive learning approach for dense\npredictions, establishing new state-of-the-art performances.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Siwei and Ye, Hanrong and Xu, Dan}, year={2023}, month={Jun.}, pages={3190-3197} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25424/25196", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25424", + "pdf_size": 6296873, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8307107432776557193&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;cse.ust.hk;cse.ust.hk", + "email": "gmail.com;cse.ust.hk;cse.ust.hk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;1", + "aff_unique_norm": "Tongji University;Hong Kong University of Science and Technology", + "aff_unique_dep": "Key Laboratory of Embedded System and Service Computing;", + "aff_unique_url": "http://www.tongji.edu.cn;https://www.ust.hk", + "aff_unique_abbr": ";HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26253", + "title": "Contrastive Open Set Recognition", + "track": "main", + "status": "Technical", + "abstract": "In conventional recognition tasks, models are only trained to recognize learned targets, but it is usually difficult to collect training examples of all potential categories. In the testing phase, when models receive test samples from unknown classes, they mistakenly classify the samples into known classes. Open set recognition (OSR) is a more realistic recognition task, which requires the classifier to detect unknown test samples while keeping a high classification accuracy of known classes. 
In this paper, we study how to improve the OSR performance of deep neural networks from the perspective of representation learning. We employ supervised contrastive learning to improve the quality of feature representations, propose a new supervised contrastive learning method that enables the model to learn from soft training targets, and design an OSR framework on its basis. With the proposed method, we are able to make use of label smoothing and mixup when training deep neural networks contrastively, so as to improve both the robustness of outlier detection in OSR tasks and the accuracy in conventional classification tasks. We validate our method on multiple benchmark datasets and testing scenarios, achieving experimental results that verify the effectiveness of the proposed method.", + "primary_area": "machine learning iv", + "author": "Baile Xu; Furao Shen; Jian Zhao", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University + Department of Computer Science and Technology, Nanjing University + School of Artificial Intelligence, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University + Department of Computer Science and Technology, Nanjing University + School of Artificial Intelligence, Nanjing University; School of Electronic Science and Engineering, Nanjing University", + "bibtex": "@article{Xu_Shen_Zhao_2023, title={Contrastive Open Set Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26253}, DOI={10.1609/aaai.v37i9.26253}, abstractNote={In conventional recognition tasks, models are only trained to recognize learned targets, but it is usually difficult to collect training examples of all potential categories. In the testing phase, when models receive test samples from unknown classes, they mistakenly classify the samples into known classes. 
Open set recognition (OSR) is a more realistic recognition task, which requires the classifier to detect unknown test samples while keeping a high classification accuracy of known classes. In this paper, we study how to improve the OSR performance of deep neural networks from the perspective of representation learning. We employ supervised contrastive learning to improve the quality of feature representations, propose a new supervised contrastive learning method that enables the model to learn from soft training targets, and design an OSR framework on its basis. With the proposed method, we are able to make use of label smoothing and mixup when training deep neural networks contrastively, so as to improve both the robustness of outlier detection in OSR tasks and the accuracy in conventional classification tasks. We validate our method on multiple benchmark datasets and testing scenarios, achieving experimental results that verify the effectiveness of the proposed method.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Baile and Shen, Furao and Zhao, Jian}, year={2023}, month={Jun.}, pages={10546-10556} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26253/26025", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26253", + "pdf_size": 3127127, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13538178041494831448&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0+0;0+0+0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing University", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Nanjing", + 
"aff_country_unique_index": "0+0+0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25546", + "title": "Contrastive Pre-training with Adversarial Perturbations for Check-In Sequence Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "A core step of mining human mobility data is to learn accurate representations for user-generated check-in sequences. The learned representations should be able to fully describe the spatial-temporal mobility patterns of users and the high-level semantics of traveling. However, existing check-in sequence representation learning is usually implicitly achieved by end-to-end models designed for specific downstream tasks, resulting in unsatisfactory generalizable abilities and poor performance. Besides, although the sequence representation learning models that follow the contrastive learning pre-training paradigm have achieved breakthroughs in many fields like NLP, they fail to simultaneously consider the unique spatial-temporal characteristics of check-in sequences and need manual adjustments on the data augmentation strategies. So, directly applying them to check-in sequences cannot yield a meaningful pretext task. To this end, in this paper we propose a contrastive pre-training model with adversarial perturbations for check-in sequence representation learning (CACSR). Firstly, we design a novel spatial-temporal augmentation block for disturbing the spatial-temporal features of check-in sequences in the latent space to relieve the stress of designing manual data augmentation strategies. Secondly, to construct an effective contrastive pretext task, we generate \u201chard\u201d positive and negative pairs for the check-in sequence by adversarial training. These two designs encourage the model to capture the high-level spatial-temporal patterns and semantics of check-in sequences while ignoring the noisy and unimportant details. 
We demonstrate the effectiveness and versatility of CACSR on two kinds of downstream tasks using three real-world datasets. The results show that our model outperforms both the state-of-the-art pre-training methods and the end-to-end models.", + "primary_area": "data mining and knowledge management", + "author": "Letian Gong; Youfang Lin; Shengnan Guo; Yan Lin; Tianyi Wang; Erwen Zheng; Zeyu Zhou; Huaiyu Wan", + "authorids": "", + "aff": "School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China+Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China+Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China+Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China+Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China+Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing, China", + "bibtex": "@article{Gong_Lin_Guo_Lin_Wang_Zheng_Zhou_Wan_2023, title={Contrastive Pre-training with Adversarial Perturbations for Check-In Sequence Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25546}, DOI={10.1609/aaai.v37i4.25546}, abstractNote={A core step of mining human mobility data is to learn accurate representations for user-generated check-in sequences. 
The learned representations should be able to fully describe the spatial-temporal mobility patterns of users and the high-level semantics of traveling. However, existing check-in sequence representation learning is usually implicitly achieved by end-to-end models designed for specific downstream tasks, resulting in unsatisfactory generalizable abilities and poor performance. Besides, although the sequence representation learning models that follow the contrastive learning pre-training paradigm have achieved breakthroughs in many fields like NLP, they fail to simultaneously consider the unique spatial-temporal characteristics of check-in sequences and need manual adjustments on the data augmentation strategies. So, directly applying them to check-in sequences cannot yield a meaningful pretext task. To this end, in this paper we propose a contrastive pre-training model with adversarial perturbations for check-in sequence representation learning (CACSR). Firstly, we design a novel spatial-temporal augmentation block for disturbing the spatial-temporal features of check-in sequences in the latent space to relieve the stress of designing manual data augmentation strategies. Secondly, to construct an effective contrastive pretext task, we generate \u201chard\u201d positive and negative pairs for the check-in sequence by adversarial training. These two designs encourage the model to capture the high-level spatial-temporal patterns and semantics of check-in sequences while ignoring the noisy and unimportant details. We demonstrate the effectiveness and versatility of CACSR on two kinds of downstream tasks using three real-world datasets. 
The results show that our model outperforms both the state-of-the-art pre-training methods and the end-to-end models.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gong, Letian and Lin, Youfang and Guo, Shengnan and Lin, Yan and Wang, Tianyi and Zheng, Erwen and Zhou, Zeyu and Wan, Huaiyu}, year={2023}, month={Jun.}, pages={4276-4283} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25546/25318", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25546", + "pdf_size": 2950781, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2054595700655294497&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;0+1;0;0;0;0+1", + "aff_unique_norm": "Beijing Jiaotong University;Beijing Key Laboratory of Traffic Data Analysis and Mining", + "aff_unique_dep": "School of Computer and Information Technology;Traffic Data Analysis and Mining", + "aff_unique_url": "http://www.bjtu.edu.cn;", + "aff_unique_abbr": "BJTU;", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0;0;0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26170", + "title": "Contrastive Predictive Autoencoders for Dynamic Point Cloud Self-Supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "We present a new self-supervised paradigm on point cloud sequence understanding. 
Inspired by the discriminative and generative self-supervised methods, we design two tasks, namely point cloud sequence based Contrastive Prediction and Reconstruction (CPR), to collaboratively learn more comprehensive spatiotemporal representations. Specifically, dense point cloud segments are first input into an encoder to extract embeddings. All but the last ones are then aggregated by a context-aware autoregressor to make predictions for the last target segment. Towards the goal of modeling multi-granularity structures, local and global contrastive learning are performed between predictions and targets. To further improve the generalization of representations, the predictions are also utilized to reconstruct raw point cloud sequences by a decoder, where point cloud colorization is employed to discriminate against different frames. By combining classic contrast and reconstruction paradigms, it makes the learned representations with both global discrimination and local perception. We conduct experiments on four point cloud sequence benchmarks, and report the results on action recognition and gesture recognition under multiple experimental settings. The performances are comparable with supervised methods and show powerful transferability.", + "primary_area": "machine learning iii", + "author": "Xiaoxiao Sheng; Zhiqiang Shen; Gang Xiao", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Sheng_Shen_Xiao_2023, title={Contrastive Predictive Autoencoders for Dynamic Point Cloud Self-Supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26170}, DOI={10.1609/aaai.v37i8.26170}, abstractNote={We present a new self-supervised paradigm on point cloud sequence understanding. 
Inspired by the discriminative and generative self-supervised methods, we design two tasks, namely point cloud sequence based Contrastive Prediction and Reconstruction (CPR), to collaboratively learn more comprehensive spatiotemporal representations. Specifically, dense point cloud segments are first input into an encoder to extract embeddings. All but the last ones are then aggregated by a context-aware autoregressor to make predictions for the last target segment. Towards the goal of modeling multi-granularity structures, local and global contrastive learning are performed between predictions and targets. To further improve the generalization of representations, the predictions are also utilized to reconstruct raw point cloud sequences by a decoder, where point cloud colorization is employed to discriminate against different frames. By combining classic contrast and reconstruction paradigms, it makes the learned representations with both global discrimination and local perception. We conduct experiments on four point cloud sequence benchmarks, and report the results on action recognition and gesture recognition under multiple experimental settings. 
The performances are comparable with supervised methods and show powerful transferability.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sheng, Xiaoxiao and Shen, Zhiqiang and Xiao, Gang}, year={2023}, month={Jun.}, pages={9802-9810} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26170/25942", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26170", + "pdf_size": 394650, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17799919405134210433&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26733", + "title": "Contrastive Self-Supervised Learning Leads to Higher Adversarial Susceptibility", + "track": "aaai special track", + "status": "Technical", + "abstract": "Contrastive self-supervised learning (CSL) has managed to match or surpass the performance of supervised learning in image and video classification. However, it is still largely unknown if the nature of the representations induced by the two learning paradigms is similar. We investigate this under the lens of adversarial robustness. Our analysis of the problem reveals that CSL has intrinsically higher sensitivity to perturbations over supervised learning. We identify the uniform distribution of data representation over a unit hypersphere in the CSL representation space as the key contributor to this phenomenon. 
We establish that this is a result of the presence of false negative pairs in the training process, which increases model sensitivity to input perturbations. Our finding is supported by extensive experiments for image and video classification using adversarial perturbations and other input corruptions. We devise a strategy to detect and remove false negative pairs that is simple, yet effective in improving model robustness with CSL training. We close up to 68% of the robustness gap between CSL and its supervised counterpart. Finally, we contribute to adversarial learning by incorporating our method in CSL. We demonstrate an average gain of about 5% over two different state-of-the-art methods in this domain.", + "primary_area": "safe and robust ai", + "author": "Rohit Gupta; Naveed Akhtar; Ajmal Mian; Mubarak Shah", + "authorids": "", + "aff": "Center for Research in Computer Vision, University of Central Florida; University of Western Australia; University of Western Australia; Center for Research in Computer Vision, University of Central Florida", + "bibtex": "@article{Gupta_Akhtar_Mian_Shah_2023, title={Contrastive Self-Supervised Learning Leads to Higher Adversarial Susceptibility}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26733}, DOI={10.1609/aaai.v37i12.26733}, abstractNote={Contrastive self-supervised learning (CSL) has managed to match or surpass the performance of supervised learning in image and video classification. However, it is still largely unknown if the nature of the representations induced by the two learning paradigms is similar. We investigate this under the lens of adversarial robustness. Our analysis of the problem reveals that CSL has intrinsically higher sensitivity to perturbations over supervised learning. We identify the uniform distribution of data representation over a unit hypersphere in the CSL representation space as the key contributor to this phenomenon. 
We establish that this is a result of the presence of false negative pairs in the training process, which increases model sensitivity to input perturbations. Our finding is supported by extensive experiments for image and video classification using adversarial perturbations and other input corruptions. We devise a strategy to detect and remove false negative pairs that is simple, yet effective in improving model robustness with CSL training. We close up to 68% of the robustness gap between CSL and its supervised counterpart. Finally, we contribute to adversarial learning by incorporating our method in CSL. We demonstrate an average gain of about 5% over two different state-of-the-art methods in this domain.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gupta, Rohit and Akhtar, Naveed and Mian, Ajmal and Shah, Mubarak}, year={2023}, month={Jun.}, pages={14838-14846} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26733/26505", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26733", + "pdf_size": 1045512, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=671208662713767878&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "knights.ucf.edu;uwa.edu.au;uwa.edu.au;crcv.ucf.edu", + "email": "knights.ucf.edu;uwa.edu.au;uwa.edu.au;crcv.ucf.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "University of Central Florida;University of Western Australia", + "aff_unique_dep": "Center for Research in Computer Vision;", + "aff_unique_url": "https://www.ucf.edu;https://www.uwa.edu.au", + "aff_unique_abbr": "UCF;UWA", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Orlando;", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "United States;Australia" + }, + { + "id": "article-25360", + "title": "Controllable Image Captioning via Prompting", + "track": "main", + 
"status": "Technical", + "abstract": "Despite the remarkable progress of image captioning, existing captioners typically lack the controllable capability to generate desired image captions, e.g., describing the image in a rough or detailed manner, in a factual or emotional view, etc. In this paper, we show that a unified model is qualified to perform well in diverse domains and freely switch among multiple styles. Such a controllable capability is achieved by embedding the prompt learning into the image captioning framework. To be specific, we design a set of prompts to fine-tune the pre-trained image captioner. These prompts allow the model to absorb stylized data from different domains for joint training, without performance degradation in each domain. Furthermore, we optimize the prompts with learnable vectors in the continuous word embedding space, avoiding the heuristic prompt engineering and meanwhile exhibiting superior performance. In the inference stage, our model is able to generate desired stylized captions by choosing the corresponding prompts. Extensive experiments verify the controllable capability of the proposed method. Notably, we achieve outstanding performance on two diverse image captioning benchmarks including COCO Karpathy split and TextCaps using a unified model.", + "primary_area": "computer vision ii", + "author": "Ning Wang; Jiahao Xie; Jihao Wu; Mingbo Jia; Linlin Li", + "authorids": "", + "aff": "Huawei Inc.; Huawei Inc.; Huawei Inc.; Huawei Inc.; Huawei Inc.", + "bibtex": "@article{Wang_Xie_Wu_Jia_Li_2023, title={Controllable Image Captioning via Prompting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25360}, DOI={10.1609/aaai.v37i2.25360}, abstractNote={Despite the remarkable progress of image captioning, existing captioners typically lack the controllable capability to generate desired image captions, e.g., describing the image in a rough or detailed manner, in a factual or emotional view, etc. 
In this paper, we show that a unified model is qualified to perform well in diverse domains and freely switch among multiple styles. Such a controllable capability is achieved by embedding the prompt learning into the image captioning framework. To be specific, we design a set of prompts to fine-tune the pre-trained image captioner. These prompts allow the model to absorb stylized data from different domains for joint training, without performance degradation in each domain. Furthermore, we optimize the prompts with learnable vectors in the continuous word embedding space, avoiding the heuristic prompt engineering and meanwhile exhibiting superior performance. In the inference stage, our model is able to generate desired stylized captions by choosing the corresponding prompts. Extensive experiments verify the controllable capability of the proposed method. Notably, we achieve outstanding performance on two diverse image captioning benchmarks including COCO Karpathy split and TextCaps using a unified model.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Ning and Xie, Jiahao and Wu, Jihao and Jia, Mingbo and Li, Linlin}, year={2023}, month={Jun.}, pages={2617-2625} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25360/25132", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25360", + "pdf_size": 1814296, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5753282336043475969&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.ustc.edu.cn;tongji.edu.cn; fwujihao; jiamingbo;huawei.com", + "email": "mail.ustc.edu.cn;tongji.edu.cn; fwujihao; jiamingbo;huawei.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Huawei", + "aff_unique_dep": "", + "aff_unique_url": "https://www.huawei.com", + "aff_unique_abbr": "Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": 
"", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25345", + "title": "Controlling Class Layout for Deep Ordinal Classification via Constrained Proxies Learning", + "track": "main", + "status": "Technical", + "abstract": "For deep ordinal classification, learning a well-structured feature space specific to ordinal classification is helpful to properly capture the ordinal nature among classes. Intuitively, when Euclidean distance metric is used, an ideal ordinal layout in feature space would be that the sample clusters are arranged in class order along a straight line in space. However, enforcing samples to conform to a specific layout in the feature space is a challenging problem. To address this problem, in this paper, we propose a novel Constrained Proxies Learning (CPL) method, which can learn a proxy for each ordinal class and then adjusts the global layout of classes by constraining these proxies. Specifically, we propose two kinds of strategies: hard layout constraint and soft layout constraint. The hard layout constraint is realized by directly controlling the generation of proxies to force them to be placed in a strict linear layout or semicircular layout (i.e., two instantiations of strict ordinal layout). The soft layout constraint is realized by constraining that the proxy layout should always produce unimodal proxy-to-proxies similarity distribution for each proxy (i.e., to be a relaxed ordinal layout). 
Experiments show that the proposed CPL method outperforms previous deep ordinal classification methods under the same setting of feature extractor.", + "primary_area": "computer vision ii", + "author": "Cong Wang; Zhiwei Jiang; Yafeng Yin; Zifeng Cheng; Shiping Ge; Qing Gu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Wang_Jiang_Yin_Cheng_Ge_Gu_2023, title={Controlling Class Layout for Deep Ordinal Classification via Constrained Proxies Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25345}, DOI={10.1609/aaai.v37i2.25345}, abstractNote={For deep ordinal classification, learning a well-structured feature space specific to ordinal classification is helpful to properly capture the ordinal nature among classes. Intuitively, when Euclidean distance metric is used, an ideal ordinal layout in feature space would be that the sample clusters are arranged in class order along a straight line in space. However, enforcing samples to conform to a specific layout in the feature space is a challenging problem. To address this problem, in this paper, we propose a novel Constrained Proxies Learning (CPL) method, which can learn a proxy for each ordinal class and then adjusts the global layout of classes by constraining these proxies. Specifically, we propose two kinds of strategies: hard layout constraint and soft layout constraint. 
The hard layout constraint is realized by directly controlling the generation of proxies to force them to be placed in a strict linear layout or semicircular layout (i.e., two instantiations of strict ordinal layout). The soft layout constraint is realized by constraining that the proxy layout should always produce unimodal proxy-to-proxies similarity distribution for each proxy (i.e., to be a relaxed ordinal layout). Experiments show that the proposed CPL method outperforms previous deep ordinal classification methods under the same setting of feature extractor.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Cong and Jiang, Zhiwei and Yin, Yafeng and Cheng, Zifeng and Ge, Shiping and Gu, Qing}, year={2023}, month={Jun.}, pages={2483-2491} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25345/25117", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25345", + "pdf_size": 1595031, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1660804717967504432&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "smail.nju.edu.cn;nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25456", + "title": "ConvMatch: Rethinking Network Design for Two-View Correspondence Learning", + "track": "main", + "status": "Technical", + "abstract": "Multilayer perceptron (MLP) has been widely used in two-view 
correspondence learning for only unordered correspondences provided, and it extracts deep features from individual correspondence effectively. However, the problem of lacking context information limits its performance and hence, many extra complex blocks are designed to capture such information in the follow-up studies. In this paper, from a novel perspective, we design a correspondence learning network called ConvMatch that for the first time can leverage convolutional neural network (CNN) as the backbone to capture better context, thus avoiding the complex design of extra blocks. Specifically, with the observation that sparse motion vectors and dense motion field can be converted into each other with interpolating and sampling, we regularize the putative motion vectors by estimating dense motion field implicitly, then rectify the errors caused by outliers in local areas with CNN, and finally obtain correct motion vectors from the rectified motion field. Extensive experiments reveal that ConvMatch with a simple CNN backbone consistently outperforms state-of-the-arts including MLP-based methods for relative pose estimation and homography estimation, and shows promising generalization ability to different datasets and descriptors. 
Our code is publicly available at https://github.com/SuhZhang/ConvMatch.", + "primary_area": "computer vision iii", + "author": "Shihua Zhang; Jiayi Ma", + "authorids": "", + "aff": "Electronic Information School, Wuhan University, Wuhan 430072, China; Electronic Information School, Wuhan University, Wuhan 430072, China", + "bibtex": "@article{Zhang_Ma_2023, title={ConvMatch: Rethinking Network Design for Two-View Correspondence Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25456}, DOI={10.1609/aaai.v37i3.25456}, abstractNote={Multilayer perceptron (MLP) has been widely used in two-view correspondence learning for only unordered correspondences provided, and it extracts deep features from individual correspondence effectively. However, the problem of lacking context information limits its performance and hence, many extra complex blocks are designed to capture such information in the follow-up studies. In this paper, from a novel perspective, we design a correspondence learning network called ConvMatch that for the first time can leverage convolutional neural network (CNN) as the backbone to capture better context, thus avoiding the complex design of extra blocks. Specifically, with the observation that sparse motion vectors and dense motion field can be converted into each other with interpolating and sampling, we regularize the putative motion vectors by estimating dense motion field implicitly, then rectify the errors caused by outliers in local areas with CNN, and finally obtain correct motion vectors from the rectified motion field. Extensive experiments reveal that ConvMatch with a simple CNN backbone consistently outperforms state-of-the-arts including MLP-based methods for relative pose estimation and homography estimation, and shows promising generalization ability to different datasets and descriptors. 
Our code is publicly available at https://github.com/SuhZhang/ConvMatch.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Shihua and Ma, Jiayi}, year={2023}, month={Jun.}, pages={3472-3479} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25456/25228", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25456", + "pdf_size": 5235741, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14378812826318884791&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "https://github.com/SuhZhang/ConvMatch", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Wuhan University", + "aff_unique_dep": "Electronic Information School", + "aff_unique_url": "http://www.whu.edu.cn", + "aff_unique_abbr": "WHU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26595", + "title": "ConvNTM: Conversational Neural Topic Model", + "track": "main", + "status": "Technical", + "abstract": "Topic models have been thoroughly investigated for multiple years due to their great potential in analyzing and understanding texts. Recently, researchers combine the study of topic models with deep learning techniques, known as Neural Topic Models (NTMs). However, existing NTMs are mainly tested based on general document modeling without considering different textual analysis scenarios. We assume that there are different characteristics to model topics in different textual analysis tasks. In this paper, we propose a Conversational Neural Topic Model (ConvNTM) designed in particular for the conversational scenario. 
Unlike the general document topic modeling, a conversation session lasts for multiple turns: each short-text utterance complies with a single topic distribution and these topic distributions are dependent across turns. Moreover, there are roles in conversations, a.k.a., speakers and addressees. Topic distributions are partially determined by such roles in conversations. We take these factors into account to model topics in conversations via the multi-turn and multi-role formulation. We also leverage the word co-occurrence relationship as a new training objective to further improve topic quality. Comprehensive experimental results based on the benchmark datasets demonstrate that our proposed ConvNTM achieves the best performance both in topic modeling and in typical downstream tasks within conversational research (i.e., dialogue act classification and dialogue response generation).", + "primary_area": "speech natural language processing", + "author": "Hongda Sun; Quan Tu; Jinpeng Li; Rui Yan", + "authorids": "", + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China; Wangxuan Institute of Computer Technology, Peking University; Gaoling School of Artificial Intelligence, Renmin University of China+Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education", + "bibtex": "@article{Sun_Tu_Li_Yan_2023, title={ConvNTM: Conversational Neural Topic Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26595}, DOI={10.1609/aaai.v37i11.26595}, abstractNote={Topic models have been thoroughly investigated for multiple years due to their great potential in analyzing and understanding texts. Recently, researchers combine the study of topic models with deep learning techniques, known as Neural Topic Models (NTMs). 
However, existing NTMs are mainly tested based on general document modeling without considering different textual analysis scenarios. We assume that there are different characteristics to model topics in different textual analysis tasks. In this paper, we propose a Conversational Neural Topic Model (ConvNTM) designed in particular for the conversational scenario. Unlike the general document topic modeling, a conversation session lasts for multiple turns: each short-text utterance complies with a single topic distribution and these topic distributions are dependent across turns. Moreover, there are roles in conversations, a.k.a., speakers and addressees. Topic distributions are partially determined by such roles in conversations. We take these factors into account to model topics in conversations via the multi-turn and multi-role formulation. We also leverage the word co-occurrence relationship as a new training objective to further improve topic quality. Comprehensive experimental results based on the benchmark datasets demonstrate that our proposed ConvNTM achieves the best performance both in topic modeling and in typical downstream tasks within conversational research (i.e., dialogue act classification and dialogue response generation).}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Hongda and Tu, Quan and Li, Jinpeng and Yan, Rui}, year={2023}, month={Jun.}, pages={13609-13617} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26595/26367", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26595", + "pdf_size": 875922, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3714903711644416034&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;stu.pku.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;stu.pku.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": 
"0;0;1;0+2", + "aff_unique_norm": "Renmin University of China;Peking University;Ministry of Education", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;Wangxuan Institute of Computer Technology;Engineering Research Center of Next-Generation Intelligent Search and Recommendation", + "aff_unique_url": "http://www.ruc.edu.cn;http://www.pku.edu.cn;", + "aff_unique_abbr": "RUC;PKU;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26485", + "title": "Converge to the Truth: Factual Error Correction via Iterative Constrained Editing", + "track": "main", + "status": "Technical", + "abstract": "Given a possibly false claim sentence, how can we automatically correct it with minimal editing? Existing methods either require a large number of pairs of false and corrected claims for supervised training or do not handle well errors spanning over multiple tokens within an utterance. In this paper, we propose VENCE, a novel method for factual error correction (FEC) with minimal edits. VENCE formulates the FEC problem as iterative sampling editing actions with respect to a target density function. We carefully design the target function with predicted truthfulness scores from an offline trained fact verification model. VENCE samples the most probable editing positions based on back-calculated gradients of the truthfulness score concerning input tokens and the editing actions using a distantly-supervised language model (T5). 
Experiments on a public dataset show that VENCE improves the well-adopted SARI metric by 5.3 (or a relative improvement of 11.8%) over the previous best distantly-supervised methods.", + "primary_area": "speech natural language processing", + "author": "Jiangjie Chen; Rui Xu; Wenxuan Zeng; Changzhi Sun; Lei Li; Yanghua Xiao", + "authorids": "", + "aff": "Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; University of Electronic Science and Technology of China; ByteDance AI Lab; University of California, Santa Barbara; Fudan-Aishu Cognitive Intelligence Joint Research Center", + "bibtex": "@article{Chen_Xu_Zeng_Sun_Li_Xiao_2023, title={Converge to the Truth: Factual Error Correction via Iterative Constrained Editing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26485}, DOI={10.1609/aaai.v37i11.26485}, abstractNote={Given a possibly false claim sentence, how can we automatically correct it with minimal editing? Existing methods either require a large number of pairs of false and corrected claims for supervised training or do not handle well errors spanning over multiple tokens within an utterance. In this paper, we propose VENCE, a novel method for factual error correction (FEC) with minimal edits. VENCE formulates the FEC problem as iterative sampling editing actions with respect to a target density function. We carefully design the target function with predicted truthfulness scores from an offline trained fact verification model. VENCE samples the most probable editing positions based on back-calculated gradients of the truthfulness score concerning input tokens and the editing actions using a distantly-supervised language model (T5). 
Experiments on a public dataset show that VENCE improves the well-adopted SARI metric by 5.3 (or a relative improvement of 11.8%) over the previous best distantly-supervised methods.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jiangjie and Xu, Rui and Zeng, Wenxuan and Sun, Changzhi and Li, Lei and Xiao, Yanghua}, year={2023}, month={Jun.}, pages={12616-12625} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26485/26257", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26485", + "pdf_size": 363745, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16395743673985062832&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "fudan.edu.cn;m.fudan.edu.cn;std.uestc.edu.cn;bytedance.com;cs.ucsb.edu;fudan.edu.cn", + "email": "fudan.edu.cn;m.fudan.edu.cn;std.uestc.edu.cn;bytedance.com;cs.ucsb.edu;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;3;0", + "aff_unique_norm": "Fudan University;University of Electronic Science and Technology of China;ByteDance;University of California, Santa Barbara", + "aff_unique_dep": "School of Computer Science;;AI Lab;", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.uestc.edu.cn;https://www.bytedance.com;https://www.ucsb.edu", + "aff_unique_abbr": "Fudan;UESTC;ByteDance;UCSB", + "aff_campus_unique_index": "0;0;2", + "aff_campus_unique": "Shanghai;;Santa Barbara", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26342", + "title": "CoopInit: Initializing Generative Adversarial Networks via Cooperative Learning", + "track": "main", + "status": "Technical", + "abstract": "Numerous research efforts have been made to stabilize the training of the Generative Adversarial Networks (GANs), such as through regularization and architecture design. 
However, we identify the instability can also arise from the fragile balance at the early stage of adversarial learning. This paper proposes the CoopInit, a simple yet effective cooperative learning-based initialization strategy that can quickly learn a good starting point for GANs, with a very small computation overhead during training. The proposed algorithm consists of two learning stages: (i) Cooperative initialization stage: The discriminator of GAN is treated as an energy-based model (EBM) and is optimized via maximum likelihood estimation (MLE), with the help of the GAN's generator to provide synthetic data to approximate the learning gradients. The EBM also guides the MLE learning of the generator via MCMC teaching; (ii) Adversarial finalization stage: After a few iterations of initialization, the algorithm seamlessly transits to the regular mini-max adversarial training until convergence. The motivation is that the MLE-based initialization stage drives the model towards mode coverage, which is helpful in alleviating the issue of mode dropping during the adversarial learning stage. We demonstrate the effectiveness of the proposed approach on image generation and one-sided unpaired image-to-image translation tasks through extensive experiments.", + "primary_area": "machine learning iv", + "author": "Yang Zhao; Jianwen Xie; Ping Li", + "authorids": "", + "aff": "Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research", + "bibtex": "@article{Zhao_Xie_Li_2023, title={CoopInit: Initializing Generative Adversarial Networks via Cooperative Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26342}, DOI={10.1609/aaai.v37i9.26342}, abstractNote={Numerous research efforts have been made to stabilize the training of the Generative Adversarial Networks (GANs), such as through regularization and architecture design. 
However, we identify the instability can also arise from the fragile balance at the early stage of adversarial learning. This paper proposes the CoopInit, a simple yet effective cooperative learning-based initialization strategy that can quickly learn a good starting point for GANs, with a very small computation overhead during training. The proposed algorithm consists of two learning stages: (i) Cooperative initialization stage: The discriminator of GAN is treated as an energy-based model (EBM) and is optimized via maximum likelihood estimation (MLE), with the help of the GAN\u2019s generator to provide synthetic data to approximate the learning gradients. The EBM also guides the MLE learning of the generator via MCMC teaching; (ii) Adversarial finalization stage: After a few iterations of initialization, the algorithm seamlessly transits to the regular mini-max adversarial training until convergence. The motivation is that the MLE-based initialization stage drives the model towards mode coverage, which is helpful in alleviating the issue of mode dropping during the adversarial learning stage. 
We demonstrate the effectiveness of the proposed approach on image generation and one-sided unpaired image-to-image translation tasks through extensive experiments.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Yang and Xie, Jianwen and Li, Ping}, year={2023}, month={Jun.}, pages={11345-11353} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26342/26114", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26342", + "pdf_size": 9432796, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10175753810390178261&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Baidu Research", + "aff_unique_dep": "Cognitive Computing Lab", + "aff_unique_url": "https://baidu.com", + "aff_unique_abbr": "Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26803", + "title": "Cooperative Multi-Agent Learning in a Complex World: Challenges and Solutions", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Over the past few years, artificial intelligence (AI) has achieved great success in a variety of applications, such as image classification and recommendation systems. This success has often been achieved by training machine learning models on static datasets, where inputs and desired outputs are provided.\nHowever, we are now seeing a shift in this paradigm. Instead of learning from static datasets, machine learning models are increasingly being trained through feedback from their interactions with the world. 
This is particularly important when machine learning models are deployed in the real world, as their decisions can often have an impact on other agents, turning the decision-making process into a multi-agent problem.\nAs a result, multi-agent learning in complex environments is a critical area of research for the next generation of AI, particularly in the context of cooperative tasks. Cooperative multi-agent learning is an essential problem for practitioners to consider as it has the potential to enable a wide range of multi-agent tasks.\nIn this presentation, we will review the background and challenges of cooperative multi-agent learning, and survey our research that aims to address these challenges.", + "primary_area": "", + "author": "Yali Du", + "authorids": "", + "aff": "Department of Informatics, King\u2019s College London", + "bibtex": "@article{Du_2024, title={Cooperative Multi-Agent Learning in a Complex World: Challenges and Solutions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26803}, DOI={10.1609/aaai.v37i13.26803}, abstractNote={Over the past few years, artificial intelligence (AI) has achieved great success in a variety of applications, such as image classification and recommendation systems. This success has often been achieved by training machine learning models on static datasets, where inputs and desired outputs are provided.\nHowever, we are now seeing a shift in this paradigm. Instead of learning from static datasets, machine learning models are increasingly being trained through feedback from their interactions with the world. This is particularly important when machine learning models are deployed in the real world, as their decisions can often have an impact on other agents, turning the decision-making process into a multi-agent problem.\nAs a result, multi-agent learning in complex environments is a critical area of research for the next generation of AI, particularly in the context of cooperative tasks. 
Cooperative multi-agent learning is an essential problem for practitioners to consider as it has the potential to enable a wide range of multi-agent tasks.\nIn this presentation, we will review the background and challenges of cooperative multi-agent learning, and survey our research that aims to address these challenges.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Yali}, year={2024}, month={Jul.}, pages={15436-15436} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26803/26575", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26803", + "pdf_size": 47228, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5831150083944043784&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "kcl.ac.uk", + "email": "kcl.ac.uk", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "King\u2019s College London", + "aff_unique_dep": "Department of Informatics", + "aff_unique_url": "https://www.kcl.ac.uk", + "aff_unique_abbr": "KCL", + "aff_campus_unique_index": "0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26182", + "title": "Cooperative and Adversarial Learning: Co-enhancing Discriminability and Transferability in Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Discriminability and transferability are two goals of feature learning for domain adaptation (DA), as we aim to find the transferable features from the source domain that are helpful for discriminating the class label in the target domain. Modern DA approaches optimize discriminability and transferability by adopting two separate modules for the two goals upon a feature extractor, but lack fully exploiting their relationship. 
This paper argues that by letting the discriminative module and transfer module help each other, better DA can be achieved. We propose Cooperative and Adversarial LEarning (CALE) to combine the optimization of discriminability and transferability into a whole, provide one solution for making the discriminative module and transfer module guide each other. Specifically, CALE generates cooperative (easy) examples and adversarial (hard) examples with both discriminative module and transfer module. While the easy examples that contain the module knowledge can be used to enhance each other, the hard ones are used to enhance the robustness of the corresponding goal. Experimental results show the effectiveness of CALE for unifying the learning of discriminability and transferability, as well as its superior performance.", + "primary_area": "machine learning iii", + "author": "Hui Sun; Zheng Xie; Xin-Ye Li; Ming Li", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Sun_Xie_Li_Li_2023, title={Cooperative and Adversarial Learning: Co-enhancing Discriminability and Transferability in Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26182}, DOI={10.1609/aaai.v37i8.26182}, abstractNote={Discriminability and transferability are two goals of feature learning for domain adaptation (DA), as we aim to find the transferable features from the source domain that are helpful for discriminating the class label in the target domain. 
Modern DA approaches optimize discriminability and transferability by adopting two separate modules for the two goals upon a feature extractor, but lack fully exploiting their relationship. This paper argues that by letting the discriminative module and transfer module help each other, better DA can be achieved. We propose Cooperative and Adversarial LEarning (CALE) to combine the optimization of discriminability and transferability into a whole, provide one solution for making the discriminative module and transfer module guide each other. Specifically, CALE generates cooperative (easy) examples and adversarial (hard) examples with both discriminative module and transfer module. While the easy examples that contain the module knowledge can be used to enhance each other, the hard ones are used to enhance the robustness of the corresponding goal. Experimental results show the effectiveness of CALE for unifying the learning of discriminability and transferability, as well as its superior performance.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Hui and Xie, Zheng and Li, Xin-Ye and Li, Ming}, year={2023}, month={Jun.}, pages={9909-9917} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26182/25954", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26182", + "pdf_size": 895117, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5530467258747404195&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + 
"aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25263", + "title": "CoordFill: Efficient High-Resolution Image Inpainting via Parameterized Coordinate Querying", + "track": "main", + "status": "Technical", + "abstract": "Image inpainting aims to fill the missing hole of the input. It is hard to solve this task efficiently when facing high-resolution images due to two reasons: (1) Large reception field needs to be handled for high-resolution image inpainting. (2) The general encoder and decoder network synthesizes many background pixels synchronously due to the form of the image matrix. In this paper, we try to break the above limitations for the first time thanks to the recent development of continuous implicit representation. In detail, we down-sample and encode the degraded image to produce the spatial-adaptive parameters for each spatial patch via an attentional Fast Fourier Convolution (FFC)-based parameter generation network. Then, we take these parameters as the weights and biases of a series of multi-layer perceptron (MLP), where the input is the encoded continuous coordinates and the output is the synthesized color value. Thanks to the proposed structure, we only encode the high-resolution image in a relatively low resolution for larger reception field capturing. Then, the continuous position encoding will be helpful to synthesize the photo-realistic high-frequency textures by re-sampling the coordinate in a higher resolution. Also, our framework enables us to query the coordinates of missing pixels only in parallel, yielding a more efficient solution than the previous methods. Experiments show that the proposed method achieves real-time performance on the 2048X2048 images using a single GTX 2080 Ti GPU and can handle 4096X4096 images, with much better performance than existing state-of-the-art methods visually and numerically. 
The code is available at: https://github.com/NiFangBaAGe/CoordFill.", + "primary_area": "computer vision ii", + "author": "Weihuang Liu; Xiaodong Cun; Chi-Man Pun; Menghan Xia; Yong Zhang; Jue Wang", + "authorids": "", + "aff": "University of Macau; Tencent AI Lab; University of Macau; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab", + "bibtex": "@article{Liu_Cun_Pun_Xia_Zhang_Wang_2023, title={CoordFill: Efficient High-Resolution Image Inpainting via Parameterized Coordinate Querying}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25263}, DOI={10.1609/aaai.v37i2.25263}, abstractNote={Image inpainting aims to fill the missing hole of the input. It is hard to solve this task efficiently when facing high-resolution images due to two reasons: (1) Large reception field needs to be handled for high-resolution image inpainting. (2) The general encoder and decoder network synthesizes many background pixels synchronously due to the form of the image matrix. In this paper, we try to break the above limitations for the first time thanks to the recent development of continuous implicit representation. In detail, we down-sample and encode the degraded image to produce the spatial-adaptive parameters for each spatial patch via an attentional Fast Fourier Convolution (FFC)-based parameter generation network. Then, we take these parameters as the weights and biases of a series of multi-layer perceptron (MLP), where the input is the encoded continuous coordinates and the output is the synthesized color value. Thanks to the proposed structure, we only encode the high-resolution image in a relatively low resolution for larger reception field capturing. Then, the continuous position encoding will be helpful to synthesize the photo-realistic high-frequency textures by re-sampling the coordinate in a higher resolution. 
Also, our framework enables us to query the coordinates of missing pixels only in parallel, yielding a more efficient solution than the previous methods. Experiments show that the proposed method achieves real-time performance on the 2048X2048 images using a single GTX 2080 Ti GPU and can handle 4096X4096 images, with much better performance than existing state-of-the-art methods visually and numerically. The code is available at: https://github.com/NiFangBaAGe/CoordFill.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Weihuang and Cun, Xiaodong and Pun, Chi-Man and Xia, Menghan and Zhang, Yong and Wang, Jue}, year={2023}, month={Jun.}, pages={1746-1754} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25263/25035", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25263", + "pdf_size": 3047822, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15344782670812529408&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "umac.mo;gmail.com;umac.mo;gmail.com;gmail.com;gmail.com", + "email": "umac.mo;gmail.com;umac.mo;gmail.com;gmail.com;gmail.com", + "github": "https://github.com/NiFangBaAGe/CoordFill", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;1;1;1", + "aff_unique_norm": "University of Macau;Tencent", + "aff_unique_dep": ";Tencent AI Lab", + "aff_unique_url": "https://www.um.edu.mo;https://ai.tencent.com", + "aff_unique_abbr": "UM;Tencent AI Lab", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1;1;1", + "aff_country_unique": "Macau;China" + }, + { + "id": "article-26307", + "title": "Coordinate Descent Methods for DC Minimization: Optimality Conditions and Global Convergence", + "track": "main", + "status": "Technical", + "abstract": "Difference-of-Convex (DC) minimization, referring to the problem of minimizing the difference of two convex functions, has been found 
rich applications in statistical learning and studied extensively for decades. However, existing methods are primarily based on multi-stage convex relaxation, only leading to weak optimality of critical points. This paper proposes a coordinate descent method for minimizing a class of DC functions based on sequential nonconvex approximation. Our approach iteratively solves a nonconvex one-dimensional subproblem globally, and it is guaranteed to converge to a coordinate-wise stationary point. We prove that this new optimality condition is always stronger than the standard critical point condition and directional point condition under a mildlocally bounded nonconvexity assumption. For comparisons, we also include a naive variant of coordinate descent methods based on sequential convex approximation in our study. When the objective function satisfies a globally bounded nonconvexity assumption and Luo-Tseng error bound assumption, coordinate descent methods achieve Q-linear convergence rate. Also, for many applications of interest, we show that the nonconvex one-dimensional subproblem can be computed exactly and efficiently using a breakpoint searching method. Finally, we have conducted extensive experiments on several statistical learning tasks to show the superiority of our approach.", + "primary_area": "machine learning iv", + "author": "Ganzhao Yuan", + "authorids": "", + "aff": "Peng Cheng Laboratory, China", + "bibtex": "@article{Yuan_2023, title={Coordinate Descent Methods for DC Minimization: Optimality Conditions and Global Convergence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26307}, DOI={10.1609/aaai.v37i9.26307}, abstractNote={Difference-of-Convex (DC) minimization, referring to the problem of minimizing the difference of two convex functions, has been found rich applications in statistical learning and studied extensively for decades. 
However, existing methods are primarily based on multi-stage convex relaxation, only leading to weak optimality of critical points. This paper proposes a coordinate descent method for minimizing a class of DC functions based on sequential nonconvex approximation. Our approach iteratively solves a nonconvex one-dimensional subproblem globally, and it is guaranteed to converge to a coordinate-wise stationary point. We prove that this new optimality condition is always stronger than the standard critical point condition and directional point condition under a mildlocally bounded nonconvexity assumption. For comparisons, we also include a naive variant of coordinate descent methods based on sequential convex approximation in our study. When the objective function satisfies a globally bounded nonconvexity assumption and Luo-Tseng error bound assumption, coordinate descent methods achieve Q-linear convergence rate. Also, for many applications of interest, we show that the nonconvex one-dimensional subproblem can be computed exactly and efficiently using a breakpoint searching method. 
Finally, we have conducted extensive experiments on several statistical learning tasks to show the superiority of our approach.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Ganzhao}, year={2023}, month={Jun.}, pages={11034-11042} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26307/26079", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26307", + "pdf_size": 474269, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18021185290110158857&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "pcl.ac.cn", + "email": "pcl.ac.cn", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Peng Cheng Laboratory", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_country_unique_index": "0", + "aff_country_unique": "China" + }, + { + "id": "article-25794", + "title": "Copyright-Certified Distillation Dataset: Distilling One Million Coins into One Bitcoin with Your Private Key", + "track": "main", + "status": "Technical", + "abstract": "The rapid development of neural network dataset distillation in recent years has provided new ideas in many areas such as continuous learning, neural network architecture search and privacy preservation. Dataset distillation is a very effective method to distill large training datasets into small data, thus ensuring that the test accuracy of models trained on their synthesized small datasets matches that of models trained on the full dataset. Thus, dataset distillation itself is commercially valuable, not only for reducing training costs, but also for compressing storage costs and significantly reducing the training costs of deep learning. 
However, copyright protection for dataset distillation has not been proposed yet, so we propose the first method to protect intellectual property by embedding watermarks in the dataset distillation process. Our approach not only popularizes the dataset distillation technique, but also authenticates the ownership of the distilled dataset by the models trained on that distilled dataset.", + "primary_area": "knowledge representation and reasoning", + "author": "Tengjun Liu; Ying Chen; Wanxuan Gu", + "authorids": "", + "aff": "School of Computer Science,Fudan University; School of Computer Science,Fudan University; NVIDIA", + "bibtex": "@article{Liu_Chen_Gu_2023, title={Copyright-Certified Distillation Dataset: Distilling One Million Coins into One Bitcoin with Your Private Key}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25794}, DOI={10.1609/aaai.v37i5.25794}, abstractNote={The rapid development of neural network dataset distillation in recent years has provided new ideas in many areas such as continuous learning, neural network architecture search and privacy preservation. Dataset distillation is a very effective method to distill large training datasets into small data, thus ensuring that the test accuracy of models trained on their synthesized small datasets matches that of models trained on the full dataset. Thus, dataset distillation itself is commercially valuable, not only for reducing training costs, but also for compressing storage costs and significantly reducing the training costs of deep learning. However, copyright protection for dataset distillation has not been proposed yet, so we propose the first method to protect intellectual property by embedding watermarks in the dataset distillation process. 
Our approach not only popularizes the dataset distillation technique, but also authenticates the ownership of the distilled dataset by the models trained on that distilled dataset.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Tengjun and Chen, Ying and Gu, Wanxuan}, year={2023}, month={Jun.}, pages={6458-6466} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25794/25566", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25794", + "pdf_size": 8942884, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5790743427380721428&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "fudan.edu.com;fudan.edu.com;163.com", + "email": "fudan.edu.com;fudan.edu.com;163.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Fudan University;NVIDIA Corporation", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.nvidia.com", + "aff_unique_abbr": "Fudan;NVIDIA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26395", + "title": "Correct for Whom? Subjectivity and the Evaluation of Personalized Image Aesthetics Assessment Models", + "track": "main", + "status": "Technical", + "abstract": "The problem of image aesthetic quality assessment is surprisingly difficult to define precisely. Most early work attempted to estimate the average aesthetic rating of a group of observers, while some recent work has shifted to an approach based on few-shot personalization. In this paper, we connect few-shot personalization, via Immanuel Kant's concept of disinterested judgment, to an argument from feminist aesthetics about the biased tendencies of objective standards for subjective pleasures. 
To empirically investigate this philosophical debate, we introduce PR-AADB, a relabeling of the existing AADB dataset with labels for pairs of images, and measure how well the existing groundtruth predicts our new pairwise labels. We find, consistent with the feminist critique, that both the existing groundtruth and few-shot personalized predictions represent some users' preferences significantly better than others, but that it is difficult to predict when and for whom the existing groundtruth will be correct. We thus advise against using benchmark datasets to evaluate models for personalized IAQA, and recommend caution when attempting to account for subjective difference using machine learning more generally.", + "primary_area": "philosophy and ethics of ai", + "author": "Samuel Goree; Weslie Khoo; David J. Crandall", + "authorids": "", + "aff": "Department of Informatics, Indiana University; Department of Computer Science, Indiana University; Department of Computer Science, Indiana University", + "bibtex": "@article{Goree_Khoo_Crandall_2023, title={Correct for Whom? Subjectivity and the Evaluation of Personalized Image Aesthetics Assessment Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26395}, DOI={10.1609/aaai.v37i10.26395}, abstractNote={The problem of image aesthetic quality assessment is surprisingly difficult to define precisely. Most early work attempted to estimate the average aesthetic rating of a group of observers, while some recent work has shifted to an approach based on few-shot personalization. In this paper, we connect few-shot personalization, via Immanuel Kant\u2019s concept of disinterested judgment, to an argument from feminist aesthetics about the biased tendencies of objective standards for subjective pleasures. 
To empirically investigate this philosophical debate, we introduce PR-AADB, a relabeling of the existing AADB dataset with labels for pairs of images, and measure how well the existing groundtruth predicts our new pairwise labels. We find, consistent with the feminist critique, that both the existing groundtruth and few-shot personalized predictions represent some users\u2019 preferences significantly better than others, but that it is difficult to predict when and for whom the existing groundtruth will be correct. We thus advise against using benchmark datasets to evaluate models for personalized IAQA, and recommend caution when attempting to account for subjective difference using machine learning more generally.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Goree, Samuel and Khoo, Weslie and Crandall, David J.}, year={2023}, month={Jun.}, pages={11818-11827} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26395/26167", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26395", + "pdf_size": 2446391, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10811221954202921951&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "iu.edu;iu.edu;indiana.edu", + "email": "iu.edu;iu.edu;indiana.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indiana University", + "aff_unique_dep": "Department of Informatics", + "aff_unique_url": "https://www.indiana.edu", + "aff_unique_abbr": "IU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26728", + "title": "Correct-by-Construction Reinforcement Learning of Cardiac Pacemakers from Duration Calculus Requirements", + "track": "aaai special track", + "status": "Technical", + "abstract": "As the complexity of pacemaker devices 
continues to grow, the importance of capturing its functional correctness requirement formally cannot be overestimated. The pacemaker system specification document by \\emph{Boston Scientific} provides a widely accepted set of specifications for pacemakers. \nAs these specifications are written in a natural language, they are not amenable for automated verification, synthesis, or reinforcement learning of pacemaker systems. This paper presents a formalization of these requirements for a dual-chamber pacemaker in \\emph{duration calculus} (DC), a highly expressive real-time specification language.\nThe proposed formalization allows us to automatically translate pacemaker requirements into executable specifications as stopwatch automata, which can be used to enable simulation, monitoring, validation, verification and automatic synthesis of pacemaker systems. \nThe cyclic nature of the pacemaker-heart closed-loop system results in DC requirements that compile to a decidable subclass of stopwatch automata. 
We present shield reinforcement learning (shield RL), a shield synthesis based reinforcement learning algorithm, by automatically constructing safety envelopes from DC specifications.", + "primary_area": "safe and robust ai", + "author": "Kalyani Dole; Ashutosh Gupta; John Komp; Shankaranarayanan Krishna; Ashutosh Trivedi", + "authorids": "", + "aff": "Indian Institute of Technology Bombay; Indian Institute of Technology Bombay; University of Colorado Boulder; Indian Institute of Technology Bombay; University of Colorado Boulder", + "bibtex": "@article{Dole_Gupta_Komp_Krishna_Trivedi_2023, title={Correct-by-Construction Reinforcement Learning of Cardiac Pacemakers from Duration Calculus Requirements}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26728}, DOI={10.1609/aaai.v37i12.26728}, abstractNote={As the complexity of pacemaker devices continues to grow, the importance of capturing its functional correctness requirement formally cannot be overestimated. The pacemaker system specification document by \\emph{Boston Scientific} provides a widely accepted set of specifications for pacemakers. As these specifications are written in a natural language, they are not amenable for automated verification, synthesis, or reinforcement learning of pacemaker systems. This paper presents a formalization of these requirements for a dual-chamber pacemaker in \\emph{duration calculus} (DC), a highly expressive real-time specification language.\nThe proposed formalization allows us to automatically translate pacemaker requirements into executable specifications as stopwatch automata, which can be used to enable simulation, monitoring, validation, verification and automatic synthesis of pacemaker systems. The cyclic nature of the pacemaker-heart closed-loop system results in DC requirements that compile to a decidable subclass of stopwatch automata. 
We present shield reinforcement learning (shield RL), a shield synthesis based reinforcement learning algorithm, by automatically constructing safety envelopes from DC specifications.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dole, Kalyani and Gupta, Ashutosh and Komp, John and Krishna, Shankaranarayanan and Trivedi, Ashutosh}, year={2023}, month={Jun.}, pages={14792-14800} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26728/26500", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26728", + "pdf_size": 604479, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6535268620708672032&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "cse.iitb.ac.in;cse.iitb.ac.in;colorado.edu;cse.iitb.ac.in;colorado.edu", + "email": "cse.iitb.ac.in;cse.iitb.ac.in;colorado.edu;cse.iitb.ac.in;colorado.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;1", + "aff_unique_norm": "Indian Institute of Technology Bombay;University of Colorado", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitb.ac.in;https://www.colorado.edu", + "aff_unique_abbr": "IIT Bombay;CU Boulder", + "aff_campus_unique_index": "0;0;1;0;1", + "aff_campus_unique": "Bombay;Boulder", + "aff_country_unique_index": "0;0;1;0;1", + "aff_country_unique": "India;United States" + }, + { + "id": "article-25190", + "title": "Correlation Loss: Enforcing Correlation between Classification and Localization", + "track": "main", + "status": "Technical", + "abstract": "Object detectors are conventionally trained by a weighted sum of classification and localization losses. Recent studies (e.g., predicting IoU with an auxiliary head, Generalized Focal Loss, Rank & Sort Loss) have shown that forcing these two loss terms to interact with each other in non-conventional ways creates a useful inductive bias and improves performance. 
Inspired by these works, we focus on the correlation between classification and localization and make two main contributions: (i) We provide an analysis about the effects of correlation between classification and localization tasks in object detectors. We identify why correlation affects the performance of various NMS-based and NMS-free detectors, and we devise measures to evaluate the effect of correlation and use them to analyze common detectors. (ii) Motivated by our observations, e.g., that NMS-free detectors can also benefit from correlation, we propose Correlation Loss, a novel plug-in loss function that improves the performance of various object detectors by directly optimizing correlation coefficients: E.g., Correlation Loss on Sparse R-CNN, an NMS-free method, yields 1.6 AP gain on COCO and 1.8 AP gain on Cityscapes dataset. Our best model on Sparse R-CNN reaches 51.0 AP without test-time augmentation on COCO test-dev, reaching state-of-the-art. Code is available at: https://github.com/fehmikahraman/CorrLoss.", + "primary_area": "computer vision i", + "author": "Fehmi Kahraman; Kemal Oksuz; Sinan Kalkan; Emre Akbas", + "authorids": "", + "aff": "Dept. of Computer Engineering, Middle East Technical University (METU), Ankara, Turkey+METU Center for Robotics and Artificial Intelligence (ROMER), Ankara, Turkey; Dept. of Computer Engineering, Middle East Technical University (METU), Ankara, Turkey+METU Center for Robotics and Artificial Intelligence (ROMER), Ankara, Turkey; Dept. of Computer Engineering, Middle East Technical University (METU), Ankara, Turkey; Dept. 
of Computer Engineering, Middle East Technical University (METU), Ankara, Turkey+METU Center for Robotics and Artificial Intelligence (ROMER), Ankara, Turkey", + "bibtex": "@article{Kahraman_Oksuz_Kalkan_Akbas_2023, title={Correlation Loss: Enforcing Correlation between Classification and Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25190}, DOI={10.1609/aaai.v37i1.25190}, abstractNote={Object detectors are conventionally trained by a weighted sum of classification and localization losses. Recent studies (e.g., predicting IoU with an auxiliary head, Generalized Focal Loss, Rank & Sort Loss) have shown that forcing these two loss terms to interact with each other in non-conventional ways creates a useful inductive bias and improves performance. Inspired by these works, we focus on the correlation between classification and localization and make two main contributions: (i) We provide an analysis about the effects of correlation between classification and localization tasks in object detectors. We identify why correlation affects the performance of various NMS-based and NMS-free detectors, and we devise measures to evaluate the effect of correlation and use them to analyze common detectors. (ii) Motivated by our observations, e.g., that NMS-free detectors can also benefit from correlation, we propose Correlation Loss, a novel plug-in loss function that improves the performance of various object detectors by directly optimizing correlation coefficients: E.g., Correlation Loss on Sparse R-CNN, an NMS-free method, yields 1.6 AP gain on COCO and 1.8 AP gain on Cityscapes dataset. Our best model on Sparse R-CNN reaches 51.0 AP without test-time augmentation on COCO test-dev, reaching state-of-the-art. 
Code is available at: https://github.com/fehmikahraman/CorrLoss.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kahraman, Fehmi and Oksuz, Kemal and Kalkan, Sinan and Akbas, Emre}, year={2023}, month={Jun.}, pages={1087-1095} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25190/24962", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25190", + "pdf_size": 466726, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14611205807039541230&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "metu.edu.tr;metu.edu.tr;metu.edu.tr;metu.edu.tr", + "email": "metu.edu.tr;metu.edu.tr;metu.edu.tr;metu.edu.tr", + "github": "https://github.com/fehmikahraman/CorrLoss", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0;0+0", + "aff_unique_norm": "Middle East Technical University", + "aff_unique_dep": "Dept. of Computer Engineering", + "aff_unique_url": "https://www.metu.edu.tr", + "aff_unique_abbr": "METU", + "aff_campus_unique_index": "0+0;0+0;0;0+0", + "aff_campus_unique": "Ankara", + "aff_country_unique_index": "0+0;0+0;0;0+0", + "aff_country_unique": "Turkey" + }, + { + "id": "article-26215", + "title": "Correspondence-Free Domain Alignment for Unsupervised Cross-Domain Image Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Cross-domain image retrieval aims at retrieving images across different domains to excavate cross-domain classificatory or correspondence relationships. This paper studies a less-touched problem of cross-domain image retrieval, i.e., unsupervised cross-domain image retrieval, considering the following practical assumptions: (i) no correspondence relationship, and (ii) no category annotations. It is challenging to align and bridge distinct domains without cross-domain correspondence. 
To tackle the challenge, we present a novel Correspondence-free Domain Alignment (CoDA) method to effectively eliminate the cross-domain gap through In-domain Self-matching Supervision (ISS) and Cross-domain Classifier Alignment (CCA). To be specific, ISS is presented to encapsulate discriminative information into the latent common space by elaborating a novel self-matching supervision mechanism. To alleviate the cross-domain discrepancy, CCA is proposed to align distinct domain-specific classifiers. Thanks to the ISS and CCA, our method could encode the discrimination into the domain-invariant embedding space for unsupervised cross-domain image retrieval. To verify the effectiveness of the proposed method, extensive experiments are conducted on four benchmark datasets compared with six state-of-the-art methods.", + "primary_area": "machine learning iii", + "author": "Xu Wang; Dezhong Peng; Ming Yan; Peng Hu", + "authorids": "", + "aff": "College of Computer Science, Sichuan University, Chengdu, China+Sichuan Zhiqian Technology Co., Ltd, Chengdu, China+Chengdu Ruibei Yingte Information Technology Ltd. Company, Chengdu, China; College of Computer Science, Sichuan University, Chengdu, China+Centre for Frontier AI Research (CFAR), A*STAR, Singapore+Chengdu Ruibei Yingte Information Technology Ltd. Company, Chengdu, China; Centre for Frontier AI Research (CFAR), A*STAR, Singapore; College of Computer Science, Sichuan University, Chengdu, China", + "bibtex": "@article{Wang_Peng_Yan_Hu_2023, title={Correspondence-Free Domain Alignment for Unsupervised Cross-Domain Image Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26215}, DOI={10.1609/aaai.v37i8.26215}, abstractNote={Cross-domain image retrieval aims at retrieving images across different domains to excavate cross-domain classificatory or correspondence relationships. 
This paper studies a less-touched problem of cross-domain image retrieval, i.e., unsupervised cross-domain image retrieval, considering the following practical assumptions: (i) no correspondence relationship, and (ii) no category annotations. It is challenging to align and bridge distinct domains without cross-domain correspondence. To tackle the challenge, we present a novel Correspondence-free Domain Alignment (CoDA) method to effectively eliminate the cross-domain gap through In-domain Self-matching Supervision (ISS) and Cross-domain Classifier Alignment (CCA). To be specific, ISS is presented to encapsulate discriminative information into the latent common space by elaborating a novel self-matching supervision mechanism. To alleviate the cross-domain discrepancy, CCA is proposed to align distinct domain-specific classifiers. Thanks to the ISS and CCA, our method could encode the discrimination into the domain-invariant embedding space for unsupervised cross-domain image retrieval. To verify the effectiveness of the proposed method, extensive experiments are conducted on four benchmark datasets compared with six state-of-the-art methods.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Xu and Peng, Dezhong and Yan, Ming and Hu, Peng}, year={2023}, month={Jun.}, pages={10200-10208} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26215/25987", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26215", + "pdf_size": 3825818, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6705069360508947838&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;scu.edu.cn;gmail.com;gmail.com", + "email": "gmail.com;scu.edu.cn;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+2;0+3+2;3;0", + "aff_unique_norm": "Sichuan University;Sichuan Zhiqian Technology Co., Ltd;Chengdu Ruibei Yingte 
Information Technology Ltd. Company;A*STAR", + "aff_unique_dep": "College of Computer Science;;;Centre for Frontier AI Research (CFAR)", + "aff_unique_url": "https://www.scu.edu.cn;;;https://www.a-star.edu.sg", + "aff_unique_abbr": "SCU;;;A*STAR", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chengdu;", + "aff_country_unique_index": "0+0+0;0+1+0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26108", + "title": "Corruption-Tolerant Algorithms for Generalized Linear Models", + "track": "main", + "status": "Technical", + "abstract": "This paper presents SVAM (Sequential Variance-Altered MLE), a unified framework for learning generalized linear models under adversarial label corruption in training data. SVAM extends to tasks such as least squares regression, logistic regression, and gamma regression, whereas many existing works on learning with label corruptions focus only on least squares regression. SVAM is based on a novel variance reduction technique that may be of independent interest and works by iteratively solving weighted MLEs over variance-altered versions of the GLM objective. SVAM offers provable model recovery guarantees superior to the state-of-the-art for robust regression even when a constant fraction of training labels are adversarially corrupted. SVAM also empirically outperforms several existing problem-specific techniques for robust regression and classification. 
Code for SVAM is available at https://github.com/purushottamkar/svam/", + "primary_area": "machine learning iii", + "author": "Bhaskar Mukhoty; Debojyoti Dey; Purushottam Kar", + "authorids": "", + "aff": "Mohamed Bin Zayed University of Artificial Intelligence, Abu Dhabi, UAE; Indian Institute of Technology Kanpur, Uttar Pradesh, India; Indian Institute of Technology Kanpur, Uttar Pradesh, India", + "bibtex": "@article{Mukhoty_Dey_Kar_2023, title={Corruption-Tolerant Algorithms for Generalized Linear Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26108}, DOI={10.1609/aaai.v37i8.26108}, abstractNote={This paper presents SVAM (Sequential Variance-Altered MLE), a unified framework for learning generalized linear models under adversarial label corruption in training data. SVAM extends to tasks such as least squares regression, logistic regression, and gamma regression, whereas many existing works on learning with label corruptions focus only on least squares regression. SVAM is based on a novel variance reduction technique that may be of independent interest and works by iteratively solving weighted MLEs over variance-altered versions of the GLM objective. SVAM offers provable model recovery guarantees superior to the state-of-the-art for robust regression even when a constant fraction of training labels are adversarially corrupted. SVAM also empirically outperforms several existing problem-specific techniques for robust regression and classification. 
Code for SVAM is available at https://github.com/purushottamkar/svam/}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mukhoty, Bhaskar and Dey, Debojyoti and Kar, Purushottam}, year={2023}, month={Jun.}, pages={9243-9250} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26108/25880", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26108", + "pdf_size": 410903, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7692258852534610497&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mbzuai.ac.ae;cse.iitk.ac.in;cse.iitk.ac.in", + "email": "mbzuai.ac.ae;cse.iitk.ac.in;cse.iitk.ac.in", + "github": "https://github.com/purushottamkar/svam/", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Mohamed Bin Zayed University of Artificial Intelligence;Indian Institute of Technology Kanpur", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.mbzuai.ac.ae;https://www.iitk.ac.in", + "aff_unique_abbr": "MBZUAI;IIT Kanpur", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Abu Dhabi;Kanpur", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "United Arab Emirates;India" + }, + { + "id": "article-26854", + "title": "Cosmic Microwave Background Recovery: A Graph-Based Bayesian Convolutional Network Approach", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The cosmic microwave background (CMB) is a significant source of knowledge about the origin and evolution of our universe. However, observations of the CMB are contaminated by foreground emissions, obscuring the CMB signal and reducing its efficacy in constraining cosmological parameters. We employ deep learning as a data-driven approach to CMB cleaning from multi-frequency full-sky maps. 
In particular, we develop a graph-based Bayesian convolutional neural network based on the U-Net architecture that predicts cleaned CMB with pixel-wise uncertainty estimates. We demonstrate the potential of this technique on realistic simulated data based on the Planck mission. We show that our model ac- accurately recovers the cleaned CMB sky map and resulting angular power spectrum while identifying regions of uncertainty. Finally, we discuss the current challenges and the path forward for deploying our model for CMB recovery on real observations.", + "primary_area": "emerging applications of ai", + "author": "Jadie Adams; Steven Lu; Krzysztof M. Gorski; Graca Rocha; Kiri L. Wagstaff", + "authorids": "", + "aff": "Jet Propulsion Laboratory, California Institute of Technology; Jet Propulsion Laboratory, California Institute of Technology; Jet Propulsion Laboratory, California Institute of Technology; Jet Propulsion Laboratory, California Institute of Technology; Jet Propulsion Laboratory, California Institute of Technology + Scienti\ufb01c Computing and Imaging Institute, University of Utah", + "bibtex": "@article{Adams_Lu_Gorski_Rocha_Wagstaff_2024, title={Cosmic Microwave Background Recovery: A Graph-Based Bayesian Convolutional Network Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26854}, DOI={10.1609/aaai.v37i13.26854}, abstractNote={The cosmic microwave background (CMB) is a significant source of knowledge about the origin and evolution of our universe. However, observations of the CMB are contaminated by foreground emissions, obscuring the CMB signal and reducing its efficacy in constraining cosmological parameters. We employ deep learning as a data-driven approach to CMB cleaning from multi-frequency full-sky maps. In particular, we develop a graph-based Bayesian convolutional neural network based on the U-Net architecture that predicts cleaned CMB with pixel-wise uncertainty estimates. 
We demonstrate the potential of this technique on realistic simulated data based on the Planck mission. We show that our model ac- accurately recovers the cleaned CMB sky map and resulting angular power spectrum while identifying regions of uncertainty. Finally, we discuss the current challenges and the path forward for deploying our model for CMB recovery on real observations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Adams, Jadie and Lu, Steven and Gorski, Krzysztof M. and Rocha, Graca and Wagstaff, Kiri L.}, year={2024}, month={Jul.}, pages={15640-15646} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26854/26626", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26854", + "pdf_size": 2685560, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17850259535046112351&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "sci.utah.edu; you.lu; krzysztof.m.gorski; graca.m.rocha;wkiri.com", + "email": "sci.utah.edu; you.lu; krzysztof.m.gorski; graca.m.rocha;wkiri.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+1", + "aff_unique_norm": "California Institute of Technology;University of Utah", + "aff_unique_dep": "Jet Propulsion Laboratory;Scienti\ufb01c Computing and Imaging Institute", + "aff_unique_url": "https://www.caltech.edu;https://www.sci.utah.edu", + "aff_unique_abbr": "Caltech;U of U SCI", + "aff_campus_unique_index": "0;0;0;0;0+1", + "aff_campus_unique": "Pasadena;Salt Lake City", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25265", + "title": "Counterfactual Dynamics Forecasting \u2013 a New Setting of Quantitative Reasoning", + "track": "main", + "status": "Technical", + "abstract": "Rethinking and introspection are important elements of human intelligence. 
To mimic these capabilities, counterfactual reasoning has attracted attention of AI researchers recently, which aims to forecast the alternative outcomes for hypothetical scenarios (\u201cwhat-if\u201d). However, most existing approaches focused on qualitative reasoning (e.g., casual-effect relationship). It lacks a well-defined description of the differences between counterfactuals and facts, as well as how these differences evolve over time. This paper defines a new problem formulation - counterfactual dynamics forecasting - which is described in middle-level abstraction under the structural causal models (SCM) framework and derived as ordinary differential equations (ODEs) as low-level quantitative computation. Based on it, we propose a method to infer counterfactual dynamics considering the factual dynamics as demonstration. Moreover, the evolution of differences between facts and counterfactuals are modelled by an explicit temporal component. The experimental results on two dynamical systems demonstrate the effectiveness of the proposed method.", + "primary_area": "computer vision ii", + "author": "Yanzhu Liu; Ying Sun; Joo-Hwee Lim", + "authorids": "", + "aff": "Institute for Infocomm Research (I2R) & Centre for Frontier AI Research (CFAR), A*STAR, Singapore; Institute for Infocomm Research (I2R) & Centre for Frontier AI Research (CFAR), A*STAR, Singapore; Institute for Infocomm Research (I2R) & Centre for Frontier AI Research (CFAR), A*STAR, Singapore", + "bibtex": "@article{Liu_Sun_Lim_2023, title={Counterfactual Dynamics Forecasting \u2013 a New Setting of Quantitative Reasoning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25265}, DOI={10.1609/aaai.v37i2.25265}, abstractNote={Rethinking and introspection are important elements of human intelligence. 
To mimic these capabilities, counterfactual reasoning has attracted attention of AI researchers recently, which aims to forecast the alternative outcomes for hypothetical scenarios (\u201cwhat-if\u201d). However, most existing approaches focused on qualitative reasoning (e.g., casual-effect relationship). It lacks a well-defined description of the differences between counterfactuals and facts, as well as how these differences evolve over time. This paper defines a new problem formulation - counterfactual dynamics forecasting - which is described in middle-level abstraction under the structural causal models (SCM) framework and derived as ordinary differential equations (ODEs) as low-level quantitative computation. Based on it, we propose a method to infer counterfactual dynamics considering the factual dynamics as demonstration. Moreover, the evolution of differences between facts and counterfactuals are modelled by an explicit temporal component. The experimental results on two dynamical systems demonstrate the effectiveness of the proposed method.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yanzhu and Sun, Ying and Lim, Joo-Hwee}, year={2023}, month={Jun.}, pages={1764-1771} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25265/25037", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25265", + "pdf_size": 1228702, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:9hjWskcUSMwJ:scholar.google.com/&scioq=Counterfactual+Dynamics+Forecasting+%E2%80%93+a+New+Setting+of+Quantitative+Reasoning&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "i2r.a-star.edu.sg;i2r.a-star.edu.sg;i2r.a-star.edu.sg", + "email": "i2r.a-star.edu.sg;i2r.a-star.edu.sg;i2r.a-star.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "A*STAR", + "aff_unique_dep": "Institute for Infocomm Research (I2R)", + 
"aff_unique_url": "https://www.a-star.edu.sg", + "aff_unique_abbr": "A*STAR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26691", + "title": "Counterfactual Fairness Is Basically Demographic Parity", + "track": "aaai special track", + "status": "Technical", + "abstract": "Making fair decisions is crucial to ethically implementing machine learning algorithms in social settings. In this work, we consider the celebrated definition of counterfactual fairness. We begin by showing that an algorithm which satisfies counterfactual fairness also satisfies demographic parity, a far simpler fairness constraint. Similarly, we show that all algorithms satisfying demographic parity can be trivially modified to satisfy counterfactual fairness. Together, our results indicate that counterfactual fairness is basically equivalent to demographic parity, which has important implications for the growing body of work on counterfactual fairness. We then validate our theoretical findings empirically, analyzing three existing algorithms for counterfactual fairness against three simple benchmarks. We find that two simple benchmark algorithms outperform all three existing algorithms---in terms of fairness, accuracy, and efficiency---on several data sets. Our analysis leads us to formalize a concrete fairness goal: to preserve the order of individuals within protected groups. We believe transparency around the ordering of individuals within protected groups makes fair algorithms more trustworthy. By design, the two simple benchmark algorithms satisfy this goal while the existing algorithms do not.", + "primary_area": "ai for social impact", + "author": "Lucas Rosenblatt; R. 
Teal Witter", + "authorids": "", + "aff": "New York University; New York University", + "bibtex": "@article{Rosenblatt_Witter_2023, title={Counterfactual Fairness Is Basically Demographic Parity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26691}, DOI={10.1609/aaai.v37i12.26691}, abstractNote={Making fair decisions is crucial to ethically implementing machine learning algorithms in social settings. In this work, we consider the celebrated definition of counterfactual fairness. We begin by showing that an algorithm which satisfies counterfactual fairness also satisfies demographic parity, a far simpler fairness constraint. Similarly, we show that all algorithms satisfying demographic parity can be trivially modified to satisfy counterfactual fairness. Together, our results indicate that counterfactual fairness is basically equivalent to demographic parity, which has important implications for the growing body of work on counterfactual fairness. We then validate our theoretical findings empirically, analyzing three existing algorithms for counterfactual fairness against three simple benchmarks. We find that two simple benchmark algorithms outperform all three existing algorithms---in terms of fairness, accuracy, and efficiency---on several data sets. Our analysis leads us to formalize a concrete fairness goal: to preserve the order of individuals within protected groups. We believe transparency around the ordering of individuals within protected groups makes fair algorithms more trustworthy. By design, the two simple benchmark algorithms satisfy this goal while the existing algorithms do not.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rosenblatt, Lucas and Witter, R. 
Teal}, year={2023}, month={Jun.}, pages={14461-14469} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26691/26463", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26691", + "pdf_size": 338482, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17050973733224177617&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "nyu.edu;nyu.edu", + "email": "nyu.edu;nyu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "New York University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nyu.edu", + "aff_unique_abbr": "NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26113", + "title": "Counterfactual Learning with General Data-Generating Policies", + "track": "main", + "status": "Technical", + "abstract": "Off-policy evaluation (OPE) attempts to predict the performance of counterfactual policies using log data from a different policy. We extend its applicability by developing an OPE method for a class of both full support and deficient support logging policies in contextual-bandit settings. This class includes deterministic bandit (such as Upper Confidence Bound) as well as deterministic decision-making based on supervised and unsupervised learning. We prove that our method's prediction converges in probability to the true performance of a counterfactual policy as the sample size increases. We validate our method with experiments on partly and entirely deterministic logging policies. 
Finally, we apply it to evaluate coupon targeting policies by a major online platform and show how to improve the existing policy.", + "primary_area": "machine learning iii", + "author": "Yusuke Narita; Kyohei Okumura; Akihiro Shimizu; Kohei Yata", + "authorids": "", + "aff": "Yale University; Northwestern University; Mercari, Inc.; University of Wisconsin-Madison", + "bibtex": "@article{Narita_Okumura_Shimizu_Yata_2023, title={Counterfactual Learning with General Data-Generating Policies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26113}, DOI={10.1609/aaai.v37i8.26113}, abstractNote={Off-policy evaluation (OPE) attempts to predict the performance of counterfactual policies using log data from a different policy. We extend its applicability by developing an OPE method for a class of both full support and deficient support logging policies in contextual-bandit settings. This class includes deterministic bandit (such as Upper Confidence Bound) as well as deterministic decision-making based on supervised and unsupervised learning. We prove that our method\u2019s prediction converges in probability to the true performance of a counterfactual policy as the sample size increases. We validate our method with experiments on partly and entirely deterministic logging policies. 
Finally, we apply it to evaluate coupon targeting policies by a major online platform and show how to improve the existing policy.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Narita, Yusuke and Okumura, Kyohei and Shimizu, Akihiro and Yata, Kohei}, year={2023}, month={Jun.}, pages={9286-9293} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26113/25885", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26113", + "pdf_size": 483382, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7777010214860110227&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "yale.edu;u.northwestern.edu;mercari.com;wisc.edu", + "email": "yale.edu;u.northwestern.edu;mercari.com;wisc.edu", + "github": "", + "project": "https://arxiv.org/abs/2212.01925", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Yale University;Northwestern University;Mercari;University of Wisconsin-Madison", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.yale.edu;https://www.northwestern.edu;https://www.mercari.com;https://www.wisc.edu", + "aff_unique_abbr": "Yale;NU;Mercari;UW-Madison", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Madison", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United States;Japan" + }, + { + "id": "article-26655", + "title": "Counterfactuals for the Future", + "track": "aaai special track", + "status": "Technical", + "abstract": "Counterfactuals are often described as 'retrospective,' focusing on hypothetical alternatives to a realized past. This description relates to an often implicit assumption about the structure and stability of exogenous variables in the system being modeled --- an assumption that is reasonable in many settings where counterfactuals are used. 
In this work, we consider cases where we might reasonably make a different assumption about exogenous variables; namely, that the exogenous noise terms of each unit do exhibit some unit-specific structure and/or stability. This leads us to a different use of counterfactuals --- a forward-looking rather than retrospective counterfactual. We introduce \"counterfactual treatment choice,\" a type of treatment choice problem that motivates using forward-looking counterfactuals. We then explore how mismatches between interventional versus forward-looking counterfactual approaches to treatment choice, consistent with different assumptions about exogenous noise, can lead to counterintuitive results.", + "primary_area": "ai for social impact", + "author": "Lucius E. J. Bynum; Joshua R. Loftus; Julia Stoyanovich", + "authorids": "", + "aff": "New York University, New York, NY, USA; London School of Economics, London, United Kingdom; New York University, New York, NY, USA", + "bibtex": "@article{Bynum_Loftus_Stoyanovich_2023, title={Counterfactuals for the Future}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26655}, DOI={10.1609/aaai.v37i12.26655}, abstractNote={Counterfactuals are often described as \u2019retrospective,\u2019 focusing on hypothetical alternatives to a realized past. This description relates to an often implicit assumption about the structure and stability of exogenous variables in the system being modeled --- an assumption that is reasonable in many settings where counterfactuals are used. In this work, we consider cases where we might reasonably make a different assumption about exogenous variables; namely, that the exogenous noise terms of each unit do exhibit some unit-specific structure and/or stability. This leads us to a different use of counterfactuals --- a forward-looking rather than retrospective counterfactual. 
We introduce "counterfactual treatment choice," a type of treatment choice problem that motivates using forward-looking counterfactuals. We then explore how mismatches between interventional versus forward-looking counterfactual approaches to treatment choice, consistent with different assumptions about exogenous noise, can lead to counterintuitive results.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bynum, Lucius E. J. and Loftus, Joshua R. and Stoyanovich, Julia}, year={2023}, month={Jun.}, pages={14144-14152} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26655/26427", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26655", + "pdf_size": 1245506, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1799323831324156952&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "nyu.edu;lse.ac.uk;nyu.edu", + "email": "nyu.edu;lse.ac.uk;nyu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "New York University;London School of Economics", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nyu.edu;https://www.lse.ac.uk", + "aff_unique_abbr": "NYU;LSE", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "New York;London", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "article-27002", + "title": "Counting Knot Mosaics with ALLSAT (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Knot mosaics are a model of a quantum knot system. A knot mosaic is a m-by-n grid where each location on the grid may contain any of 11 possible tiles such that the final layout has closed loops. Oh et al. proved a recurrence relation of state matrices to count the number of m-by-n knot mosaics. 
Our contribution is to use ALLSAT solvers to count knot mosaics and to experimentally try different ways to encode the AT MOST ONE constraint in SAT. We plan to use our SAT method as a tool to list knot mosaics of interest for specific classes of knots.", + "primary_area": "", + "author": "Hannah Miller", + "authorids": "", + "aff": "Golisano College of Computing and Information Sciences, Rochester Institute of Technology, Rochester, NY 14623", + "bibtex": "@article{Miller_2024, title={Counting Knot Mosaics with ALLSAT (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27002}, DOI={10.1609/aaai.v37i13.27002}, abstractNote={Knot mosaics are a model of a quantum knot system. A knot mosaic is a m-by-n grid where each location on the grid may contain any of 11 possible tiles such that the final layout has closed loops. Oh et al. proved a recurrence relation of state matrices to count the number of m-by-n knot mosaics. Our contribution is to use ALLSAT solvers to count knot mosaics and to experimentally try different ways to encode the AT MOST ONE constraint in SAT. 
We plan to use our SAT method as a tool to list knot mosaics of interest for specific classes of knots.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Miller, Hannah}, year={2024}, month={Jul.}, pages={16284-16285} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27002/26774", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27002", + "pdf_size": 100643, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:9xb4tc0Gbs4J:scholar.google.com/&scioq=Counting+Knot+Mosaics+with+ALLSAT+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "mail.rit.edu", + "email": "mail.rit.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Rochester Institute of Technology", + "aff_unique_dep": "Golisano College of Computing and Information Sciences", + "aff_unique_url": "https://www.rit.edu", + "aff_unique_abbr": "RIT", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Rochester", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26068", + "title": "Coupling Artificial Neurons in BERT and Biological Neurons in the Human Brain", + "track": "main", + "status": "Technical", + "abstract": "Linking computational natural language processing (NLP) models and neural responses to language in the human brain on the one hand facilitates the effort towards disentangling the neural representations underpinning language perception, on the other hand provides neurolinguistics evidence to evaluate and improve NLP models. Mappings of an NLP model\u2019s representations of and the brain activities evoked by linguistic input are typically deployed to reveal this symbiosis. 
However, two critical problems limit its advancement: 1) The model\u2019s representations (artificial neurons, ANs) rely on layer-level embeddings and thus lack fine-granularity; 2) The brain activities (biological neurons, BNs) are limited to neural recordings of isolated cortical unit (i.e., voxel/region) and thus lack integrations and interactions among brain functions. To address those problems, in this study, we 1) define ANs with fine-granularity in transformer-based NLP models (BERT in this study) and measure their temporal activations to input text sequences; 2) define BNs as functional brain networks (FBNs) extracted from functional magnetic resonance imaging (fMRI) data to capture functional interactions in the brain; 3) couple ANs and BNs by maximizing the synchronization of their temporal activations. Our experimental results demonstrate 1) The activations of ANs and BNs are significantly synchronized; 2) the ANs carry meaningful linguistic/semantic information and anchor to their BN signatures; 3) the anchored BNs are interpretable in a neurolinguistic context. 
Overall, our study introduces a novel, general, and effective framework to link transformer-based NLP models and neural activities in response to language and may provide novel insights for future studies such as brain-inspired evaluation and development of NLP models.", + "primary_area": "machine learning ii", + "author": "Xu Liu; Mengyue Zhou; Gaosheng Shi; Yu Du; Lin Zhao; Zihao Wu; David Liu; Tianming Liu; Xintao Hu", + "authorids": "", + "aff": "School of Automation, Northwestern Polytechnical University; School of Automation, Northwestern Polytechnical University; School of Automation, Northwestern Polytechnical University; School of Automation, Northwestern Polytechnical University; School of Computing, University of Georgia; School of Computing, University of Georgia; Athens Academy; School of Computing, University of Georgia; School of Automation, Northwestern Polytechnical University", + "bibtex": "@article{Liu_Zhou_Shi_Du_Zhao_Wu_Liu_Liu_Hu_2023, title={Coupling Artificial Neurons in BERT and Biological Neurons in the Human Brain}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26068}, DOI={10.1609/aaai.v37i7.26068}, abstractNote={Linking computational natural language processing (NLP) models and neural responses to language in the human brain on the one hand facilitates the effort towards disentangling the neural representations underpinning language perception, on the other hand provides neurolinguistics evidence to evaluate and improve NLP models. Mappings of an NLP model\u2019s representations of and the brain activities evoked by linguistic input are typically deployed to reveal this symbiosis. 
However, two critical problems limit its advancement: 1) The model\u2019s representations (artificial neurons, ANs) rely on layer-level embeddings and thus lack fine-granularity; 2) The brain activities (biological neurons, BNs) are limited to neural recordings of isolated cortical unit (i.e., voxel/region) and thus lack integrations and interactions among brain functions. To address those problems, in this study, we 1) define ANs with fine-granularity in transformer-based NLP models (BERT in this study) and measure their temporal activations to input text sequences; 2) define BNs as functional brain networks (FBNs) extracted from functional magnetic resonance imaging (fMRI) data to capture functional interactions in the brain; 3) couple ANs and BNs by maximizing the synchronization of their temporal activations. Our experimental results demonstrate 1) The activations of ANs and BNs are significantly synchronized; 2) the ANs carry meaningful linguistic/semantic information and anchor to their BN signatures; 3) the anchored BNs are interpretable in a neurolinguistic context. 
Overall, our study introduces a novel, general, and effective framework to link transformer-based NLP models and neural activities in response to language and may provide novel insights for future studies such as brain-inspired evaluation and development of NLP models.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Xu and Zhou, Mengyue and Shi, Gaosheng and Du, Yu and Zhao, Lin and Wu, Zihao and Liu, David and Liu, Tianming and Hu, Xintao}, year={2023}, month={Jun.}, pages={8888-8896} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26068/25840", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26068", + "pdf_size": 2425891, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1186901919304681159&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.nwpu.edu.cn;mail.nwpu.edu.cn;mail.nwpu.edu.cn;mail.nwpu.edu.cn;uga.edu;uga.edu;gmail.com;gmail.com;nwpu.edu.cn", + "email": "mail.nwpu.edu.cn;mail.nwpu.edu.cn;mail.nwpu.edu.cn;mail.nwpu.edu.cn;uga.edu;uga.edu;gmail.com;gmail.com;nwpu.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;1;1;2;1;0", + "aff_unique_norm": "Northwestern Polytechnical University;University of Georgia;Athens Academy", + "aff_unique_dep": "School of Automation;School of Computing;", + "aff_unique_url": "https://www.nwpu.edu.cn;https://www.uga.edu;", + "aff_unique_abbr": "NWPU;UGA;", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Athens", + "aff_country_unique_index": "0;0;0;0;1;1;2;1;0", + "aff_country_unique": "China;United States;Greece" + }, + { + "id": "article-26396", + "title": "Covariate-Shift Generalization via Random Sample Weighting", + "track": "main", + "status": "Technical", + "abstract": "Shifts in the marginal distribution of covariates from training to the test phase, named covariate-shifts, often lead to unstable prediction performance across 
agnostic testing data, especially under model misspecification. Recent literature on invariant learning attempts to learn an invariant predictor from heterogeneous environments. However, the performance of the learned predictor depends heavily on the availability and quality of provided environments. In this paper, we propose a simple and effective non-parametric method for generating heterogeneous environments via Random Sample Weighting (RSW). Given the training dataset from a single source environment, we randomly generate a set of covariate-determining sample weights and use each weighted training distribution to simulate an environment. We theoretically show that under appropriate conditions, such random sample weighting can produce sufficient heterogeneity to be exploited by common invariance constraints to find the invariant variables for stable prediction under covariate shifts. Extensive experiments on both simulated and real-world datasets clearly validate the effectiveness of our method.", + "primary_area": "philosophy and ethics of ai", + "author": "Yue He; Xinwei Shen; Renzhe Xu; Tong Zhang; Yong Jiang; Wenchao Zou; Peng Cui", + "authorids": "", + "aff": "Tsinghua University; ETH Z\u00fcrich; Tsinghua University; The Hong Kong University of Science and Technology; Tsinghua University; Siemens; Tsinghua University", + "bibtex": "@article{He_Shen_Xu_Zhang_Jiang_Zou_Cui_2023, title={Covariate-Shift Generalization via Random Sample Weighting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26396}, DOI={10.1609/aaai.v37i10.26396}, abstractNote={Shifts in the marginal distribution of covariates from training to the test phase, named covariate-shifts, often lead to unstable prediction performance across agnostic testing data, especially under model misspecification. Recent literature on invariant learning attempts to learn an invariant predictor from heterogeneous environments. 
However, the performance of the learned predictor depends heavily on the availability and quality of provided environments. In this paper, we propose a simple and effective non-parametric method for generating heterogeneous environments via Random Sample Weighting (RSW). Given the training dataset from a single source environment, we randomly generate a set of covariate-determining sample weights and use each weighted training distribution to simulate an environment. We theoretically show that under appropriate conditions, such random sample weighting can produce sufficient heterogeneity to be exploited by common invariance constraints to find the invariant variables for stable prediction under covariate shifts. Extensive experiments on both simulated and real-world datasets clearly validate the effectiveness of our method.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Yue and Shen, Xinwei and Xu, Renzhe and Zhang, Tong and Jiang, Yong and Zou, Wenchao and Cui, Peng}, year={2023}, month={Jun.}, pages={11828-11836} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26396/26168", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26396", + "pdf_size": 241897, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16505597114190589335&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;stat.math.ethz.ch;gmail.com;tongzhang-ml.org;sz.tsinghua.edu.cn;siemens.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;stat.math.ethz.ch;gmail.com;tongzhang-ml.org;sz.tsinghua.edu.cn;siemens.com;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;2;0;3;0", + "aff_unique_norm": "Tsinghua University;ETH Z\u00fcrich;Hong Kong University of Science and Technology;Siemens AG", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.tsinghua.edu.cn;https://www.ethz.ch;https://www.ust.hk;https://www.siemens.com", + "aff_unique_abbr": "THU;ETHZ;HKUST;Siemens", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;2;0", + "aff_country_unique": "China;Switzerland;Germany" + }, + { + "id": "article-26347", + "title": "CowClip: Reducing CTR Prediction Model Training Time from 12 Hours to 10 Minutes on 1 GPU", + "track": "main", + "status": "Technical", + "abstract": "The click-through rate (CTR) prediction task is to predict whether a user will click on the recommended item. As mind-boggling amounts of data are produced online daily, accelerating CTR prediction model training is critical to ensuring an up-to-date model and reducing the training cost. One approach to increase the training speed is to apply large batch training. However, as shown in computer vision and natural language processing tasks, training with a large batch easily suffers from the loss of accuracy. Our experiments show that previous scaling rules fail in the training of CTR prediction neural networks. To tackle this problem, we first theoretically show that different frequencies of ids make it challenging to scale hyperparameters when scaling the batch size. To stabilize the training process in a large batch size setting, we develop the adaptive Column-wise Clipping (CowClip). It enables an easy and effective scaling rule for the embeddings, which keeps the learning rate unchanged and scales the L2 loss. We conduct extensive experiments with four CTR prediction networks on two real-world datasets and successfully scaled 128 times the original batch size without accuracy loss. In particular, for CTR prediction model DeepFM training on the Criteo dataset, our optimization framework enlarges the batch size from 1K to 128K with over 0.1% AUC improvement and reduces training time from 12 hours to 10 minutes on a single V100 GPU. 
Our code locates at github.com/bytedance/LargeBatchCTR.", + "primary_area": "machine learning iv", + "author": "Zangwei Zheng; Pengtai Xu; Xuan Zou; Da Tang; Zhen Li; Chenguang Xi; Peng Wu; Leqi Zou; Yijie Zhu; Ming Chen; Xiangzhuo Ding; Fuzhao Xue; Ziheng Qin; Youlong Cheng; Yang You", + "authorids": "", + "aff": "Department of Computer Science, National University of Singapore+Bytedance Inc.; Department of Computer Science, National University of Singapore+Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Bytedance Inc.; Department of Computer Science, National University of Singapore; Department of Computer Science, National University of Singapore; Bytedance Inc.; Department of Computer Science, National University of Singapore", + "bibtex": "@article{Zheng_Xu_Zou_Tang_Li_Xi_Wu_Zou_Zhu_Chen_Ding_Xue_Qin_Cheng_You_2023, title={CowClip: Reducing CTR Prediction Model Training Time from 12 Hours to 10 Minutes on 1 GPU}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26347}, DOI={10.1609/aaai.v37i9.26347}, abstractNote={The click-through rate (CTR) prediction task is to predict whether a user will click on the recommended item. As mind-boggling amounts of data are produced online daily, accelerating CTR prediction model training is critical to ensuring an up-to-date model and reducing the training cost. One approach to increase the training speed is to apply large batch training. However, as shown in computer vision and natural language processing tasks, training with a large batch easily suffers from the loss of accuracy. Our experiments show that previous scaling rules fail in the training of CTR prediction neural networks. To tackle this problem, we first theoretically show that different frequencies of ids make it challenging to scale hyperparameters when scaling the batch size. 
To stabilize the training process in a large batch size setting, we develop the adaptive Column-wise Clipping (CowClip). It enables an easy and effective scaling rule for the embeddings, which keeps the learning rate unchanged and scales the L2 loss. We conduct extensive experiments with four CTR prediction networks on two real-world datasets and successfully scaled 128 times the original batch size without accuracy loss. In particular, for CTR prediction model DeepFM training on the Criteo dataset, our optimization framework enlarges the batch size from 1K to 128K with over 0.1% AUC improvement and reduces training time from 12 hours to 10 minutes on a single V100 GPU. Our code locates at github.com/bytedance/LargeBatchCTR.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Zangwei and Xu, Pengtai and Zou, Xuan and Tang, Da and Li, Zhen and Xi, Chenguang and Wu, Peng and Zou, Leqi and Zhu, Yijie and Chen, Ming and Ding, Xiangzhuo and Xue, Fuzhao and Qin, Ziheng and Cheng, Youlong and You, Yang}, year={2023}, month={Jun.}, pages={11390-11398} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26347/26119", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26347", + "pdf_size": 588788, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17587732917048482797&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "comp.nus.edu.sg;comp.nus.edu.sg;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;comp.nus.edu.sg;comp.nus.edu.sg;bytedance.com;comp.nus.edu.sg", + "email": "comp.nus.edu.sg;comp.nus.edu.sg;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;comp.nus.edu.sg;comp.nus.edu.sg;bytedance.com;comp.nus.edu.sg", + "github": "github.com/bytedance/LargeBatchCTR", + "project": "", + "author_num": 15, 
+ "aff_unique_index": "0+1;0+1;1;1;1;1;1;1;1;1;1;0;0;1;0", + "aff_unique_norm": "National University of Singapore;Bytedance Inc.", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.nus.edu.sg;https://www.bytedance.com", + "aff_unique_abbr": "NUS;Bytedance", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;1;1;1;1;1;1;1;1;1;0;0;1;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25368", + "title": "Crafting Monocular Cues and Velocity Guidance for Self-Supervised Multi-Frame Depth Learning", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised monocular methods can efficiently learn depth information of weakly textured surfaces or reflective objects. However, the depth accuracy is limited due to the inherent ambiguity in monocular geometric modeling. In contrast, multi-frame depth estimation methods improve depth accuracy thanks to the success of Multi-View Stereo (MVS), which directly makes use of geometric constraints. Unfortunately, MVS often suffers from texture-less regions, non-Lambertian surfaces, and moving objects, especially in real-world video sequences without known camera motion and depth supervision. Therefore, we propose MOVEDepth, which exploits the MOnocular cues and VElocity guidance to improve multi-frame Depth learning. Unlike existing methods that enforce consistency between MVS depth and monocular depth, MOVEDepth boosts multi-frame depth learning by directly addressing the inherent problems of MVS. The key of our approach is to utilize monocular depth as a geometric priority to construct MVS cost volume, and adjust depth candidates of cost volume under the guidance of predicted camera velocity. We further fuse monocular depth and MVS depth by learning uncertainty in the cost volume, which results in a robust depth estimation against ambiguity in multi-view geometry. 
Extensive experiments show MOVEDepth achieves state-of-the-art performance: Compared with Monodepth2 and PackNet, our method relatively improves the depth accuracy by 20% and 19.8% on the KITTI benchmark. MOVEDepth also generalizes to the more challenging DDAD benchmark, relatively outperforming ManyDepth by 7.2%. The code is available at https://github.com/JeffWang987/MOVEDepth.", + "primary_area": "computer vision iii", + "author": "Xiaofeng Wang; Zheng Zhu; Guan Huang; Xu Chi; Yun Ye; Ziwei Chen; Xingang Wang", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Science; PhiGent Robotics; PhiGent Robotics; PhiGent Robotics; PhiGent Robotics; Southeast University; Institute of Automation, Chinese Academy of Sciences", + "bibtex": "@article{Wang_Zhu_Huang_Chi_Ye_Chen_Wang_2023, title={Crafting Monocular Cues and Velocity Guidance for Self-Supervised Multi-Frame Depth Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25368}, DOI={10.1609/aaai.v37i3.25368}, abstractNote={Self-supervised monocular methods can efficiently learn depth information of weakly textured surfaces or reflective objects. However, the depth accuracy is limited due to the inherent ambiguity in monocular geometric modeling. In contrast, multi-frame depth estimation methods improve depth accuracy thanks to the success of Multi-View Stereo (MVS), which directly makes use of geometric constraints. Unfortunately, MVS often suffers from texture-less regions, non-Lambertian surfaces, and moving objects, especially in real-world video sequences without known camera motion and depth supervision. Therefore, we propose MOVEDepth, which exploits the MOnocular cues and VElocity guidance to improve multi-frame Depth learning. 
Unlike existing methods that enforce consistency between MVS depth and monocular depth, MOVEDepth boosts multi-frame depth learning by directly addressing the inherent problems of MVS. The key of our approach is to utilize monocular depth as a geometric priority to construct MVS cost volume, and adjust depth candidates of cost volume under the guidance of predicted camera velocity. We further fuse monocular depth and MVS depth by learning uncertainty in the cost volume, which results in a robust depth estimation against ambiguity in multi-view geometry. Extensive experiments show MOVEDepth achieves state-of-the-art performance: Compared with Monodepth2 and PackNet, our method relatively improves the depth accuracy by 20% and 19.8% on the KITTI benchmark. MOVEDepth also generalizes to the more challenging DDAD benchmark, relatively outperforming ManyDepth by 7.2%. The code is available at https://github.com/JeffWang987/MOVEDepth.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Xiaofeng and Zhu, Zheng and Huang, Guan and Chi, Xu and Ye, Yun and Chen, Ziwei and Wang, Xingang}, year={2023}, month={Jun.}, pages={2689-2697} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25368/25140", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25368", + "pdf_size": 1874926, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14907205913233285520&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "ia.ac.cn;ieee.org;phigent.ai;phigent.ai;phigent.ai;seu.edu.cn;ia.ac.cn", + "email": "ia.ac.cn;ieee.org;phigent.ai;phigent.ai;phigent.ai;seu.edu.cn;ia.ac.cn", + "github": "https://github.com/JeffWang987/MOVEDepth", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;2;2;2;3;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Science;PhiGent Robotics;Southeast University", + "aff_unique_dep": "Institute of 
Automation;School of Artificial Intelligence;;", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn;;https://www.seu.edu.cn/", + "aff_unique_abbr": "CAS;UCAS;;SEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26662", + "title": "Critical Firms Prediction for Stemming Contagion Risk in Networked-Loans through Graph-Based Deep Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "The networked-loan is major financing support for Micro, Small and Medium-sized Enterprises (MSMEs) in some developing countries. But external shocks may weaken the financial networks' robustness; an accidental default may spread across the network and collapse the whole network. Thus, predicting the critical firms in networked-loans to stem contagion risk and prevent potential systemic financial crises is of crucial significance to the long-term health of inclusive finance and sustainable economic development. Existing approaches in the banking industry dismiss the contagion risk across loan networks and need extensive knowledge with sophisticated financial expertise. Regarding the issues, we propose a novel approach to predict critical firms for stemming contagion risk in the bank industry with deep reinforcement learning integrated with high-order graph message-passing networks. We demonstrate that our approach outperforms the state-of-the-art baselines significantly on the dataset from a large commercial bank. Moreover, we also conducted empirical studies on the real-world loan dataset for risk mitigation. The proposed approach enables financial regulators and risk managers to better track and understands contagion and systemic risk in networked-loans. 
The superior performance also represents a paradigm shift in addressing the modern challenges in financing support of MSMEs and sustainable economic development.", + "primary_area": "ai for social impact", + "author": "Dawei Cheng; Zhibin Niu; Jianfu Zhang; Yiyi Zhang; Changjun Jiang", + "authorids": "", + "aff": "Department of Computer Science and Technology, Tongji University, Shanghai, China+Key Laboratory of Arti\ufb01cial Intelligence, Ministry of Education, Shanghai, China+Shanghai Arti\ufb01cial Intelligence Laboratory, Shanghai, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China+Key Laboratory of Arti\ufb01cial Intelligence, Ministry of Education, Shanghai, China+Shanghai Arti\ufb01cial Intelligence Laboratory, Shanghai, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China+Key Laboratory of Arti\ufb01cial Intelligence, Ministry of Education, Shanghai, China+Shanghai Arti\ufb01cial Intelligence Laboratory, Shanghai, China; Department of Computer Science and Technology, Tongji University, Shanghai, China+Key Laboratory of Arti\ufb01cial Intelligence, Ministry of Education, Shanghai, China+Shanghai Arti\ufb01cial Intelligence Laboratory, Shanghai, China", + "bibtex": "@article{Cheng_Niu_Zhang_Zhang_Jiang_2023, title={Critical Firms Prediction for Stemming Contagion Risk in Networked-Loans through Graph-Based Deep Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26662}, DOI={10.1609/aaai.v37i12.26662}, abstractNote={The networked-loan is major financing support for Micro, Small and Medium-sized Enterprises (MSMEs) in some developing countries. But external shocks may weaken the financial networks\u2019 robustness; an accidental default may spread across the network and collapse the whole network. 
Thus, predicting the critical firms in networked-loans to stem contagion risk and prevent potential systemic financial crises is of crucial significance to the long-term health of inclusive finance and sustainable economic development. Existing approaches in the banking industry dismiss the contagion risk across loan networks and need extensive knowledge with sophisticated financial expertise. Regarding the issues, we propose a novel approach to predict critical firms for stemming contagion risk in the bank industry with deep reinforcement learning integrated with high-order graph message-passing networks. We demonstrate that our approach outperforms the state-of-the-art baselines significantly on the dataset from a large commercial bank. Moreover, we also conducted empirical studies on the real-world loan dataset for risk mitigation. The proposed approach enables financial regulators and risk managers to better track and understands contagion and systemic risk in networked-loans. The superior performance also represents a paradigm shift in addressing the modern challenges in financing support of MSMEs and sustainable economic development.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Dawei and Niu, Zhibin and Zhang, Jianfu and Zhang, Yiyi and Jiang, Changjun}, year={2023}, month={Jun.}, pages={14205-14213} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26662/26434", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26662", + "pdf_size": 2257988, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10111320717393130407&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "tongji.edu.cn;tju.edu.cn;sjtu.edu.cn;sjtu.edu.cn;tongji.edu.cn", + "email": "tongji.edu.cn;tju.edu.cn;sjtu.edu.cn;sjtu.edu.cn;tongji.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+2;3;4+1+2;4+1+2;0+1+2", + "aff_unique_norm": 
"Tongji University;Key Laboratory of Arti\ufb01cial Intelligence;Shanghai Arti\ufb01cial Intelligence Laboratory;Tianjin University;Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Technology;Ministry of Education;;College of Intelligence and Computing;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.tongji.edu.cn;;https://www.shailab.org;http://www.tju.edu.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "Tongji;;Shanghai AI Lab;Tianjin University;SJTU", + "aff_campus_unique_index": "0+0;2;0+0;0+0;0+0", + "aff_campus_unique": "Shanghai;;Tianjin", + "aff_country_unique_index": "0+0+0;0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25462", + "title": "Cross-Category Highlight Detection via Feature Decomposition and Modality Alignment", + "track": "main", + "status": "Technical", + "abstract": "Learning an autonomous highlight video detector with good transferability across video categories, called Cross-Category Video Highlight Detection(CC-VHD), is crucial for the practical application on video-based media platforms. To tackle this problem, we first propose a framework that treats the CC-VHD as learning category-independent highlight feature representation. Under this framework, we propose a novel module, named Multi-task Feature Decomposition Branch which jointly conducts label prediction, cyclic feature reconstruction, and adversarial feature reconstruction to decompose the video features into two independent components: highlight-related component and category-related component. Besides, we propose to align the visual and audio modalities to one aligned feature space before conducting modality fusion, which has not been considered in previous works. 
Finally, the extensive experimental results on three challenging public benchmarks validate the efficacy of our paradigm and the superiority over the existing state-of-the-art approaches to video highlight detection.", + "primary_area": "computer vision iii", + "author": "Zhenduo Zhang", + "authorids": "", + "aff": "Platform Technology Department, OVBU, PCG, Tencent, China", + "bibtex": "@article{Zhang_2023, title={Cross-Category Highlight Detection via Feature Decomposition and Modality Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25462}, DOI={10.1609/aaai.v37i3.25462}, abstractNote={Learning an autonomous highlight video detector with good transferability across video categories, called Cross-Category Video Highlight Detection(CC-VHD), is crucial for the practical application on video-based media platforms. To tackle this problem, we first propose a framework that treats the CC-VHD as learning category-independent highlight feature representation. Under this framework, we propose a novel module, named Multi-task Feature Decomposition Branch which jointly conducts label prediction, cyclic feature reconstruction, and adversarial feature reconstruction to decompose the video features into two independent components: highlight-related component and category-related component. Besides, we propose to align the visual and audio modalities to one aligned feature space before conducting modality fusion, which has not been considered in previous works. 
Finally, the extensive experimental results on three challenging public benchmarks validate the efficacy of our paradigm and the superiority over the existing state-of-the-art approaches to video highlight detection.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zhenduo}, year={2023}, month={Jun.}, pages={3525-3533} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25462/25234", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25462", + "pdf_size": 1887375, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:LT0Eca1qiHsJ:scholar.google.com/&scioq=Cross-Category+Highlight+Detection+via+Feature+Decomposition+and+Modality+Alignment&hl=en&as_sdt=0,44", + "gs_version_total": 2, + "aff_domain": "163.com", + "email": "163.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "Platform Technology Department", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_country_unique_index": "0", + "aff_country_unique": "China" + }, + { + "id": "article-25583", + "title": "Cross-Domain Adaptative Learning for Online Advertisement Customer Lifetime Value Prediction", + "track": "main", + "status": "Technical", + "abstract": "Accurate estimation of customer lifetime value (LTV), which reflects the potential consumption of a user over a period of time, is crucial for the revenue management of online advertising platforms. However, predicting LTV in real-world applications is not an easy task since the user consumption data is usually insufficient within a specific domain. To tackle this problem, we propose a novel cross-domain adaptative framework (CDAF) to leverage consumption data from different domains. The proposed method is able to simultaneously mitigate the data scarce problem and the distribution gap problem caused by data from different domains. 
To be specific, our method firstly learns a LTV prediction model from a different but related platform with sufficient data provision. Subsequently, we exploit domain-invariant information to mitigate data scarce problem by minimizing the Wasserstein discrepancy between the encoded user representations of two domains. In addition, we design a dual-predictor schema which not only enhances domain-invariant information in the semantic space but also preserves domain-specific information for accurate target prediction. The proposed framework is evaluated on five datasets collected from real historical data on the advertising platform of Tencent Games. Experimental results verify that the proposed framework is able to significantly improve the LTV prediction performance on this platform. For instance, our method can boost DCNv2 with the improvement of 13.7% in terms of AUC on dataset G2. Code: https://github.com/TL-UESTC/CDAF.", + "primary_area": "data mining and knowledge management", + "author": "Hongzu Su; Zhekai Du; Jingjing Li; Lei Zhu; Ke Lu", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China + Institute of Electronic and Information Engineering of UESTC in Guangdong; Shandong Normal University; University of Electronic Science and Technology of China", + "bibtex": "@article{Su_Du_Li_Zhu_Lu_2023, title={Cross-Domain Adaptative Learning for Online Advertisement Customer Lifetime Value Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25583}, DOI={10.1609/aaai.v37i4.25583}, abstractNote={Accurate estimation of customer lifetime value (LTV), which reflects the potential consumption of a user over a period of time, is crucial for the revenue management of online advertising platforms. 
However, predicting LTV in real-world applications is not an easy task since the user consumption data is usually insufficient within a specific domain. To tackle this problem, we propose a novel cross-domain adaptative framework (CDAF) to leverage consumption data from different domains. The proposed method is able to simultaneously mitigate the data scarce problem and the distribution gap problem caused by data from different domains. To be specific, our method firstly learns a LTV prediction model from a different but related platform with sufficient data provision. Subsequently, we exploit domain-invariant information to mitigate data scarce problem by minimizing the Wasserstein discrepancy between the encoded user representations of two domains. In addition, we design a dual-predictor schema which not only enhances domain-invariant information in the semantic space but also preserves domain-specific information for accurate target prediction. The proposed framework is evaluated on five datasets collected from real historical data on the advertising platform of Tencent Games. Experimental results verify that the proposed framework is able to significantly improve the LTV prediction performance on this platform. For instance, our method can boost DCNv2 with the improvement of 13.7% in terms of AUC on dataset G2. 
Code: https://github.com/TL-UESTC/CDAF.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Su, Hongzu and Du, Zhekai and Li, Jingjing and Zhu, Lei and Lu, Ke}, year={2023}, month={Jun.}, pages={4605-4613} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25583/25355", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25583", + "pdf_size": 543706, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16992490203175328887&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "std.uestc.edu.cn;std.uestc.edu.cn;yeah.net;gmail.com;uestc.edu.cn", + "email": "std.uestc.edu.cn;std.uestc.edu.cn;yeah.net;gmail.com;uestc.edu.cn", + "github": "https://github.com/TL-UESTC/CDAF", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+0;1;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Shandong Normal University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uestc.edu.cn;http://www.sdnu.edu.cn/", + "aff_unique_abbr": "UESTC;SDNU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Guangdong", + "aff_country_unique_index": "0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25615", + "title": "Cross-Domain Few-Shot Graph Classification with a Reinforced Task Coordinator", + "track": "main", + "status": "Technical", + "abstract": "Cross-domain graph few-shot learning attempts to address the prevalent data scarcity issue in graph mining problems. However, the utilization of cross-domain data induces another intractable domain shift issue which severely degrades the generalization ability of cross-domain graph few-shot learning models. The combat with the domain shift issue is hindered due to the coarse utilization of source domains and the ignorance of accessible prompts. 
To address these challenges, in this paper, we design a novel Cross-domain Task Coordinator to leverage a small set of labeled target domain data as prompt tasks, then model the association and discover the relevance between meta-tasks from the source domain and the prompt tasks. Based on the discovered relevance, our model achieves adaptive task selection and enables the optimization of a graph learner using the selected fine-grained meta-tasks. Extensive experiments conducted on molecular property prediction benchmarks validate the effectiveness of our proposed method by comparing it with state-of-the-art baselines.", + "primary_area": "data mining and knowledge management", + "author": "Qiannan Zhang; Shichao Pei; Qiang Yang; Chuxu Zhang; Nitesh V. Chawla; Xiangliang Zhang", + "authorids": "", + "aff": "King Abdullah University of Science and Technology, Saudi Arabia; University of Notre Dame, USA; King Abdullah University of Science and Technology, Saudi Arabia; Brandeis University, USA + King Abdullah University of Science and Technology, Saudi Arabia; University of Notre Dame, USA; University of Notre Dame, USA + King Abdullah University of Science and Technology, Saudi Arabia", + "bibtex": "@article{Zhang_Pei_Yang_Zhang_Chawla_Zhang_2023, title={Cross-Domain Few-Shot Graph Classification with a Reinforced Task Coordinator}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25615}, DOI={10.1609/aaai.v37i4.25615}, abstractNote={Cross-domain graph few-shot learning attempts to address the prevalent data scarcity issue in graph mining problems. However, the utilization of cross-domain data induces another intractable domain shift issue which severely degrades the generalization ability of cross-domain graph few-shot learning models. The combat with the domain shift issue is hindered due to the coarse utilization of source domains and the ignorance of accessible prompts. 
To address these challenges, in this paper, we design a novel Cross-domain Task Coordinator to leverage a small set of labeled target domain data as prompt tasks, then model the association and discover the relevance between meta-tasks from the source domain and the prompt tasks. Based on the discovered relevance, our model achieves adaptive task selection and enables the optimization of a graph learner using the selected fine-grained meta-tasks. Extensive experiments conducted on molecular property prediction benchmarks validate the effectiveness of our proposed method by comparing it with state-of-the-art baselines.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Qiannan and Pei, Shichao and Yang, Qiang and Zhang, Chuxu and Chawla, Nitesh V. and Zhang, Xiangliang}, year={2023}, month={Jun.}, pages={4893-4901} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25615/25387", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25615", + "pdf_size": 4341203, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9799592003182881415&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "kaust.edu.sa;nd.edu;kaust.edu.sa;brandeis.edu;nd.edu;nd.edu", + "email": "kaust.edu.sa;nd.edu;kaust.edu.sa;brandeis.edu;nd.edu;nd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2+0;1;1+0", + "aff_unique_norm": "King Abdullah University of Science and Technology;University of Notre Dame;Brandeis University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.kaust.edu.sa;https://www.nd.edu;https://www.brandeis.edu", + "aff_unique_abbr": "KAUST;Notre Dame;Brandeis", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1+0;1;1+0", + "aff_country_unique": "Saudi Arabia;United States" + }, + { + "id": "article-25591", + "title": "Cross-Domain Graph Anomaly Detection via 
Anomaly-Aware Contrastive Alignment", + "track": "main", + "status": "Technical", + "abstract": "Cross-domain graph anomaly detection (CD-GAD) describes the problem of detecting anomalous nodes in an unlabelled target graph using auxiliary, related source graphs with labelled anomalous and normal nodes. Although it presents a promising approach to address the notoriously high false positive issue in anomaly detection, little work has been done in this line of research. There are numerous domain adaptation methods in the literature, but it is difficult to adapt them for GAD due to the unknown distributions of the anomalies and the complex node relations embedded in graph data. To this end, we introduce a novel domain adaptation approach, namely Anomaly-aware Contrastive alignmenT (ACT), for GAD. ACT is designed to jointly optimise: (i) unsupervised contrastive learning of normal representations of nodes in the target graph, and (ii) anomaly-aware one-class alignment that aligns these contrastive node representations and the representations of labelled normal nodes in the source graph, while enforcing significant deviation of the representations of the normal nodes from the labelled anomalous nodes in the source graph. In doing so, ACT effectively transfers anomaly-informed knowledge from the source graph to learn the complex node relations of the normal class for GAD on the target graph without any specification of the anomaly distributions. Extensive experiments on eight CD-GAD settings demonstrate that our approach ACT achieves substantially improved detection performance over 10 state-of-the-art GAD methods. 
Code is available at https://github.com/QZ-WANG/ACT.", + "primary_area": "data mining and knowledge management", + "author": "Qizhou Wang; Guansong Pang; Mahsa Salehi; Wray Buntine; Christopher Leckie", + "authorids": "", + "aff": "Monash University; Singapore Management University; Monash University; VinUniversity+Monash University; The University of Melbourne", + "bibtex": "@article{Wang_Pang_Salehi_Buntine_Leckie_2023, title={Cross-Domain Graph Anomaly Detection via Anomaly-Aware Contrastive Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25591}, DOI={10.1609/aaai.v37i4.25591}, abstractNote={Cross-domain graph anomaly detection (CD-GAD) describes the problem of detecting anomalous nodes in an unlabelled target graph using auxiliary, related source graphs with labelled anomalous and normal nodes. Although it presents a promising approach to address the notoriously high false positive issue in anomaly detection, little work has been done in this line of research. There are numerous domain adaptation methods in the literature, but it is difficult to adapt them for GAD due to the unknown distributions of the anomalies and the complex node relations embedded in graph data. To this end, we introduce a novel domain adaptation approach, namely Anomaly-aware Contrastive alignmenT (ACT), for GAD. ACT is designed to jointly optimise: (i) unsupervised contrastive learning of normal representations of nodes in the target graph, and (ii) anomaly-aware one-class alignment that aligns these contrastive node representations and the representations of labelled normal nodes in the source graph, while enforcing significant deviation of the representations of the normal nodes from the labelled anomalous nodes in the source graph. 
In doing so, ACT effectively transfers anomaly-informed knowledge from the source graph to learn the complex node relations of the normal class for GAD on the target graph without any specification of the anomaly distributions. Extensive experiments on eight CD-GAD settings demonstrate that our approach ACT achieves substantially improved detection performance over 10 state-of-the-art GAD methods. Code is available at https://github.com/QZ-WANG/ACT.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Qizhou and Pang, Guansong and Salehi, Mahsa and Buntine, Wray and Leckie, Christopher}, year={2023}, month={Jun.}, pages={4676-4684} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25591/25363", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25591", + "pdf_size": 966022, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1274726189091139404&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "monash.edu;smu.edu.sg;monash.edu;monash.edu;unimelb.edu.au", + "email": "monash.edu;smu.edu.sg;monash.edu;monash.edu;unimelb.edu.au", + "github": "https://github.com/QZ-WANG/ACT", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;2+0;3", + "aff_unique_norm": "Monash University;Singapore Management University;VinUniversity;University of Melbourne", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.monash.edu;https://www.smu.edu.sg;https://vinuni.edu.vn;https://www.unimelb.edu.au", + "aff_unique_abbr": "Monash;SMU;VinUni;UniMelb", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;2+0;0", + "aff_country_unique": "Australia;Singapore;Vietnam" + }, + { + "id": "article-25400", + "title": "Cross-Modal Contrastive Learning for Domain Adaptation in 3D Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Domain adaptation for 3D point cloud has attracted a 
lot of interest since it can avoid the time-consuming labeling process of 3D data to some extent. A recent work named xMUDA leveraged multi-modal data to domain adaptation task of 3D semantic segmentation by mimicking the predictions between 2D and 3D modalities, and outperformed the previous single modality methods only using point clouds. Based on it, in this paper, we propose a novel cross-modal contrastive learning scheme to further improve the adaptation effects. By employing constraints from the correspondences between 2D pixel features and 3D point features, our method not only facilitates interaction between the two different modalities, but also boosts feature representations in both labeled source domain and unlabeled target domain. Meanwhile, to sufficiently utilize 2D context information for domain adaptation through cross-modal learning, we introduce a neighborhood feature aggregation module to enhance pixel features. The module employs neighborhood attention to aggregate nearby pixels in the 2D image, which relieves the mismatching between the two different modalities, arising from projecting relative sparse point cloud to dense image pixels. We evaluate our method on three unsupervised domain adaptation scenarios, including country-to-country, day-to-night, and dataset-to-dataset. 
Experimental results show that our approach outperforms existing methods, which demonstrates the effectiveness of the proposed method.", + "primary_area": "computer vision iii", + "author": "Bowei Xing; Xianghua Ying; Ruibin Wang; Jinfa Yang; Taiyan Chen", + "authorids": "", + "aff": "Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University", + "bibtex": "@article{Xing_Ying_Wang_Yang_Chen_2023, title={Cross-Modal Contrastive Learning for Domain Adaptation in 3D Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25400}, DOI={10.1609/aaai.v37i3.25400}, abstractNote={Domain adaptation for 3D point cloud has attracted a lot of interest since it can avoid the time-consuming labeling process of 3D data to some extent. A recent work named xMUDA leveraged multi-modal data to domain adaptation task of 3D semantic segmentation by mimicking the predictions between 2D and 3D modalities, and outperformed the previous single modality methods only using point clouds. Based on it, in this paper, we propose a novel cross-modal contrastive learning scheme to further improve the adaptation effects. By employing constraints from the correspondences between 2D pixel features and 3D point features, our method not only facilitates interaction between the two different modalities, but also boosts feature representations in both labeled source domain and unlabeled target domain. 
Meanwhile, to sufficiently utilize 2D context information for domain adaptation through cross-modal learning, we introduce a neighborhood feature aggregation module to enhance pixel features. The module employs neighborhood attention to aggregate nearby pixels in the 2D image, which relieves the mismatching between the two different modalities, arising from projecting relative sparse point cloud to dense image pixels. We evaluate our method on three unsupervised domain adaptation scenarios, including country-to-country, day-to-night, and dataset-to-dataset. Experimental results show that our approach outperforms existing methods, which demonstrates the effectiveness of the proposed method.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xing, Bowei and Ying, Xianghua and Wang, Ruibin and Yang, Jinfa and Chen, Taiyan}, year={2023}, month={Jun.}, pages={2974-2982} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25400/25172", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25400", + "pdf_size": 1879205, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7093488292201856589&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;stu.pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;stu.pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Intelligence Science and Technology", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "Peking University", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26525", + "title": "Cross-Modal Distillation for Speaker Recognition", + "track": "main", + "status": "Technical", + "abstract": "Speaker recognition 
achieved great progress recently, however, it is not easy or efficient to further improve its performance via traditional solutions: collecting more data and designing new neural networks. Aiming at the fundamental challenge of speech data, i.e. low information density, multimodal learning can mitigate this challenge by introducing richer and more discriminative information as input for identity recognition. Specifically, since the face image is more discriminative than the speech for identity recognition, we conduct multimodal learning by introducing a face recognition model (teacher) to transfer discriminative knowledge to a speaker recognition model (student) during training. However, this knowledge transfer via distillation is not trivial because the big domain gap between face and speech can easily lead to overfitting. In this work, we introduce a multimodal learning framework, VGSR (Vision-Guided Speaker Recognition). Specifically, we propose a MKD (Margin-based Knowledge Distillation) strategy for cross-modality distillation by introducing a loose constraint to align the teacher and student, greatly reducing overfitting. Our MKD strategy can easily adapt to various existing knowledge distillation methods. In addition, we propose a QAW (Quality-based Adaptive Weights) module to weight input samples via quantified data quality, leading to a robust model training. 
Experimental results on the VoxCeleb1 and CN-Celeb datasets show our proposed strategies can effectively improve the accuracy of speaker recognition by a margin of 10% \u223c 15%, and our methods are very robust to different noises.", + "primary_area": "speech natural language processing", + "author": "Yufeng Jin; Guosheng Hu; Haonan Chen; Duoqian Miao; Liang Hu; Cairong Zhao", + "authorids": "", + "aff": "School of Electronic and Information Engineering, Tongji University, China; Oosto, UK; Alibaba Group, China; School of Electronic and Information Engineering, Tongji University, China; School of Electronic and Information Engineering, Tongji University, China; School of Electronic and Information Engineering, Tongji University, China", + "bibtex": "@article{Jin_Hu_Chen_Miao_Hu_Zhao_2023, title={Cross-Modal Distillation for Speaker Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26525}, DOI={10.1609/aaai.v37i11.26525}, abstractNote={Speaker recognition achieved great progress recently, however, it is not easy or efficient to further improve its performance via traditional solutions: collecting more data and designing new neural networks. Aiming at the fundamental challenge of speech data, i.e. low information density, multimodal learning can mitigate this challenge by introducing richer and more discriminative information as input for identity recognition. Specifically, since the face image is more discriminative than the speech for identity recognition, we conduct multimodal learning by introducing a face recognition model (teacher) to transfer discriminative knowledge to a speaker recognition model (student) during training. However, this knowledge transfer via distillation is not trivial because the big domain gap between face and speech can easily lead to overfitting. In this work, we introduce a multimodal learning framework, VGSR (Vision-Guided Speaker Recognition). 
Specifically, we propose a MKD (Margin-based Knowledge Distillation) strategy for cross-modality distillation by introducing a loose constraint to align the teacher and student, greatly reducing overfitting. Our MKD strategy can easily adapt to various existing knowledge distillation methods. In addition, we propose a QAW (Quality-based Adaptive Weights) module to weight input samples via quantified data quality, leading to a robust model training. Experimental results on the VoxCeleb1 and CN-Celeb datasets show our proposed strategies can effectively improve the accuracy of speaker recognition by a margin of 10% \u223c 15%, and our methods are very robust to different noises.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Yufeng and Hu, Guosheng and Chen, Haonan and Miao, Duoqian and Hu, Liang and Zhao, Cairong}, year={2023}, month={Jun.}, pages={12977-12985} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26525/26297", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26525", + "pdf_size": 1064345, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2062367171308482650&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "tongji.edu.cn;gmail.com;alibaba-inc.com;tongji.edu.cn;gmail.com;tongji.edu.cn", + "email": "tongji.edu.cn;gmail.com;alibaba-inc.com;tongji.edu.cn;gmail.com;tongji.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;0;0", + "aff_unique_norm": "Tongji University;Oosto;Alibaba Group", + "aff_unique_dep": "School of Electronic and Information Engineering;;", + "aff_unique_url": "https://www.tongji.edu.cn;;https://www.alibaba.com", + "aff_unique_abbr": "Tongji;;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25093", + "title": "Cross-Modal Label 
Contrastive Learning for Unsupervised Audio-Visual Event Localization", + "track": "main", + "status": "Technical", + "abstract": "This paper for the first time explores audio-visual event localization in an unsupervised manner. Previous methods tackle this problem in a supervised setting and require segment-level or video-level event category ground-truth to train the model. However, building large-scale multi-modality datasets with category annotations is human-intensive and thus not scalable to real-world applications. To this end, we propose cross-modal label contrastive learning to exploit multi-modal information among unlabeled audio and visual streams as self-supervision signals. At the feature representation level, multi-modal representations are collaboratively learned from audio and visual components by using self-supervised representation learning. At the label level, we propose a novel self-supervised pretext task i.e. label contrasting to self-annotate videos with pseudo-labels for localization model training. Note that irrelevant background would hinder the acquisition of high-quality pseudo-labels and thus lead to an inferior localization model. To address this issue, we then propose an expectation-maximization algorithm that optimizes the pseudo-label acquisition and localization model in a coarse-to-fine manner. Extensive experiments demonstrate that our unsupervised approach performs reasonably well compared to the state-of-the-art supervised methods.", + "primary_area": "computer vision i", + "author": "Peijun Bao; Wenhan Yang; Boon Poh Ng; Meng Hwa Er; Alex C. 
Kot", + "authorids": "", + "aff": "Nanyang Technological University; Nanyang Technological University + Peng Cheng Laboratory; Nanyang Technological University; Nanyang Technological University; Nanyang Technological University", + "bibtex": "@article{Bao_Yang_Ng_Er_Kot_2023, title={Cross-Modal Label Contrastive Learning for Unsupervised Audio-Visual Event Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25093}, DOI={10.1609/aaai.v37i1.25093}, abstractNote={This paper for the first time explores audio-visual event localization in an unsupervised manner. Previous methods tackle this problem in a supervised setting and require segment-level or video-level event category ground-truth to train the model. However, building large-scale multi-modality datasets with category annotations is human-intensive and thus not scalable to real-world applications. To this end, we propose cross-modal label contrastive learning to exploit multi-modal information among unlabeled audio and visual streams as self-supervision signals. At the feature representation level, multi-modal representations are collaboratively learned from audio and visual components by using self-supervised representation learning. At the label level, we propose a novel self-supervised pretext task i.e. label contrasting to self-annotate videos with pseudo-labels for localization model training. Note that irrelevant background would hinder the acquisition of high-quality pseudo-labels and thus lead to an inferior localization model. To address this issue, we then propose an expectation-maximization algorithm that optimizes the pseudo-label acquisition and localization model in a coarse-to-fine manner. 
Extensive experiments demonstrate that our unsupervised approach performs reasonably well compared to the state-of-the-art supervised methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bao, Peijun and Yang, Wenhan and Ng, Boon Poh and Er, Meng Hwa and Kot, Alex C.}, year={2023}, month={Jun.}, pages={215-222} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25093/24865", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25093", + "pdf_size": 3499309, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17409378577769838396&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "e.ntu.edu.sg;pcl.ac.cn;ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "email": "e.ntu.edu.sg;pcl.ac.cn;ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0;0", + "aff_unique_norm": "Nanyang Technological University;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ntu.edu.sg;http://www.pcl.ac.cn", + "aff_unique_abbr": "NTU;PCL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0;0;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25250", + "title": "Cross-Modality Earth Mover\u2019s Distance for Visible Thermal Person Re-identification", + "track": "main", + "status": "Technical", + "abstract": "Visible thermal person re-identification (VT-ReID) suffers from inter-modality discrepancy and intra-identity variations. Distribution alignment is a popular solution for VT-ReID, however, it is usually restricted to the influence of the intra-identity variations. In this paper, we propose the Cross-Modality Earth Mover's Distance (CM-EMD) that can alleviate the impact of the intra-identity variations during modality alignment. 
CM-EMD selects an optimal transport strategy and assigns high weights to pairs that have a smaller intra-identity variation. In this manner, the model will focus on reducing the inter-modality discrepancy while paying less attention to intra-identity variations, leading to a more effective modality alignment. Moreover, we introduce two techniques to improve the advantage of CM-EMD. First, Cross-Modality Discrimination Learning (CM-DL) is designed to overcome the discrimination degradation problem caused by modality alignment. By reducing the ratio between intra-identity and inter-identity variances, CM-DL leads the model to learn more discriminative representations. Second, we construct the Multi-Granularity Structure (MGS), enabling us to align modalities from both coarse- and fine-grained levels with the proposed CM-EMD. Extensive experiments show the benefits of the proposed CM-EMD and its auxiliary techniques (CM-DL and MGS). Our method achieves state-of-the-art performance on two VT-ReID benchmarks.", + "primary_area": "computer vision ii", + "author": "Yongguo Ling; Zhun Zhong; Zhiming Luo; Fengxiang Yang; Donglin Cao; Yaojin Lin; Shaozi Li; Nicu Sebe", + "authorids": "", + "aff": "Department of Artificial Intelligence, Xiamen University, China; Department of Information Engineering and Computer Science, University of Trento, Italy; Department of Artificial Intelligence, Xiamen University, China; Department of Artificial Intelligence, Xiamen University, China; Department of Artificial Intelligence, Xiamen University, China; School of Computer Science, Minnan Normal University, China; Department of Artificial Intelligence, Xiamen University, China; Department of Information Engineering and Computer Science, University of Trento, Italy", + "bibtex": "@article{Ling_Zhong_Luo_Yang_Cao_Lin_Li_Sebe_2023, title={Cross-Modality Earth Mover\u2019s Distance for Visible Thermal Person Re-identification}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25250}, DOI={10.1609/aaai.v37i2.25250}, abstractNote={Visible thermal person re-identification (VT-ReID) suffers from inter-modality discrepancy and intra-identity variations. Distribution alignment is a popular solution for VT-ReID, however, it is usually restricted to the influence of the intra-identity variations. In this paper, we propose the Cross-Modality Earth Mover\u2019s Distance (CM-EMD) that can alleviate the impact of the intra-identity variations during modality alignment. CM-EMD selects an optimal transport strategy and assigns high weights to pairs that have a smaller intra-identity variation. In this manner, the model will focus on reducing the inter-modality discrepancy while paying less attention to intra-identity variations, leading to a more effective modality alignment. Moreover, we introduce two techniques to improve the advantage of CM-EMD. First, Cross-Modality Discrimination Learning (CM-DL) is designed to overcome the discrimination degradation problem caused by modality alignment. By reducing the ratio between intra-identity and inter-identity variances, CM-DL leads the model to learn more discriminative representations. Second, we construct the Multi-Granularity Structure (MGS), enabling us to align modalities from both coarse- and fine-grained levels with the proposed CM-EMD. Extensive experiments show the benefits of the proposed CM-EMD and its auxiliary techniques (CM-DL and MGS). 
Our method achieves state-of-the-art performance on two VT-ReID benchmarks.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ling, Yongguo and Zhong, Zhun and Luo, Zhiming and Yang, Fengxiang and Cao, Donglin and Lin, Yaojin and Li, Shaozi and Sebe, Nicu}, year={2023}, month={Jun.}, pages={1631-1639} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25250/25022", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25250", + "pdf_size": 394658, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1050220637335166543&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sina.com;unitn.it;xmu.edu.cn;xmu.edu.cn;xmu.edu.cn;mnnu.edu.cn;xmu.edu.cn;unitn.it", + "email": "sina.com;unitn.it;xmu.edu.cn;xmu.edu.cn;xmu.edu.cn;mnnu.edu.cn;xmu.edu.cn;unitn.it", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;0;2;0;1", + "aff_unique_norm": "Xiamen University;University of Trento;Minnan Normal University", + "aff_unique_dep": "Department of Artificial Intelligence;Department of Information Engineering and Computer Science;School of Computer Science", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.unitn.it;", + "aff_unique_abbr": "XMU;UniTN;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;0;0;1", + "aff_country_unique": "China;Italy" + }, + { + "id": "article-25116", + "title": "Cross-Modality Person Re-identification with Memory-Based Contrastive Embedding", + "track": "main", + "status": "Technical", + "abstract": "Visible-infrared person re-identification (VI-ReID) aims to retrieve the person images of the same identity from the RGB to infrared image space, which is very important for real-world surveillance system. 
In practice, VI-ReID is more challenging due to the heterogeneous modality discrepancy, which further aggravates the challenges of traditional single-modality person ReID problem, i.e., inter-class confusion and intra-class variations. In this paper, we propose an aggregated memory-based cross-modality deep metric learning framework, which benefits from the increasing number of learned modality-aware and modality-agnostic centroid proxies for cluster contrast and mutual information learning. Furthermore, to suppress the modality discrepancy, the proposed cross-modality alignment objective simultaneously utilizes both historical and up-to-date learned cluster proxies for enhanced cross-modality association. Such training mechanism helps to obtain hard positive references through increased diversity of learned cluster proxies, and finally achieves stronger ``pulling close'' effect between cross-modality image features. Extensive experiment results demonstrate the effectiveness of the proposed method, surpassing state-of-the-art works significantly by a large margin on the commonly used VI-ReID datasets.", + "primary_area": "computer vision i", + "author": "De Cheng; Xiaolong Wang; Nannan Wang; Zhen Wang; Xiaoyu Wang; Xinbo Gao", + "authorids": "", + "aff": "Xidian University; Xidian University; Xidian University+Zhejiang Lab; Zhejiang Lab; University of Science and Technology of China; Chongqing University of Posts and Telecommunications", + "bibtex": "@article{Cheng_Wang_Wang_Wang_Wang_Gao_2023, title={Cross-Modality Person Re-identification with Memory-Based Contrastive Embedding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25116}, DOI={10.1609/aaai.v37i1.25116}, abstractNote={Visible-infrared person re-identification (VI-ReID) aims to retrieve the person images of the same identity from the RGB to infrared image space, which is very important for real-world surveillance system. 
In practice, VI-ReID is more challenging due to the heterogeneous modality discrepancy, which further aggravates the challenges of traditional single-modality person ReID problem, i.e., inter-class confusion and intra-class variations. In this paper, we propose an aggregated memory-based cross-modality deep metric learning framework, which benefits from the increasing number of learned modality-aware and modality-agnostic centroid proxies for cluster contrast and mutual information learning. Furthermore, to suppress the modality discrepancy, the proposed cross-modality alignment objective simultaneously utilizes both historical and up-to-date learned cluster proxies for enhanced cross-modality association. Such training mechanism helps to obtain hard positive references through increased diversity of learned cluster proxies, and finally achieves stronger ``pulling close\u2019\u2019 effect between cross-modality image features. Extensive experiment results demonstrate the effectiveness of the proposed method, surpassing state-of-the-art works significantly by a large margin on the commonly used VI-ReID datasets.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, De and Wang, Xiaolong and Wang, Nannan and Wang, Zhen and Wang, Xiaoyu and Gao, Xinbo}, year={2023}, month={Jun.}, pages={425-432} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25116/24888", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25116", + "pdf_size": 251760, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1013732667329955797&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff_domain": "xidian.edu.cn;xidian.edu.cn;xidian.edu.cn; ; ; ", + "email": "xidian.edu.cn;xidian.edu.cn;xidian.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1;1;2;3", + "aff_unique_norm": "Xidian University;Zhejiang Lab;University of Science and 
Technology of China;Chongqing University of Posts and Telecommunications", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.xidian.edu.cn/;http://www.zhejianglab.com;http://www.ustc.edu.cn;http://www.cqupt.edu.cn", + "aff_unique_abbr": "Xidian;;USTC;CQUPT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26990", + "title": "Cross-Regional Fraud Detection via Continual Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Detecting fraud is an urgent task to avoid transaction risks. Especially when expanding a business to new cities or new countries, developing a totally new model will bring the cost issue and result in forgetting previous knowledge. This study proposes a novel solution based on heterogeneous trade graphs, namely HTG-CFD, to prevent knowledge forgetting of cross-regional fraud detection. Specifically, a novel heterogeneous trade graph is meticulously constructed from original transactions to explore the complex semantics among different types of entities and relationships. Motivated by continual learning, we present a practical and task-oriented forgetting prevention method to alleviate knowledge forgetting in the context of cross-regional detection. 
Extensive experiments demonstrate that HTG-CFD promotes performance in both cross-regional and single-regional scenarios.", + "primary_area": "", + "author": "Yujie Li; Yuxuan Yang; Qiang Gao; Xin Yang", + "authorids": "", + "aff": "Southwestern University of Finance and Economics; Southwestern University of Finance and Economics; Southwestern University of Finance and Economics+Kash Institute of Electronics and Information Industry; Southwestern University of Finance and Economics+Kash Institute of Electronics and Information Industry", + "bibtex": "@article{Li_Yang_Gao_Yang_2024, title={Cross-Regional Fraud Detection via Continual Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26990}, DOI={10.1609/aaai.v37i13.26990}, abstractNote={Detecting fraud is an urgent task to avoid transaction risks. Especially when expanding a business to new cities or new countries, developing a totally new model will bring the cost issue and result in forgetting previous knowledge. This study proposes a novel solution based on heterogeneous trade graphs, namely HTG-CFD, to prevent knowledge forgetting of cross-regional fraud detection. Specifically, a novel heterogeneous trade graph is meticulously constructed from original transactions to explore the complex semantics among different types of entities and relationships. Motivated by continual learning, we present a practical and task-oriented forgetting prevention method to alleviate knowledge forgetting in the context of cross-regional detection. 
Extensive experiments demonstrate that HTG-CFD promotes performance in both cross-regional and single-regional scenarios.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yujie and Yang, Yuxuan and Gao, Qiang and Yang, Xin}, year={2024}, month={Jul.}, pages={16260-16261} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26990/26762", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26990", + "pdf_size": 1919838, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10427694198667329167&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "smail.swufe.edu.cn;smail.swufe.edu.cn;swufe.edu.cn;swufe.edu.cn", + "email": "smail.swufe.edu.cn;smail.swufe.edu.cn;swufe.edu.cn;swufe.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0+1", + "aff_unique_norm": "Southwestern University of Finance and Economics;Kash Institute of Electronics and Information Industry", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.swufe.edu.cn;", + "aff_unique_abbr": "SWUFE;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-25457", + "title": "Cross-View Geo-Localization via Learning Disentangled Geometric Layout Correspondence", + "track": "main", + "status": "Technical", + "abstract": "Cross-view geo-localization aims to estimate the location of a query ground image by matching it to a reference geo-tagged aerial images database. As an extremely challenging task, its difficulties root in the drastic view changes and different capturing time between two views. Despite these difficulties, recent works achieve outstanding progress on cross-view geo-localization benchmarks. 
However, existing methods still suffer from poor performance on the cross-area benchmarks, in which the training and testing data are captured from two different regions. We attribute this deficiency to the lack of ability to extract the spatial configuration of visual feature layouts and models' overfitting on low-level details from the training set. In this paper, we propose GeoDTR which explicitly disentangles geometric information from raw features and learns the spatial correlations among visual features from aerial and ground pairs with a novel geometric layout extractor module. This module generates a set of geometric layout descriptors, modulating the raw features and producing high-quality latent representations. In addition, we elaborate on two categories of data augmentations, (i) Layout simulation, which varies the spatial configuration while keeping the low-level details intact. (ii) Semantic augmentation, which alters the low-level details and encourages the model to capture spatial configurations. These augmentations help to improve the performance of the cross-view geo-localization models, especially on the cross-area benchmarks. Moreover, we propose a counterfactual-based learning process to benefit the geometric layout extractor in exploring spatial information. Extensive experiments show that GeoDTR not only achieves state-of-the-art results but also significantly boosts the performance on same-area and cross-area benchmarks. 
Our code can be found at https://gitlab.com/vail-uvm/geodtr.", + "primary_area": "computer vision iii", + "author": "Xiaohan Zhang; Xingyu Li; Waqas Sultani; Yi Zhou; Safwan Wshah", + "authorids": "", + "aff": "Department of Computer Science, University of Vermont, Burlington, USA+Vermont Complex Systems Center, University of Vermont, Burlington, USA; Shanghai Center for Brain Science and Brain-Inspired Technology, China; Intelligent Machine Lab, Information Technology University, Pakistan; NEL-BITA, School of Information Science and Technology, University of Science and Technology of China, China; Department of Computer Science, University of Vermont, Burlington, USA+Vermont Complex Systems Center, University of Vermont, Burlington, USA", + "bibtex": "@article{Zhang_Li_Sultani_Zhou_Wshah_2023, title={Cross-View Geo-Localization via Learning Disentangled Geometric Layout Correspondence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25457}, DOI={10.1609/aaai.v37i3.25457}, abstractNote={Cross-view geo-localization aims to estimate the location of a query ground image by matching it to a reference geo-tagged aerial images database. As an extremely challenging task, its difficulties root in the drastic view changes and different capturing time between two views. Despite these difficulties, recent works achieve outstanding progress on cross-view geo-localization benchmarks. However, existing methods still suffer from poor performance on the cross-area benchmarks, in which the training and testing data are captured from two different regions. We attribute this deficiency to the lack of ability to extract the spatial configuration of visual feature layouts and models\u2019 overfitting on low-level details from the training set. 
In this paper, we propose GeoDTR which explicitly disentangles geometric information from raw features and learns the spatial correlations among visual features from aerial and ground pairs with a novel geometric layout extractor module. This module generates a set of geometric layout descriptors, modulating the raw features and producing high-quality latent representations. In addition, we elaborate on two categories of data augmentations, (i) Layout simulation, which varies the spatial configuration while keeping the low-level details intact. (ii) Semantic augmentation, which alters the low-level details and encourages the model to capture spatial configurations. These augmentations help to improve the performance of the cross-view geo-localization models, especially on the cross-area benchmarks. Moreover, we propose a counterfactual-based learning process to benefit the geometric layout extractor in exploring spatial information. Extensive experiments show that GeoDTR not only achieves state-of-the-art results but also significantly boosts the performance on same-area and cross-area benchmarks. 
Our code can be found at https://gitlab.com/vail-uvm/geodtr.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Xiaohan and Li, Xingyu and Sultani, Waqas and Zhou, Yi and Wshah, Safwan}, year={2023}, month={Jun.}, pages={3480-3488} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25457/25229", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25457", + "pdf_size": 2389930, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7267799409886790469&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "uvm.edu;shanghaicenter.org;itu.edu.pk;ustc.edu.cn;uvm.edu", + "email": "uvm.edu;shanghaicenter.org;itu.edu.pk;ustc.edu.cn;uvm.edu", + "github": "", + "project": "https://gitlab.com/vail-uvm/geodtr", + "author_num": 5, + "aff_unique_index": "0+0;1;2;3;0+0", + "aff_unique_norm": "University of Vermont;Shanghai Center for Brain Science and Brain-Inspired Technology;Information Technology University;University of Science and Technology of China", + "aff_unique_dep": "Department of Computer Science;;Intelligent Machine Lab;School of Information Science and Technology", + "aff_unique_url": "https://www.uvm.edu;;;http://www.ustc.edu.cn", + "aff_unique_abbr": "UVM;;;USTC", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Burlington;", + "aff_country_unique_index": "0+0;1;2;1;0+0", + "aff_country_unique": "United States;China;Pakistan" + }, + { + "id": "article-26079", + "title": "Crowd-Level Abnormal Behavior Detection via Multi-Scale Motion Consistency Learning", + "track": "main", + "status": "Technical", + "abstract": "Detecting abnormal crowd motion emerging from complex interactions of individuals is paramount to ensure the safety of crowds. Crowd-level abnormal behaviors (CABs), e.g., counter flow and crowd turbulence, are proven to be the crucial causes of many crowd disasters. 
In the recent decade, video anomaly detection (VAD) techniques have achieved remarkable success in detecting individual-level abnormal behaviors (e.g., sudden running, fighting and stealing), but research on VAD for CABs is rather limited. Unlike individual-level anomaly, CABs usually do not exhibit salient difference from the normal behaviors when observed locally, and the scale of CABs could vary from one scenario to another. In this paper, we present a systematic study to tackle the important problem of VAD for CABs with a novel crowd motion learning framework, multi-scale motion consistency network (MSMC-Net). MSMC-Net first captures the spatial and temporal crowd motion consistency information in a graph representation. Then, it simultaneously trains multiple feature graphs constructed at different scales to capture rich crowd patterns. An attention network is used to adaptively fuse the multi-scale features for better CAB detection. For the empirical study, we consider three large-scale crowd event datasets, UMN, Hajj and Love Parade. 
Experimental results show that MSMC-Net could substantially improve the state-of-the-art performance on all the datasets.", + "primary_area": "machine learning ii", + "author": "Linbo Luo; Yuanjing Li; Haiyan Yin; Shangwei Xie; Ruimin Hu; Wentong Cai", + "authorids": "", + "aff": "School of Cyber Engineering, Xidian University; School of Cyber Engineering, Xidian University; Sea AI Lab; School of Cyber Engineering, Xidian University; School of Cyber Engineering, Xidian University; School of Computer Science and Engineering, Nanyang Technological University", + "bibtex": "@article{Luo_Li_Yin_Xie_Hu_Cai_2023, title={Crowd-Level Abnormal Behavior Detection via Multi-Scale Motion Consistency Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26079}, DOI={10.1609/aaai.v37i7.26079}, abstractNote={Detecting abnormal crowd motion emerging from complex interactions of individuals is paramount to ensure the safety of crowds. Crowd-level abnormal behaviors (CABs), e.g., counter flow and crowd turbulence, are proven to be the crucial causes of many crowd disasters. In the recent decade, video anomaly detection (VAD) techniques have achieved remarkable success in detecting individual-level abnormal behaviors (e.g., sudden running, fighting and stealing), but research on VAD for CABs is rather limited. Unlike individual-level anomaly, CABs usually do not exhibit salient difference from the normal behaviors when observed locally, and the scale of CABs could vary from one scenario to another. In this paper, we present a systematic study to tackle the important problem of VAD for CABs with a novel crowd motion learning framework, multi-scale motion consistency network (MSMC-Net). MSMC-Net first captures the spatial and temporal crowd motion consistency information in a graph representation. Then, it simultaneously trains multiple feature graphs constructed at different scales to capture rich crowd patterns. 
An attention network is used to adaptively fuse the multi-scale features for better CAB detection. For the empirical study, we consider three large-scale crowd event datasets, UMN, Hajj and Love Parade. Experimental results show that MSMC-Net could substantially improve the state-of-the-art performance on all the datasets.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Linbo and Li, Yuanjing and Yin, Haiyan and Xie, Shangwei and Hu, Ruimin and Cai, Wentong}, year={2023}, month={Jun.}, pages={8984-8992} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26079/25851", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26079", + "pdf_size": 1488392, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=756035931937636652&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "xidian.edu.cn;stu.xidian.edu.cn;outlook.com;stu.xidian.edu.cn;163.com;ntu.edu.sg", + "email": "xidian.edu.cn;stu.xidian.edu.cn;outlook.com;stu.xidian.edu.cn;163.com;ntu.edu.sg", + "github": "", + "project": "https://arxiv.org/abs/2212.00501", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;2", + "aff_unique_norm": "Xidian University;Sea AI Lab;Nanyang Technological University", + "aff_unique_dep": "School of Cyber Engineering;;School of Computer Science and Engineering", + "aff_unique_url": "http://www.xidian.edu.cn/;;https://www.ntu.edu.sg", + "aff_unique_abbr": "Xidian;;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;2", + "aff_country_unique": "China;;Singapore" + }, + { + "id": "article-25892", + "title": "CrysGNN: Distilling Pre-trained Knowledge to Enhance Property Prediction for Crystalline Materials", + "track": "main", + "status": "Technical", + "abstract": "In recent years, graph neural network (GNN) based approaches\nhave emerged as a powerful technique to encode complex\ntopological structure of crystal 
materials in an enriched repre-\nsentation space. These models are often supervised in nature\nand using the property-specific training data, learn relation-\nship between crystal structure and different properties like\nformation energy, bandgap, bulk modulus, etc. Most of these\nmethods require a huge amount of property-tagged data to\ntrain the system which may not be available for different prop-\nerties. However, there is an availability of a huge amount\nof crystal data with its chemical composition and structural\nbonds. To leverage these untapped data, this paper presents\nCrysGNN, a new pre-trained GNN framework for crystalline\nmaterials, which captures both node and graph level structural\ninformation of crystal graphs using a huge amount of unla-\nbelled material data. Further, we extract distilled knowledge\nfrom CrysGNN and inject into different state of the art prop-\nerty predictors to enhance their property prediction accuracy.\nWe conduct extensive experiments to show that with distilled\nknowledge from the pre-trained model, all the SOTA algo-\nrithms are able to outperform their own vanilla version with\ngood margins. We also observe that the distillation process\nprovides significant improvement over the conventional ap-\nproach of finetuning the pre-trained model. 
We will release the\npre-trained model along with the large dataset of 800K crys-\ntal graph which we carefully curated; so that the pre-trained\nmodel can be plugged into any existing and upcoming models\nto enhance their prediction accuracy.", + "primary_area": "machine learning i", + "author": "Kishalay Das; Bidisha Samanta; Pawan Goyal; Seung-Cheol Lee; Satadeep Bhattacharjee; Niloy Ganguly", + "authorids": "", + "aff": "Indian Institute of Technology Kharagpur, India; Indian Institute of Technology Kharagpur, India; Indian Institute of Technology Kharagpur, India; Indo Korea Science and Technology Center, Bangalore, India; Indo Korea Science and Technology Center, Bangalore, India; Indian Institute of Technology Kharagpur, India+L3S, Leibniz University of Hannover, Germany", + "bibtex": "@article{Das_Samanta_Goyal_Lee_Bhattacharjee_Ganguly_2023, title={CrysGNN: Distilling Pre-trained Knowledge to Enhance Property Prediction for Crystalline Materials}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25892}, DOI={10.1609/aaai.v37i6.25892}, abstractNote={In recent years, graph neural network (GNN) based approaches\nhave emerged as a powerful technique to encode complex\ntopological structure of crystal materials in an enriched repre-\nsentation space. These models are often supervised in nature\nand using the property-specific training data, learn relation-\nship between crystal structure and different properties like\nformation energy, bandgap, bulk modulus, etc. Most of these\nmethods require a huge amount of property-tagged data to\ntrain the system which may not be available for different prop-\nerties. However, there is an availability of a huge amount\nof crystal data with its chemical composition and structural\nbonds. 
To leverage these untapped data, this paper presents\nCrysGNN, a new pre-trained GNN framework for crystalline\nmaterials, which captures both node and graph level structural\ninformation of crystal graphs using a huge amount of unla-\nbelled material data. Further, we extract distilled knowledge\nfrom CrysGNN and inject into different state of the art prop-\nerty predictors to enhance their property prediction accuracy.\nWe conduct extensive experiments to show that with distilled\nknowledge from the pre-trained model, all the SOTA algo-\nrithms are able to outperform their own vanilla version with\ngood margins. We also observe that the distillation process\nprovides significant improvement over the conventional ap-\nproach of finetuning the pre-trained model. We will release the\npre-trained model along with the large dataset of 800K crys-\ntal graph which we carefully curated; so that the pre-trained\nmodel can be plugged into any existing and upcoming models\nto enhance their prediction accuracy.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Das, Kishalay and Samanta, Bidisha and Goyal, Pawan and Lee, Seung-Cheol and Bhattacharjee, Satadeep and Ganguly, Niloy}, year={2023}, month={Jun.}, pages={7323-7331} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25892/25664", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25892", + "pdf_size": 700671, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2287804014705418180&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "kgpian.iitkgp.ac.in;iitkgp.ac.in;cse.iitkgp.ac.in;ikst.res.in;ikst.res.in;cse.iitkgp.ac.in", + "email": "kgpian.iitkgp.ac.in;iitkgp.ac.in;cse.iitkgp.ac.in;ikst.res.in;ikst.res.in;cse.iitkgp.ac.in", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;0+2", + "aff_unique_norm": "Indian Institute of Technology Kharagpur;Indo Korea Science and 
Technology Center;Leibniz University of Hannover", + "aff_unique_dep": ";;L3S", + "aff_unique_url": "https://www.iitkgp.ac.in;;https://www.uni-hannover.de", + "aff_unique_abbr": "IIT Kharagpur;;LUH", + "aff_campus_unique_index": "0;0;0;1;1;0", + "aff_campus_unique": "Kharagpur;Bangalore;", + "aff_country_unique_index": "0;0;0;0;0;0+1", + "aff_country_unique": "India;Germany" + }, + { + "id": "article-25204", + "title": "Curriculum Multi-Negative Augmentation for Debiased Video Grounding", + "track": "main", + "status": "Technical", + "abstract": "Video Grounding (VG) aims to locate the desired segment from a video given a sentence query. Recent studies have found that current VG models are prone to over-rely the groundtruth moment annotation distribution biases in the training set. To discourage the standard VG model's behavior of exploiting such temporal annotation biases and improve the model generalization ability, we propose multiple negative augmentations in a hierarchical way, including cross-video augmentations from clip-/video-level, and self-shuffled augmentations with masks. These augmentations can effectively diversify the data distribution so that the model can make more reasonable predictions instead of merely fitting the temporal biases. However, directly adopting such data augmentation strategy may inevitably carry some noise shown in our cases, since not all of the handcrafted augmentations are semantically irrelevant to the groundtruth video. To further denoise and improve the grounding accuracy, we design a multi-stage curriculum strategy to adaptively train the standard VG model from easy to hard negative augmentations. 
Experiments on newly collected Charades-CD and ActivityNet-CD datasets demonstrate our proposed strategy can improve the performance of the base model on both i.i.d and o.o.d scenarios.", + "primary_area": "computer vision i", + "author": "Xiaohan Lan; Yitian Yuan; Hong Chen; Xin Wang; Zequn Jie; Lin Ma; Zhi Wang; Wenwu Zhu", + "authorids": "", + "aff": "Tsinghua University; Meituan Inc.; Tsinghua University; Tsinghua University; Meituan Inc.; Meituan Inc.; Tsinghua University; Tsinghua University", + "bibtex": "@article{Lan_Yuan_Chen_Wang_Jie_Ma_Wang_Zhu_2023, title={Curriculum Multi-Negative Augmentation for Debiased Video Grounding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25204}, DOI={10.1609/aaai.v37i1.25204}, abstractNote={Video Grounding (VG) aims to locate the desired segment from a video given a sentence query. Recent studies have found that current VG models are prone to over-rely the groundtruth moment annotation distribution biases in the training set. To discourage the standard VG model\u2019s behavior of exploiting such temporal annotation biases and improve the model generalization ability, we propose multiple negative augmentations in a hierarchical way, including cross-video augmentations from clip-/video-level, and self-shuffled augmentations with masks. These augmentations can effectively diversify the data distribution so that the model can make more reasonable predictions instead of merely fitting the temporal biases. However, directly adopting such data augmentation strategy may inevitably carry some noise shown in our cases, since not all of the handcrafted augmentations are semantically irrelevant to the groundtruth video. To further denoise and improve the grounding accuracy, we design a multi-stage curriculum strategy to adaptively train the standard VG model from easy to hard negative augmentations. 
Experiments on newly collected Charades-CD and ActivityNet-CD datasets demonstrate our proposed strategy can improve the performance of the base model on both i.i.d and o.o.d scenarios.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lan, Xiaohan and Yuan, Yitian and Chen, Hong and Wang, Xin and Jie, Zequn and Ma, Lin and Wang, Zhi and Zhu, Wenwu}, year={2023}, month={Jun.}, pages={1213-1221} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25204/24976", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25204", + "pdf_size": 5395602, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=389168219586683688&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;foxmail.com;mails.tsinghua.edu.cn;tsinghua.edu.cn;gmail.com;gmail.com;sz.tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;foxmail.com;mails.tsinghua.edu.cn;tsinghua.edu.cn;gmail.com;gmail.com;sz.tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;1;1;0;0", + "aff_unique_norm": "Tsinghua University;Meituan Inc.", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.meituan.com", + "aff_unique_abbr": "THU;Meituan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25236", + "title": "Curriculum Temperature for Knowledge Distillation", + "track": "main", + "status": "Technical", + "abstract": "Most existing distillation methods ignore the flexible role of the temperature in the loss function and fix it as a hyper-parameter that can be decided by an inefficient grid search. In general, the temperature controls the discrepancy between two distributions and can faithfully determine the difficulty level of the distillation task. 
Keeping a constant temperature, i.e., a fixed level of task difficulty, is usually sub-optimal for a growing student during its progressive learning stages. In this paper, we propose a simple curriculum-based technique, termed Curriculum Temperature for Knowledge Distillation (CTKD), which controls the task difficulty level during the student's learning career through a dynamic and learnable temperature. Specifically, following an easy-to-hard curriculum, we gradually increase the distillation loss w.r.t. the temperature, leading to increased distillation difficulty in an adversarial manner. As an easy-to-use plug-in technique, CTKD can be seamlessly integrated into existing knowledge distillation frameworks and brings general improvements at a negligible additional computation cost. Extensive experiments on CIFAR-100, ImageNet-2012, and MS-COCO demonstrate the effectiveness of our method.", + "primary_area": "computer vision ii", + "author": "Zheng Li; Xiang Li; Lingfeng Yang; Borui Zhao; Renjie Song; Lei Luo; Jun Li; Jian Yang", + "authorids": "", + "aff": "Nankai University; Nankai University; Nanjing University of Science and Technology; Megvii Technology; Megvii Technology; Nanjing University of Science and Technology; Nanjing University of Science and Technology; Nankai University", + "bibtex": "@article{Li_Li_Yang_Zhao_Song_Luo_Li_Yang_2023, title={Curriculum Temperature for Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25236}, DOI={10.1609/aaai.v37i2.25236}, abstractNote={Most existing distillation methods ignore the flexible role of the temperature in the loss function and fix it as a hyper-parameter that can be decided by an inefficient grid search. In general, the temperature controls the discrepancy between two distributions and can faithfully determine the difficulty level of the distillation task. 
Keeping a constant temperature, i.e., a fixed level of task difficulty, is usually sub-optimal for a growing student during its progressive learning stages. In this paper, we propose a simple curriculum-based technique, termed Curriculum Temperature for Knowledge Distillation (CTKD), which controls the task difficulty level during the student\u2019s learning career through a dynamic and learnable temperature. Specifically, following an easy-to-hard curriculum, we gradually increase the distillation loss w.r.t. the temperature, leading to increased distillation difficulty in an adversarial manner. As an easy-to-use plug-in technique, CTKD can be seamlessly integrated into existing knowledge distillation frameworks and brings general improvements at a negligible additional computation cost. Extensive experiments on CIFAR-100, ImageNet-2012, and MS-COCO demonstrate the effectiveness of our method.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zheng and Li, Xiang and Yang, Lingfeng and Zhao, Borui and Song, Renjie and Luo, Lei and Li, Jun and Yang, Jian}, year={2023}, month={Jun.}, pages={1504-1512} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25236/25008", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25236", + "pdf_size": 429985, + "gs_citation": 179, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11844621094009257859&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mail.nankai.edu.cn;nankai.edu.cn;nankai.edu.cn;gmail.com;megvii.com;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "email": "mail.nankai.edu.cn;nankai.edu.cn;nankai.edu.cn;gmail.com;megvii.com;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;2;2;1;1;0", + "aff_unique_norm": "Nankai University;Nanjing University of Science and Technology;Megvii Technology", + "aff_unique_dep": ";;", + "aff_unique_url": 
"http://www.nankai.edu.cn;http://www.nust.edu.cn/;https://www.megvii.com", + "aff_unique_abbr": "NKU;NUST;Megvii", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26795", + "title": "Customer Service Combining Human Operators and Virtual Agents: A Call for Multidisciplinary AI Research", + "track": "senior member presentation bridge papers", + "status": "Technical", + "abstract": "The use of virtual agents (bots) has become essential for providing online assistance to customers. However, even though a lot of effort has been dedicated to the research, development, and deployment of such virtual agents, customers are frequently frustrated with the interaction with the virtual agent and require a human instead. We suggest that a holistic approach, combining virtual agents and human operators working together, is the path to providing satisfactory service. However, implementing such a holistic customer service system will not, and cannot, be achieved using any single AI technology or branch. Rather, such a system will inevitably require the integration of multiple and diverse AI technologies, including natural language processing, multi-agent systems, machine learning, reinforcement learning, and behavioral cloning; in addition to integration with other disciplines such as psychology, business, sociology, economics, operation research, informatics, computer-human interaction, and more. As such, we believe this customer service application offers a rich domain for experimentation and application of multidisciplinary AI. In this paper, we introduce the holistic customer service application and discuss the key AI technologies and disciplines required for a successful AI solution for this setting. For each of these AI technologies, we outline the key scientific questions and research avenues stemming from this setting. 
We demonstrate that integrating technologies from different fields can lead to a cost-effective successful customer service center. The challenge is that there is a need for several communities, each with its own language and modeling techniques, different problem-solving methods, and different evaluation methodologies, all of which need to work together. Real cooperation will require the formation of joint methodologies and techniques that could improve the service to customers, but, more importantly, open new directions in cooperation of diverse communities toward solving joint difficult tasks.", + "primary_area": "", + "author": "Sarit Kraus; Yaniv Oshrat; Yonatan Aumann; Tal Hollander; Oleg Maksimov; Anita Ostroumov; Natali Shechtman", + "authorids": "", + "aff": "Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel", + "bibtex": "@article{Kraus_Oshrat_Aumann_Hollander_Maksimov_Ostroumov_Shechtman_2024, title={Customer Service Combining Human Operators and Virtual Agents: A Call for Multidisciplinary AI Research}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26795}, DOI={10.1609/aaai.v37i13.26795}, abstractNote={The use of virtual agents (bots) has become essential for providing online assistance to customers. However, even though a lot of effort has been dedicated to the research, development, and deployment of such virtual agents, customers are frequently frustrated with the interaction with the virtual agent and require a human instead. 
We suggest that a holistic approach, combining virtual agents and human operators working together, is the path to providing satisfactory service. However, implementing such a holistic customer service system will not, and cannot, be achieved using any single AI technology or branch. Rather, such a system will inevitably require the integration of multiple and diverse AI technologies, including natural language processing, multi-agent systems, machine learning, reinforcement learning, and behavioral cloning; in addition to integration with other disciplines such as psychology, business, sociology, economics, operation research, informatics, computer-human interaction, and more. As such, we believe this customer service application offers a rich domain for experimentation and application of multidisciplinary AI. In this paper, we introduce the holistic customer service application and discuss the key AI technologies and disciplines required for a successful AI solution for this setting. For each of these AI technologies, we outline the key scientific questions and research avenues stemming from this setting. We demonstrate that integrating technologies from different fields can lead to a cost-effective successful customer service center. The challenge is that there is a need for several communities, each with its own language and modeling techniques, different problem-solving methods, and different evaluation methodologies, all of which need to work together. 
Real cooperation will require the formation of joint methodologies and techniques that could improve the service to customers, but, more importantly, open new directions in cooperation of diverse communities toward solving joint difficult tasks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kraus, Sarit and Oshrat, Yaniv and Aumann, Yonatan and Hollander, Tal and Maksimov, Oleg and Ostroumov, Anita and Shechtman, Natali}, year={2024}, month={Jul.}, pages={15393-15401} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26795/26567", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26795", + "pdf_size": 195979, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1173635866701114838&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.biu.ac.il; ; ; ; ; ; ", + "email": "cs.biu.ac.il; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Bar-Ilan University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.biu.ac.il", + "aff_unique_abbr": "BIU", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Ramat Gan", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25443", + "title": "Cyclically Disentangled Feature Translation for Face Anti-spoofing", + "track": "main", + "status": "Technical", + "abstract": "Current domain adaptation methods for face anti-spoofing leverage labeled source domain data and unlabeled target domain data to obtain a promising generalizable decision boundary. However, it is usually difficult for these methods to achieve a perfect domain-invariant liveness feature disentanglement, which may degrade the final classification performance by domain differences in illumination, face category, spoof type, etc. 
In this work, we tackle cross-scenario face anti-spoofing by proposing a novel domain adaptation method called cyclically disentangled feature translation network (CDFTN). Specifically, CDFTN generates pseudo-labeled samples that possess: 1) source domain-invariant liveness features and 2) target domain-specific content features, which are disentangled through domain adversarial training. A robust classifier is trained based on the synthetic pseudo-labeled images under the supervision of source domain labels. We further extend CDFTN for multi-target domain adaptation by leveraging data from more unlabeled target domains. Extensive experiments on several public datasets demonstrate that our proposed approach significantly outperforms the state of the art. Code and models are available at https://github.com/vis-face/CDFTN.", + "primary_area": "computer vision iii", + "author": "Haixiao Yue; Keyao Wang; Guosheng Zhang; Haocheng Feng; Junyu Han; Errui Ding; Jingdong Wang", + "authorids": "", + "aff": "Department of Computer Vision Technology(VIS), Baidu Inc.; Department of Computer Vision Technology(VIS), Baidu Inc.; Department of Computer Vision Technology(VIS), Baidu Inc.; Department of Computer Vision Technology(VIS), Baidu Inc.; Department of Computer Vision Technology(VIS), Baidu Inc.; Department of Computer Vision Technology(VIS), Baidu Inc.; Department of Computer Vision Technology(VIS), Baidu Inc.", + "bibtex": "@article{Yue_Wang_Zhang_Feng_Han_Ding_Wang_2023, title={Cyclically Disentangled Feature Translation for Face Anti-spoofing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25443}, DOI={10.1609/aaai.v37i3.25443}, abstractNote={Current domain adaptation methods for face anti-spoofing leverage labeled source domain data and unlabeled target domain data to obtain a promising generalizable decision boundary. 
However, it is usually difficult for these methods to achieve a perfect domain-invariant liveness feature disentanglement, which may degrade the final classification performance by domain differences in illumination, face category, spoof type, etc. In this work, we tackle cross-scenario face anti-spoofing by proposing a novel domain adaptation method called cyclically disentangled feature translation network (CDFTN). Specifically, CDFTN generates pseudo-labeled samples that possess: 1) source domain-invariant liveness features and 2) target domain-specific content features, which are disentangled through domain adversarial training. A robust classifier is trained based on the synthetic pseudo-labeled images under the supervision of source domain labels. We further extend CDFTN for multi-target domain adaptation by leveraging data from more unlabeled target domains. Extensive experiments on several public datasets demonstrate that our proposed approach significantly outperforms the state of the art. 
Code and models are available at https://github.com/vis-face/CDFTN.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yue, Haixiao and Wang, Keyao and Zhang, Guosheng and Feng, Haocheng and Han, Junyu and Ding, Errui and Wang, Jingdong}, year={2023}, month={Jun.}, pages={3358-3366} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25443/25215", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25443", + "pdf_size": 5177105, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18190356303365604642&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;outlook.com", + "email": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;outlook.com", + "github": "https://github.com/vis-face/CDFTN", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Baidu Inc.", + "aff_unique_dep": "Department of Computer Vision Technology(VIS)", + "aff_unique_url": "https://www.baidu.com", + "aff_unique_abbr": "Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26389", + "title": "DACOM: Learning Delay-Aware Communication for Multi-Agent Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Communication is supposed to improve multi-agent collaboration and overall performance in cooperative Multi-agent reinforcement learning (MARL). However, such improvements are prevalently limited in practice since most existing communication schemes ignore communication overheads (e.g., communication delays). In this paper, we demonstrate that ignoring communication delays has detrimental effects on collaborations, especially in delay-sensitive tasks such as autonomous driving. 
To mitigate this impact, we design a delay-aware multi-agent communication model (DACOM) to adapt communication to delays. Specifically, DACOM introduces a component, TimeNet, that is responsible for adjusting the waiting time of an agent to receive messages from other agents such that the uncertainty associated with delay can be addressed. Our experiments reveal that DACOM has a non-negligible performance improvement over other mechanisms by making a better trade-off between the benefits of communication and the costs of waiting for messages.", + "primary_area": "multiagent systems", + "author": "Tingting Yuan; Hwei-Ming Chung; Jie Yuan; Xiaoming Fu", + "authorids": "", + "aff": "University of G\u00f6ttingen; University of Oslo + NOOT Tech. Co., Ltd.; Beijing University of Posts and Telecommunications; University of G\u00f6ttingen", + "bibtex": "@article{Yuan_Chung_Yuan_Fu_2023, title={DACOM: Learning Delay-Aware Communication for Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26389}, DOI={10.1609/aaai.v37i10.26389}, abstractNote={Communication is supposed to improve multi-agent collaboration and overall performance in cooperative Multi-agent reinforcement learning (MARL). However, such improvements are prevalently limited in practice since most existing communication schemes ignore communication overheads (e.g., communication delays). In this paper, we demonstrate that ignoring communication delays has detrimental effects on collaborations, especially in delay-sensitive tasks such as autonomous driving. To mitigate this impact, we design a delay-aware multi-agent communication model (DACOM) to adapt communication to delays. Specifically, DACOM introduces a component, TimeNet, that is responsible for adjusting the waiting time of an agent to receive messages from other agents such that the uncertainty associated with delay can be addressed. 
Our experiments reveal that DACOM has a non-negligible performance improvement over other mechanisms by making a better trade-off between the benefits of communication and the costs of waiting for messages.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Tingting and Chung, Hwei-Ming and Yuan, Jie and Fu, Xiaoming}, year={2023}, month={Jun.}, pages={11763-11771} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26389/26161", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26389", + "pdf_size": 2375912, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=110931213934112168&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.uni-goettingen.de;noot.ai;bupt.edu.cn;cs.uni-goettingen.de", + "email": "cs.uni-goettingen.de;noot.ai;bupt.edu.cn;cs.uni-goettingen.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;3;0", + "aff_unique_norm": "University of G\u00f6ttingen;University of Oslo;NOOT Tech. Co., Ltd.;Beijing University of Posts and Telecommunications", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.uni-goettingen.de;https://www.uio.no;;http://www.bupt.edu.cn/", + "aff_unique_abbr": "Georg-August-Universit\u00e4t G\u00f6ttingen;UiO;;BUPT", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;1;3;0", + "aff_country_unique": "Germany;Norway;;China" + }, + { + "id": "article-25543", + "title": "DAMix: Exploiting Deep Autoregressive Model Zoo for Improving Lossless Compression Generalization", + "track": "main", + "status": "Technical", + "abstract": "Deep generative models have demonstrated superior performance in lossless compression on identically distributed data. However, in real-world scenarios, data to be compressed are of various distributions and usually cannot be known in advance. 
Thus, commercially expected neural compression must have strong Out-of-Distribution (OoD) generalization capabilities. Compared with traditional compression methods, deep learning methods have intrinsic flaws for OoD generalization. In this work, we make the attempt to tackle this challenge via exploiting a zoo of Deep Autoregressive models (DAMix). We build a model zoo consisting of autoregressive models trained on data from diverse distributions. In the test phase, we select useful expert models by a simple model evaluation score and adaptively aggregate the predictions of selected models. By assuming the outputs from each expert model are biased in favor of their training distributions, a von Mises-Fisher based filter is proposed to recover the value of unbiased predictions that provides more accurate density estimations than a single model. We derive the posterior of unbiased predictions as well as concentration parameters in the filter, and a novel temporal Stein variational gradient descent for sequential data is proposed to adaptively update the posterior distributions. 
We evaluate DAMix on 22 image datasets, including in-distribution and OoD data, and demonstrate that making use of unbiased predictions has up to 45.6% improvement over the single model trained on ImageNet.", + "primary_area": "data mining and knowledge management", + "author": "Qishi Dong; Fengwei Zhou; Ning Kang; Chuanlong Xie; Shifeng Zhang; Jiawei Li; Heng Peng; Zhenguo Li", + "authorids": "", + "aff": "Huawei Noah\u2019s Ark Lab + Hong Kong Baptist University; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Beijing Normal University + Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Hong Kong Baptist University + Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab", + "bibtex": "@article{Dong_Zhou_Kang_Xie_Zhang_Li_Peng_Li_2023, title={DAMix: Exploiting Deep Autoregressive Model Zoo for Improving Lossless Compression Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25543}, DOI={10.1609/aaai.v37i4.25543}, abstractNote={Deep generative models have demonstrated superior performance in lossless compression on identically distributed data. However, in real-world scenarios, data to be compressed are of various distributions and usually cannot be known in advance. Thus, commercially expected neural compression must have strong Out-of-Distribution (OoD) generalization capabilities. Compared with traditional compression methods, deep learning methods have intrinsic flaws for OoD generalization. In this work, we make the attempt to tackle this challenge via exploiting a zoo of Deep Autoregressive models (DAMix). We build a model zoo consisting of autoregressive models trained on data from diverse distributions. In the test phase, we select useful expert models by a simple model evaluation score and adaptively aggregate the predictions of selected models. 
By assuming the outputs from each expert model are biased in favor of their training distributions, a von Mises-Fisher based filter is proposed to recover the value of unbiased predictions that provides more accurate density estimations than a single model. We derive the posterior of unbiased predictions as well as concentration parameters in the filter, and a novel temporal Stein variational gradient descent for sequential data is proposed to adaptively update the posterior distributions. We evaluate DAMix on 22 image datasets, including in-distribution and OoD data, and demonstrate that making use of unbiased predictions has up to 45.6% improvement over the single model trained on ImageNet.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Qishi and Zhou, Fengwei and Kang, Ning and Xie, Chuanlong and Zhang, Shifeng and Li, Jiawei and Peng, Heng and Li, Zhenguo}, year={2023}, month={Jun.}, pages={4250-4258} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25543/25315", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25543", + "pdf_size": 285283, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6735250824388517635&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "life.hkbu.edu.hk;connect.ust.hk;huawei.com;huawei.com;huawei.com;bnu.edu.cn;hkbu.edu.hk;huawei.com", + "email": "life.hkbu.edu.hk;connect.ust.hk;huawei.com;huawei.com;huawei.com;bnu.edu.cn;hkbu.edu.hk;huawei.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0;0;2+0;0;0;1+0;0", + "aff_unique_norm": "Huawei;Hong Kong Baptist University;Beijing Normal University", + "aff_unique_dep": "Noah\u2019s Ark Lab;;", + "aff_unique_url": "https://www.huawei.com;https://www.hkbu.edu.hk;https://www.bnu.edu.cn", + "aff_unique_abbr": "Huawei;HKBU;BNU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0+0;0;0;0+0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26327", + "title": "DARL: Distance-Aware Uncertainty Estimation for Offline Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "To facilitate offline reinforcement learning, uncertainty estimation is commonly used to detect out-of-distribution data. By inspecting, we show that current explicit uncertainty estimators such as Monte Carlo Dropout and model ensemble are not competent to provide trustworthy uncertainty estimation in offline reinforcement learning. Accordingly, we propose a non-parametric distance-aware uncertainty estimator which is sensitive to the change in the input space for offline reinforcement learning. Based on our new estimator, adaptive truncated quantile critics are proposed to underestimate the out-of-distribution samples. We show that the proposed distance-aware uncertainty estimator is able to offer better uncertainty estimation compared to previous methods. Experimental results demonstrate that our proposed DARL method is competitive to the state-of-the-art methods in offline evaluation tasks.", + "primary_area": "machine learning iv", + "author": "Hongchang Zhang; Jianzhun Shao; Shuncheng He; Yuhang Jiang; Xiangyang Ji", + "authorids": "", + "aff": "Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University", + "bibtex": "@article{Zhang_Shao_He_Jiang_Ji_2023, title={DARL: Distance-Aware Uncertainty Estimation for Offline Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26327}, DOI={10.1609/aaai.v37i9.26327}, abstractNote={To facilitate offline reinforcement learning, uncertainty estimation is commonly used to detect out-of-distribution data. 
By inspecting, we show that current explicit uncertainty estimators such as Monte Carlo Dropout and model ensemble are not competent to provide trustworthy uncertainty estimation in offline reinforcement learning. Accordingly, we propose a non-parametric distance-aware uncertainty estimator which is sensitive to the change in the input space for offline reinforcement learning. Based on our new estimator, adaptive truncated quantile critics are proposed to underestimate the out-of-distribution samples. We show that the proposed distance-aware uncertainty estimator is able to offer better uncertainty estimation compared to previous methods. Experimental results demonstrate that our proposed DARL method is competitive to the state-of-the-art methods in offline evaluation tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Hongchang and Shao, Jianzhun and He, Shuncheng and Jiang, Yuhang and Ji, Xiangyang}, year={2023}, month={Jun.}, pages={11210-11218} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26327/26099", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26327", + "pdf_size": 748438, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2632882939866495080&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn; ; ; ; ", + "email": "mails.tsinghua.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25508", + "title": "DASH: A Distributed and Parallelizable Algorithm for Size-Constrained Submodular Maximization", + "track": "main", + "status": "Technical", + 
"abstract": "MapReduce (MR) algorithms for maximizing monotone, submodular functions subject to a cardinality constraint (SMCC) are currently restricted to the use of the linear-adaptive (non-parallelizable) algorithm GREEDY. Low-adaptive algorithms do not satisfy the requirements of these distributed MR frameworks, thereby limiting their performance. We study the SMCC problem in a distributed setting and propose the first MR algorithms with sublinear adaptive complexity. Our algorithms, R-DASH, T-DASH and G-DASH provide 0.316 - \u03b5, 3/8 - \u03b5 , and (1 - 1/e - \u03b5) approximation ratios, respectively, with nearly optimal adaptive complexity and nearly linear time complexity. Additionally, we provide a framework to increase, under some mild assumptions, the maximum permissible cardinality constraint from O( n / \u2113^2) of prior MR algorithms to O( n / \u2113 ), where n is the data size and \u2113 is the number of machines; under a stronger condition on the objective function, we increase the maximum constraint value to n. 
Finally, we provide empirical evidence to demonstrate that our sublinear-adaptive, distributed algorithms provide orders of magnitude faster runtime compared to current state-of-the-art distributed algorithms.", + "primary_area": "constraint satisfaction and optimization", + "author": "Tonmoy Dey; Yixin Chen; Alan Kuhnle", + "authorids": "", + "aff": "Department of Computer Science, Florida State University; Department of Computer Science & Engineering, Texas A&M University; Department of Computer Science & Engineering, Texas A&M University", + "bibtex": "@article{Dey_Chen_Kuhnle_2023, title={DASH: A Distributed and Parallelizable Algorithm for Size-Constrained Submodular Maximization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25508}, DOI={10.1609/aaai.v37i4.25508}, abstractNote={MapReduce (MR) algorithms for maximizing monotone, submodular functions subject to a cardinality constraint (SMCC) are currently restricted to the use of the linear-adaptive (non-parallelizable) algorithm GREEDY. Low-adaptive algorithms do not satisfy the requirements of these distributed MR frameworks, thereby limiting their performance. We study the SMCC problem in a distributed setting and propose the first MR algorithms with sublinear adaptive complexity. Our algorithms, R-DASH, T-DASH and G-DASH provide 0.316 - \u03b5, 3/8 - \u03b5 , and (1 - 1/e - \u03b5) approximation ratios, respectively, with nearly optimal adaptive complexity and nearly linear time complexity. Additionally, we provide a framework to increase, under some mild assumptions, the maximum permissible cardinality constraint from O( n / \u2113^2) of prior MR algorithms to O( n / \u2113 ), where n is the data size and \u2113 is the number of machines; under a stronger condition on the objective function, we increase the maximum constraint value to n. 
Finally, we provide empirical evidence to demonstrate that our sublinear-adaptive, distributed algorithms provide orders of magnitude faster runtime compared to current state-of-the-art distributed algorithms.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dey, Tonmoy and Chen, Yixin and Kuhnle, Alan}, year={2023}, month={Jun.}, pages={3941-3948} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25508/25280", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25508", + "pdf_size": 1618752, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9345271013209805427&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "fsu.edu;tamu.edu;tamu.com", + "email": "fsu.edu;tamu.edu;tamu.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Florida State University;Texas A&M University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science & Engineering", + "aff_unique_url": "https://www.fsu.edu;https://www.tamu.edu", + "aff_unique_abbr": "FSU;TAMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25226", + "title": "DC-Former: Diverse and Compact Transformer for Person Re-identification", + "track": "main", + "status": "Technical", + "abstract": "In person re-identification (ReID) task, it is still challenging to learn discriminative representation by deep learning, due to limited data. Generally speaking, the model will get better performance when increasing the amount of data. The addition of similar classes strengthens the ability of the classifier to identify similar identities, thereby improving the discrimination of representation. 
In this paper, we propose a Diverse and Compact Transformer (DC-Former) that can achieve a similar effect by splitting embedding space into multiple diverse and compact subspaces. Compact embedding subspace helps model learn more robust and discriminative embedding to identify similar classes. And the fusion of these diverse embeddings containing more fine-grained information can further improve the effect of ReID. Specifically, multiple class tokens are used in vision transformer to represent multiple embedding spaces. Then, a self-diverse constraint (SDC) is applied to these spaces to push them away from each other, which makes each embedding space diverse and compact. Further, a dynamic weight controller (DWC) is further designed for balancing the relative importance among them during training. The experimental results of our method are promising, which surpass previous state-of-the-art methods on several commonly used person ReID benchmarks. Our code is available at https://github.com/ant-research/Diverse-and-Compact-Transformer.", + "primary_area": "computer vision ii", + "author": "Wen Li; Cheng Zou; Meng Wang; Furong Xu; Jianan Zhao; Ruobing Zheng; Yuan Cheng; Wei Chu", + "authorids": "", + "aff": "Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Artificial Intelligence Innovation and Incubation (AI3) Institute, Fudan University; Ant Group", + "bibtex": "@article{Li_Zou_Wang_Xu_Zhao_Zheng_Cheng_Chu_2023, title={DC-Former: Diverse and Compact Transformer for Person Re-identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25226}, DOI={10.1609/aaai.v37i2.25226}, abstractNote={In person re-identification (ReID) task, it is still challenging to learn discriminative representation by deep learning, due to limited data. Generally speaking, the model will get better performance when increasing the amount of data. 
The addition of similar classes strengthens the ability of the classifier to identify similar identities, thereby improving the discrimination of representation. In this paper, we propose a Diverse and Compact Transformer (DC-Former) that can achieve a similar effect by splitting embedding space into multiple diverse and compact subspaces. Compact embedding subspace helps model learn more robust and discriminative embedding to identify similar classes. And the fusion of these diverse embeddings containing more fine-grained information can further improve the effect of ReID. Specifically, multiple class tokens are used in vision transformer to represent multiple embedding spaces. Then, a self-diverse constraint (SDC) is applied to these spaces to push them away from each other, which makes each embedding space diverse and compact. Further, a dynamic weight controller (DWC) is further designed for balancing the relative importance among them during training. The experimental results of our method are promising, which surpass previous state-of-the-art methods on several commonly used person ReID benchmarks. 
Our code is available at https://github.com/ant-research/Diverse-and-Compact-Transformer.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Wen and Zou, Cheng and Wang, Meng and Xu, Furong and Zhao, Jianan and Zheng, Ruobing and Cheng, Yuan and Chu, Wei}, year={2023}, month={Jun.}, pages={1415-1423} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25226/24998", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25226", + "pdf_size": 9418160, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11147064971382378900&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;fudan.edu.cn;antgroup.com", + "email": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;fudan.edu.cn;antgroup.com", + "github": "https://github.com/ant-research/Diverse-and-Compact-Transformer", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;1;0", + "aff_unique_norm": "Ant Group;Fudan University", + "aff_unique_dep": ";Artificial Intelligence Innovation and Incubation (AI3) Institute", + "aff_unique_url": "https://www.antgroup.com;https://www.fudan.edu.cn", + "aff_unique_abbr": "Ant Group;Fudan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26189", + "title": "DE-net: Dynamic Text-Guided Image Editing Adversarial Networks", + "track": "main", + "status": "Technical", + "abstract": "Text-guided image editing models have shown remarkable results. However, there remain two problems. First, they employ fixed manipulation modules for various editing requirements (e.g., color changing, texture changing, content adding and removing), which results in over-editing or insufficient editing. 
Second, they do not clearly distinguish between text-required and text-irrelevant parts, which leads to inaccurate editing.\nTo solve these limitations, we propose:\n(i) a Dynamic Editing Block (DEBlock) that composes different editing modules dynamically for various editing requirements.\n(ii) a Composition Predictor (Comp-Pred), which predicts the composition weights for DEBlock according to the inference on target texts and source images.\n(iii) a Dynamic text-adaptive Convolution Block (DCBlock) that queries source image features to distinguish text-required parts and text-irrelevant parts.\nExtensive experiments demonstrate that our DE-Net achieves excellent performance and manipulates source images more correctly and accurately.", + "primary_area": "machine learning iii", + "author": "Ming Tao; Bing-Kun Bao; Hao Tang; Fei Wu; Longhui Wei; Qi Tian", + "authorids": "", + "aff": "Nanjing University of Posts and Telecommunications; Nanjing University of Posts and Telecommunications; CVL, ETH Z\u00fcrich; Nanjing University of Posts and Telecommunications; Huawei Inc.; Huawei Inc.", + "bibtex": "@article{Tao_Bao_Tang_Wu_Wei_Tian_2023, title={DE-net: Dynamic Text-Guided Image Editing Adversarial Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26189}, DOI={10.1609/aaai.v37i8.26189}, abstractNote={Text-guided image editing models have shown remarkable results. However, there remain two problems. First, they employ fixed manipulation modules for various editing requirements (e.g., color changing, texture changing, content adding and removing), which results in over-editing or insufficient editing. 
Second, they do not clearly distinguish between text-required and text-irrelevant parts, which leads to inaccurate editing.\nTo solve these limitations, we propose:\n(i) a Dynamic Editing Block (DEBlock) that composes different editing modules dynamically for various editing requirements.\n(ii) a Composition Predictor (Comp-Pred), which predicts the composition weights for DEBlock according to the inference on target texts and source images.\n(iii) a Dynamic text-adaptive Convolution Block (DCBlock) that queries source image features to distinguish text-required parts and text-irrelevant parts.\nExtensive experiments demonstrate that our DE-Net achieves excellent performance and manipulates source images more correctly and accurately.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tao, Ming and Bao, Bing-Kun and Tang, Hao and Wu, Fei and Wei, Longhui and Tian, Qi}, year={2023}, month={Jun.}, pages={9971-9979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26189/25961", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26189", + "pdf_size": 5409998, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13104187906124891075&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "njupt.edu.cn; ; ; ; ; ", + "email": "njupt.edu.cn; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;2", + "aff_unique_norm": "Nanjing University of Posts and Telecommunications;ETH Z\u00fcrich;Huawei", + "aff_unique_dep": ";Computer Vision Laboratory;", + "aff_unique_url": "http://www.njupt.edu.cn;https://www.ethz.ch;https://www.huawei.com", + "aff_unique_abbr": "NJUPT;ETHZ;Huawei", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nanjing;", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "China;Switzerland" + }, + { + "id": "article-25337", + "title": "DENet: Disentangled Embedding Network 
for Visible Watermark Removal", + "track": "main", + "status": "Technical", + "abstract": "Adding visible watermark into image is a common copyright protection method of medias. Meanwhile, public research on watermark removal can be utilized as an adversarial technology to help the further development of watermarking. \nExisting watermark removal methods mainly adopt multi-task learning networks, which locate the watermark and restore the background simultaneously. However, these approaches view the task as an image-to-image reconstruction problem, where they only impose supervision after the final output, making the high-level semantic features shared between different tasks. \nTo this end, inspired by the two-stage coarse-refinement network, we propose a novel contrastive learning mechanism to disentangle the high-level embedding semantic information of the images and watermarks, driving the respective network branch more oriented.\nSpecifically, the proposed mechanism is leveraged for watermark image decomposition, which aims to decouple the clean image and watermark hints in the high-level embedding space. This can guarantee the learning representation of the restored image enjoy more task-specific cues.\nIn addition, we introduce a self-attention-based enhancement module, which promotes the network's ability to capture semantic information among different regions, leading to further improvement on the contrastive learning mechanism. \nTo validate the effectiveness of our proposed method, extensive experiments are conducted on different challenging benchmarks. Experimental evaluations show that our approach can achieve state-of-the-art performance and yield high-quality images. 
The code is available at: https://github.com/lianchengmingjue/DENet.", + "primary_area": "computer vision ii", + "author": "Ruizhou Sun; Yukun Su; Qingyao Wu", + "authorids": "", + "aff": "School of Software Engineering, South China University of Technology + Key Laboratory of Big Data and Intelligent Robot, Ministry of Education; School of Software Engineering, South China University of Technology + Pazhou Lab, Guangzhou, China; School of Software Engineering, South China University of Technology + Peng Cheng Laboratory, China", + "bibtex": "@article{Sun_Su_Wu_2023, title={DENet: Disentangled Embedding Network for Visible Watermark Removal}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25337}, DOI={10.1609/aaai.v37i2.25337}, abstractNote={Adding visible watermark into image is a common copyright protection method of medias. Meanwhile, public research on watermark removal can be utilized as an adversarial technology to help the further development of watermarking. Existing watermark removal methods mainly adopt multi-task learning networks, which locate the watermark and restore the background simultaneously. However, these approaches view the task as an image-to-image reconstruction problem, where they only impose supervision after the final output, making the high-level semantic features shared between different tasks. To this end, inspired by the two-stage coarse-refinement network, we propose a novel contrastive learning mechanism to disentangle the high-level embedding semantic information of the images and watermarks, driving the respective network branch more oriented.\nSpecifically, the proposed mechanism is leveraged for watermark image decomposition, which aims to decouple the clean image and watermark hints in the high-level embedding space. 
This can guarantee the learning representation of the restored image enjoy more task-specific cues.\nIn addition, we introduce a self-attention-based enhancement module, which promotes the network\u2019s ability to capture semantic information among different regions, leading to further improvement on the contrastive learning mechanism. To validate the effectiveness of our proposed method, extensive experiments are conducted on different challenging benchmarks. Experimental evaluations show that our approach can achieve state-of-the-art performance and yield high-quality images. The code is available at: https://github.com/lianchengmingjue/DENet.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Ruizhou and Su, Yukun and Wu, Qingyao}, year={2023}, month={Jun.}, pages={2411-2419} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25337/25109", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25337", + "pdf_size": 2508022, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12898145018590504353&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "foxmail.com; ; ", + "email": "foxmail.com; ; ", + "github": "https://github.com/lianchengmingjue/DENet", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+2;0+3", + "aff_unique_norm": "South China University of Technology;Ministry of Education;Pazhou Lab;Peng Cheng Laboratory", + "aff_unique_dep": "School of Software Engineering;Key Laboratory of Big Data and Intelligent Robot;;", + "aff_unique_url": "https://www.scut.edu.cn;;;", + "aff_unique_abbr": "SCUT;;;", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27073", + "title": "DFEE: Interactive DataFlow Execution and Evaluation Kit", + "track": "demonstrations", + "status": "Technical", + "abstract": 
"DataFlow has been emerging as a new paradigm for building task-oriented chatbots due to its expressive semantic representations of the dialogue tasks. Despite the availability of a large dataset SMCalFlow and a simplified syntax, the development and evaluation of DataFlow-based chatbots remain challenging due to the system complexity and the lack of downstream toolchains. In this demonstration, we present DFEE, an interactive DataFlow Execution and Evaluation toolkit that supports execution, visualization and benchmarking of semantic parsers given dialogue input and backend database. We demonstrate the system via a complex dialog task: event scheduling that involves temporal reasoning. It also supports diagnosing the parsing results via a friendly interface that allows developers to examine dynamic DataFlow and the corresponding execution results. To illustrate how to benchmark SoTA models, we propose a novel benchmark that covers more sophisticated event scheduling scenarios and a new metric on task success evaluation. The codes of DFEE have been released on https://github.com/amazon-science/dataflow-evaluation-toolkit.", + "primary_area": "", + "author": "Han He; Song Feng; Daniele Bonadiman; Yi Zhang; Saab Mansour", + "authorids": "", + "aff": "Emory University+AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs", + "bibtex": "@article{He_Feng_Bonadiman_Zhang_Mansour_2024, title={DFEE: Interactive DataFlow Execution and Evaluation Kit}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27073}, DOI={10.1609/aaai.v37i13.27073}, abstractNote={DataFlow has been emerging as a new paradigm for building task-oriented chatbots due to its expressive semantic representations of the dialogue tasks. Despite the availability of a large dataset SMCalFlow and a simplified syntax, the development and evaluation of DataFlow-based chatbots remain challenging due to the system complexity and the lack of downstream toolchains. 
In this demonstration, we present DFEE, an interactive DataFlow Execution and Evaluation toolkit that supports execution, visualization and benchmarking of semantic parsers given dialogue input and backend database. We demonstrate the system via a complex dialog task: event scheduling that involves temporal reasoning. It also supports diagnosing the parsing results via a friendly interface that allows developers to examine dynamic DataFlow and the corresponding execution results. To illustrate how to benchmark SoTA models, we propose a novel benchmark that covers more sophisticated event scheduling scenarios and a new metric on task success evaluation. The codes of DFEE have been released on https://github.com/amazon-science/dataflow-evaluation-toolkit.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Han and Feng, Song and Bonadiman, Daniele and Zhang, Yi and Mansour, Saab}, year={2024}, month={Jul.}, pages={16443-16445} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27073/26845", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27073", + "pdf_size": 126732, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:5ABqqlghKFEJ:scholar.google.com/&scioq=DFEE:+Interactive+DataFlow+Execution+and+Evaluation+Kit&hl=en&as_sdt=0,5", + "gs_version_total": 6, + "aff_domain": "emory.edu;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "emory.edu;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "https://github.com/amazon-science/dataflow-evaluation-toolkit", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1;1", + "aff_unique_norm": "Emory University;Amazon Web Services", + "aff_unique_dep": ";AWS AI Labs", + "aff_unique_url": "https://www.emory.edu;https://aws.amazon.com", + "aff_unique_abbr": "Emory;AWS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": 
"United States" + }, + { + "id": "article-25795", + "title": "DHGE: Dual-View Hyper-Relational Knowledge Graph Embedding for Link Prediction and Entity Typing", + "track": "main", + "status": "Technical", + "abstract": "In the field of representation learning on knowledge graphs (KGs), a hyper-relational fact consists of a main triple and several auxiliary attribute-value descriptions, which is considered more comprehensive and specific than a triple-based fact. However, currently available hyper-relational KG embedding methods in a single view are limited in application because they weaken the hierarchical structure that represents the affiliation between entities. To overcome this limitation, we propose a dual-view hyper-relational KG structure (DH-KG) that contains a hyper-relational instance view for entities and a hyper-relational ontology view for concepts that are abstracted hierarchically from the entities. This paper defines link prediction and entity typing tasks on DH-KG for the first time and constructs two DH-KG datasets, JW44K-6K, extracted from Wikidata, and HTDM based on medical data. Furthermore, we propose DHGE, a DH-KG embedding model based on GRAN encoders, HGNNs, and joint learning. DHGE outperforms baseline models on DH-KG, according to experimental results. Finally, we provide an example of how this technology can be used to treat hypertension. 
Our model and new datasets are publicly available.", + "primary_area": "knowledge representation and reasoning", + "author": "Haoran Luo; Haihong E; Ling Tan; Gengxian Zhou; Tianyu Yao; Kaiyang Wan", + "authorids": "", + "aff": "School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China", + "bibtex": "@article{Luo_E_Tan_Zhou_Yao_Wan_2023, title={DHGE: Dual-View Hyper-Relational Knowledge Graph Embedding for Link Prediction and Entity Typing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25795}, DOI={10.1609/aaai.v37i5.25795}, abstractNote={In the field of representation learning on knowledge graphs (KGs), a hyper-relational fact consists of a main triple and several auxiliary attribute-value descriptions, which is considered more comprehensive and specific than a triple-based fact. However, currently available hyper-relational KG embedding methods in a single view are limited in application because they weaken the hierarchical structure that represents the affiliation between entities. To overcome this limitation, we propose a dual-view hyper-relational KG structure (DH-KG) that contains a hyper-relational instance view for entities and a hyper-relational ontology view for concepts that are abstracted hierarchically from the entities. This paper defines link prediction and entity typing tasks on DH-KG for the first time and constructs two DH-KG datasets, JW44K-6K, extracted from Wikidata, and HTDM based on medical data. 
Furthermore, we propose DHGE, a DH-KG embedding model based on GRAN encoders, HGNNs, and joint learning. DHGE outperforms baseline models on DH-KG, according to experimental results. Finally, we provide an example of how this technology can be used to treat hypertension. Our model and new datasets are publicly available.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Haoran and E, Haihong and Tan, Ling and Zhou, Gengxian and Yao, Tianyu and Wan, Kaiyang}, year={2023}, month={Jun.}, pages={6467-6474} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25795/25567", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25795", + "pdf_size": 424100, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11920622779911874323&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bupt.edu.cn;bupt.edu.cn; ; ; ; ", + "email": "bupt.edu.cn;bupt.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26059", + "title": "DICNet: Deep Instance-Level Contrastive Network for Double Incomplete Multi-View Multi-Label Classification", + "track": "main", + "status": "Technical", + "abstract": "In recent years, multi-view multi-label learning has aroused extensive research enthusiasm. 
However, multi-view multi-label data in the real world is commonly incomplete due to the uncertain factors of data collection and manual annotation, which means that not only multi-view features are often missing, and label completeness is also difficult to be satisfied. To deal with the double incomplete multi-view multi-label classification problem, we propose a deep instance-level contrastive network, namely DICNet. Different from conventional methods, our DICNet focuses on leveraging deep neural network to exploit the high-level semantic representations of samples rather than shallow-level features. First, we utilize the stacked autoencoders to build an end-to-end multi-view feature extraction framework to learn the view-specific representations of samples. Furthermore, in order to improve the consensus representation ability, we introduce an incomplete instance-level contrastive learning scheme to guide the encoders to better extract the consensus information of multiple views and use a multi-view weighted fusion module to enhance the discrimination of semantic features. Overall, our DICNet is adept in capturing consistent discriminative representations of multi-view multi-label data and avoiding the negative effects of missing views and missing labels. 
Extensive experiments performed on five datasets validate that our method outperforms other state-of-the-art methods.", + "primary_area": "machine learning ii", + "author": "Chengliang Liu; Jie Wen; Xiaoling Luo; Chao Huang; Zhihao Wu; Yong Xu", + "authorids": "", + "aff": "Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; School of Cyber Science and Technology, Shenzhen Campus of Sun Yat-sen University, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China+Pengcheng Laboratory, Shenzhen, China", + "bibtex": "@article{Liu_Wen_Luo_Huang_Wu_Xu_2023, title={DICNet: Deep Instance-Level Contrastive Network for Double Incomplete Multi-View Multi-Label Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26059}, DOI={10.1609/aaai.v37i7.26059}, abstractNote={In recent years, multi-view multi-label learning has aroused extensive research enthusiasm. However, multi-view multi-label data in the real world is commonly incomplete due to the uncertain factors of data collection and manual annotation, which means that not only multi-view features are often missing, and label completeness is also difficult to be satisfied. To deal with the double incomplete multi-view multi-label classification problem, we propose a deep instance-level contrastive network, namely DICNet. Different from conventional methods, our DICNet focuses on leveraging deep neural network to exploit the high-level semantic representations of samples rather than shallow-level features. 
First, we utilize the stacked autoencoders to build an end-to-end multi-view feature extraction framework to learn the view-specific representations of samples. Furthermore, in order to improve the consensus representation ability, we introduce an incomplete instance-level contrastive learning scheme to guide the encoders to better extract the consensus information of multiple views and use a multi-view weighted fusion module to enhance the discrimination of semantic features. Overall, our DICNet is adept in capturing consistent discriminative representations of multi-view multi-label data and avoiding the negative effects of missing views and missing labels. Extensive experiments performed on five datasets validate that our method outperforms other state-of-the-art methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Chengliang and Wen, Jie and Luo, Xiaoling and Huang, Chao and Wu, Zhihao and Xu, Yong}, year={2023}, month={Jun.}, pages={8807-8815} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26059/25831", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26059", + "pdf_size": 1349847, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15422485311458918609&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "163.com;126.com;outlook.com;126.com;163.com;ymail.com", + "email": "163.com;126.com;outlook.com;126.com;163.com;ymail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0+2", + "aff_unique_norm": "Harbin Institute of Technology;Sun Yat-sen University;Pengcheng Laboratory", + "aff_unique_dep": "Shenzhen Key Laboratory of Visual Object Detection and Recognition;School of Cyber Science and Technology;", + "aff_unique_url": "http://www.hit.edu.cn/;http://www.sysu.edu.cn;", + "aff_unique_abbr": "HIT;SYSU;", + "aff_campus_unique_index": "0;0;0;0;0;0+0", + "aff_campus_unique": "Shenzhen", + 
"aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25464", + "title": "DINet: Deformation Inpainting Network for Realistic Face Visually Dubbing on High Resolution Video", + "track": "main", + "status": "Technical", + "abstract": "For few-shot learning, it is still a critical challenge to realize photo-realistic face visually dubbing on high-resolution videos. Previous works fail to generate high-fidelity dubbing results. To address the above problem, this paper proposes a Deformation Inpainting Network (DINet) for high-resolution face visually dubbing. Different from previous works relying on multiple up-sample layers to directly generate pixels from latent embeddings, DINet performs spatial deformation on feature maps of reference images to better preserve high-frequency textural details. Specifically, DINet consists of one deformation part and one inpainting part. In the first part, five reference facial images adaptively perform spatial deformation to create deformed feature maps encoding mouth shapes at each frame, in order to align with input driving audio and also the head poses of input source images. In the second part, to produce face visually dubbing, a feature decoder is responsible for adaptively incorporating mouth movements from the deformed feature maps and other attributes (i.e., head pose and upper facial expression) from the source feature maps together. Finally, DINet achieves face visually dubbing with rich textural details. We conduct qualitative and quantitative comparisons to validate our DINet on high-resolution videos. 
The experimental results show that our method outperforms state-of-the-art works.", + "primary_area": "computer vision iii", + "author": "Zhimeng Zhang; Zhipeng Hu; Wenjin Deng; Changjie Fan; Tangjie Lv; Yu Ding", + "authorids": "", + "aff": "Virtual Human Group, Netease Fuxi AI Lab+Zhejiang University; Virtual Human Group, Netease Fuxi AI Lab+Zhejiang University; Xiamen University; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab+Zhejiang University", + "bibtex": "@article{Zhang_Hu_Deng_Fan_Lv_Ding_2023, title={DINet: Deformation Inpainting Network for Realistic Face Visually Dubbing on High Resolution Video}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25464}, DOI={10.1609/aaai.v37i3.25464}, abstractNote={For few-shot learning, it is still a critical challenge to realize photo-realistic face visually dubbing on high-resolution videos. Previous works fail to generate high-fidelity dubbing results. To address the above problem, this paper proposes a Deformation Inpainting Network (DINet) for high-resolution face visually dubbing. Different from previous works relying on multiple up-sample layers to directly generate pixels from latent embeddings, DINet performs spatial deformation on feature maps of reference images to better preserve high-frequency textural details. Specifically, DINet consists of one deformation part and one inpainting part. In the first part, five reference facial images adaptively perform spatial deformation to create deformed feature maps encoding mouth shapes at each frame, in order to align with input driving audio and also the head poses of input source images. In the second part, to produce face visually dubbing, a feature decoder is responsible for adaptively incorporating mouth movements from the deformed feature maps and other attributes (i.e., head pose and upper facial expression) from the source feature maps together. 
Finally, DINet achieves face visually dubbing with rich textural details. We conduct qualitative and quantitative comparisons to validate our DINet on high-resolution videos. The experimental results show that our method outperforms state-of-the-art works.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zhimeng and Hu, Zhipeng and Deng, Wenjin and Fan, Changjie and Lv, Tangjie and Ding, Yu}, year={2023}, month={Jun.}, pages={3543-3551} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25464/25236", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25464", + "pdf_size": 5800405, + "gs_citation": 61, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3726056795906178232&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "corp.netease.com;corp.netease.com;stu.xmu.edu.cn;corp.netease.com;corp.netease.com;corp.netease.com", + "email": "corp.netease.com;corp.netease.com;stu.xmu.edu.cn;corp.netease.com;corp.netease.com;corp.netease.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;2;0;0;0+1", + "aff_unique_norm": "Netease Fuxi AI Lab;Zhejiang University;Xiamen University", + "aff_unique_dep": "Virtual Human Group;;", + "aff_unique_url": "https://www.netease.com;https://www.zju.edu.cn;https://www.xmu.edu.cn", + "aff_unique_abbr": "Netease;ZJU;XMU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27069", + "title": "DISPUTool 2.0: A Modular Architecture for Multi-Layer Argumentative Analysis of Political Debates", + "track": "demonstrations", + "status": "Technical", + "abstract": "Political debates are one of the most salient moments of an election campaign, where candidates are challenged to discuss the main contemporary and historical issues in a country. 
These debates represent a natural ground for argumentative analysis, which has always been employed to investigate political discourse structure and strategy in philosophy and linguistics. In this paper, we present DISPUTool 2.0, an automated tool which relies on Argument Mining methods to analyse the political debates from the US presidential campaigns to extract argument components (i.e., premise and claim) and relations (i.e., support and attack), and highlight fallacious arguments. DISPUTool 2.0 allows also for the automatic analysis of a piece of a debate proposed by the user to identify and classify the arguments contained in the text. A REST API is provided to exploit the tool's functionalities.", + "primary_area": "", + "author": "Pierpaolo Goffredo; Elena Cabrio; Serena Villata; Shohreh Haddadan; Jhonatan Torres Sanchez", + "authorids": "", + "aff": "Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Inria, I3S, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Inria, I3S, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Inria, I3S, France; Zortify, Luxembourg; Universit\u00e9 C\u00f4te d\u2019Azur, 3IA Techpool, France", + "bibtex": "@article{Goffredo_Cabrio_Villata_Haddadan_Torres Sanchez_2024, title={DISPUTool 2.0: A Modular Architecture for Multi-Layer Argumentative Analysis of Political Debates}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27069}, DOI={10.1609/aaai.v37i13.27069}, abstractNote={Political debates are one of the most salient moments of an election campaign, where candidates are challenged to discuss the main contemporary and historical issues in a country. These debates represent a natural ground for argumentative analysis, which has always been employed to investigate political discourse structure and strategy in philosophy and linguistics. 
In this paper, we present DISPUTool 2.0, an automated tool which relies on Argument Mining methods to analyse the political debates from the US presidential campaigns to extract argument components (i.e., premise and claim) and relations (i.e., support and attack), and highlight fallacious arguments. DISPUTool 2.0 allows also for the automatic analysis of a piece of a debate proposed by the user to identify and classify the arguments contained in the text. A REST API is provided to exploit the tool\u2019s functionalities.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Goffredo, Pierpaolo and Cabrio, Elena and Villata, Serena and Haddadan, Shohreh and Torres Sanchez, Jhonatan}, year={2024}, month={Jul.}, pages={16431-16433} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27069/26841", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27069", + "pdf_size": 158076, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9463215461225185978&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "i3s.unice.fr;univ-cotedazur.fr;univ-cotedazur.fr;gmail.com;univ-cotedazur.fr", + "email": "i3s.unice.fr;univ-cotedazur.fr;univ-cotedazur.fr;gmail.com;univ-cotedazur.fr", + "github": "", + "project": "www.debates.org", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Universit\u00e9 C\u00f4te d\u2019Azur;Zortify", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.univ-cotedazur.fr;", + "aff_unique_abbr": "UCA;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "France;Luxembourg" + }, + { + "id": "article-25669", + "title": "DMIS: Dynamic Mesh-Based Importance Sampling for Training Physics-Informed Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Modeling dynamics in the form of partial differential equations (PDEs) is an 
effectual way to understand real-world physics processes. For complex physics systems, analytical solutions are not available and numerical solutions are widely-used. However, traditional numerical algorithms are computationally expensive and challenging in handling multiphysics systems. Recently, using neural networks to solve PDEs has made significant progress, called physics-informed neural networks (PINNs). PINNs encode physical laws into neural networks and learn the continuous solutions of PDEs. For the training of PINNs, existing methods suffer from the problems of inefficiency and unstable convergence, since the PDE residuals require calculating automatic differentiation. In this paper, we propose Dynamic Mesh-based Importance Sampling (DMIS) to tackle these problems. DMIS is a novel sampling scheme based on importance sampling, which constructs a dynamic triangular mesh to estimate sample weights efficiently. DMIS has broad applicability and can be easily integrated into existing methods. The evaluation of DMIS on three widely-used benchmarks shows that DMIS improves the convergence speed and accuracy in the meantime. Especially in solving the highly nonlinear Schr\u00f6dinger Equation, compared with state-of-the-art methods, DMIS shows up to 46% smaller root mean square error and five times faster convergence speed. 
Code is available at https://github.com/MatrixBrain/DMIS.", + "primary_area": "domain s of application", + "author": "Zijiang Yang; Zhongwei Qiu; Dongmei Fu", + "authorids": "", + "aff": "School of Automation and Electrical Engineering, University of Science and Technology Beijing + Beijing Engineering Research Center of Industrial Spectrum Imaging; School of Automation and Electrical Engineering, University of Science and Technology Beijing + Beijing Engineering Research Center of Industrial Spectrum Imaging + The University of Sydney; School of Automation and Electrical Engineering, University of Science and Technology Beijing + Beijing Engineering Research Center of Industrial Spectrum Imaging + Shunde Innovation School, University of Science and Technology Beijing", + "bibtex": "@article{Yang_Qiu_Fu_2023, title={DMIS: Dynamic Mesh-Based Importance Sampling for Training Physics-Informed Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25669}, DOI={10.1609/aaai.v37i4.25669}, abstractNote={Modeling dynamics in the form of partial differential equations (PDEs) is an effectual way to understand real-world physics processes. For complex physics systems, analytical solutions are not available and numerical solutions are widely-used. However, traditional numerical algorithms are computationally expensive and challenging in handling multiphysics systems. Recently, using neural networks to solve PDEs has made significant progress, called physics-informed neural networks (PINNs). PINNs encode physical laws into neural networks and learn the continuous solutions of PDEs. For the training of PINNs, existing methods suffer from the problems of inefficiency and unstable convergence, since the PDE residuals require calculating automatic differentiation. In this paper, we propose Dynamic Mesh-based Importance Sampling (DMIS) to tackle these problems. 
DMIS is a novel sampling scheme based on importance sampling, which constructs a dynamic triangular mesh to estimate sample weights efficiently. DMIS has broad applicability and can be easily integrated into existing methods. The evaluation of DMIS on three widely-used benchmarks shows that DMIS improves the convergence speed and accuracy in the meantime. Especially in solving the highly nonlinear Schr\u00f6dinger Equation, compared with state-of-the-art methods, DMIS shows up to 46% smaller root mean square error and five times faster convergence speed. Code is available at https://github.com/MatrixBrain/DMIS.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Zijiang and Qiu, Zhongwei and Fu, Dongmei}, year={2023}, month={Jun.}, pages={5375-5383} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25669/25441", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25669", + "pdf_size": 6719552, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10380507009419795778&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "xs.ustb.edu.cn;xs.ustb.edu.cn;ustb.edu.cn", + "email": "xs.ustb.edu.cn;xs.ustb.edu.cn;ustb.edu.cn", + "github": "https://github.com/MatrixBrain/DMIS", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1+2;0+1+0", + "aff_unique_norm": "University of Science and Technology Beijing;Beijing Engineering Research Center of Industrial Spectrum Imaging;University of Sydney", + "aff_unique_dep": "School of Automation and Electrical Engineering;;", + "aff_unique_url": "https://www.ustb.edu.cn;;https://www.sydney.edu.au", + "aff_unique_abbr": "USTB;;USYD", + "aff_campus_unique_index": "0;0;0+2", + "aff_campus_unique": "Beijing;;Shunde", + "aff_country_unique_index": "0+0;0+0+1;0+0+0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26382", + "title": "DM\u00b2: Decentralized Multi-Agent Reinforcement 
Learning via Distribution Matching", + "track": "main", + "status": "Technical", + "abstract": "Current approaches to multi-agent cooperation rely heavily on centralized mechanisms or explicit communication protocols to ensure convergence. This paper studies the problem of distributed multi-agent learning without resorting to centralized components or explicit communication. It examines the use of distribution matching to facilitate the coordination of independent agents. In the proposed scheme, each agent independently minimizes the distribution mismatch to the corresponding component of a target visitation distribution. The theoretical analysis shows that under certain conditions, each agent minimizing its individual distribution mismatch allows the convergence to the joint policy that generated the target distribution. Further, if the target distribution is from a joint policy that optimizes a cooperative task, the optimal policy for a combination of this task reward and the distribution matching reward is the same joint policy. This insight is used to formulate a practical algorithm (DM^2), in which each individual agent matches a target distribution derived from concurrently sampled trajectories from a joint expert policy. Experimental validation on the StarCraft domain shows that combining (1) a task reward, and (2) a distribution matching reward for expert demonstrations for the same task, allows agents to outperform a naive distributed baseline. 
Additional experiments probe the conditions under which expert demonstrations need to be sampled to obtain the learning benefits.", + "primary_area": "multiagent systems", + "author": "Caroline Wang; Ishan Durugkar; Elad Liebman; Peter Stone", + "authorids": "", + "aff": "The University of Texas at Austin; The University of Texas at Austin; SparkCognition Research; The University of Texas at Austin+Sony AI", + "bibtex": "@article{Wang_Durugkar_Liebman_Stone_2023, title={DM\u00b2: Decentralized Multi-Agent Reinforcement Learning via Distribution Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26382}, DOI={10.1609/aaai.v37i10.26382}, abstractNote={Current approaches to multi-agent cooperation rely heavily on centralized mechanisms or explicit communication protocols to ensure convergence. This paper studies the problem of distributed multi-agent learning without resorting to centralized components or explicit communication. It examines the use of distribution matching to facilitate the coordination of independent agents. In the proposed scheme, each agent independently minimizes the distribution mismatch to the corresponding component of a target visitation distribution. The theoretical analysis shows that under certain conditions, each agent minimizing its individual distribution mismatch allows the convergence to the joint policy that generated the target distribution. Further, if the target distribution is from a joint policy that optimizes a cooperative task, the optimal policy for a combination of this task reward and the distribution matching reward is the same joint policy. This insight is used to formulate a practical algorithm (DM^2), in which each individual agent matches a target distribution derived from concurrently sampled trajectories from a joint expert policy. 
Experimental validation on the StarCraft domain shows that combining (1) a task reward, and (2) a distribution matching reward for expert demonstrations for the same task, allows agents to outperform a naive distributed baseline. Additional experiments probe the conditions under which expert demonstrations need to be sampled to obtain the learning benefits.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Caroline and Durugkar, Ishan and Liebman, Elad and Stone, Peter}, year={2023}, month={Jun.}, pages={11699-11707} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26382/26154", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26382", + "pdf_size": 345244, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2739557569703252282&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "utexas.edu;cs.utexas.edu;sparkcognition.com;cs.utexas.edu", + "email": "utexas.edu;cs.utexas.edu;sparkcognition.com;cs.utexas.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0+2", + "aff_unique_norm": "University of Texas at Austin;SparkCognition;Sony", + "aff_unique_dep": ";Research;Sony AI", + "aff_unique_url": "https://www.utexas.edu;https://www.sparkcognition.com;https://www.sony.com", + "aff_unique_abbr": "UT Austin;SparkCognition;Sony AI", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Austin;", + "aff_country_unique_index": "0;0;0;0+1", + "aff_country_unique": "United States;Japan" + }, + { + "id": "article-25810", + "title": "DNG: Taxonomy Expansion by Exploring the Intrinsic Directed Structure on Non-gaussian Space", + "track": "main", + "status": "Technical", + "abstract": "Taxonomy expansion is the process of incorporating a large number of additional nodes (i.e., ''queries'') into an existing taxonomy (i.e., ''seed''), with the most important step being the selection of appropriate positions for 
each query.\nEnormous efforts have been made by exploring the seed's structure.\nHowever, existing approaches are deficient in their mining of structural information in two ways: poor modeling of the hierarchical semantics and failure to capture directionality of the is-a relation.\nThis paper seeks to address these issues by explicitly denoting each node as the combination of inherited feature (i.e., structural part) and incremental feature (i.e., supplementary part).\nSpecifically, the inherited feature originates from ''parent'' nodes and is weighted by an inheritance factor.\nWith this node representation, the hierarchy of semantics in taxonomies (i.e., the inheritance and accumulation of features from ''parent'' to ''child'') could be embodied.\nAdditionally, based on this representation, the directionality of the is-a relation could be easily translated into the irreversible inheritance of features.\nInspired by the Darmois-Skitovich Theorem, we implement this irreversibility by a non-Gaussian constraint on the supplementary feature.\nA log-likelihood learning objective is further utilized to optimize the proposed model (dubbed DNG), whereby the required non-Gaussianity is also theoretically ensured.\nExtensive experimental results on two real-world datasets verify the superiority of DNG relative to several strong baselines.", + "primary_area": "knowledge representation and reasoning", + "author": "Songlin Zhai; Weiqing Wang; Yuanfang Li; Yuan Meng", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University, China; Faculty of Information Technology, Monash University, Australia; Faculty of Information Technology, Monash University, Australia; School of Computer Science and Engineering, Southeast University, China", + "bibtex": "@article{Zhai_Wang_Li_Meng_2023, title={DNG: Taxonomy Expansion by Exploring the Intrinsic Directed Structure on Non-gaussian Space}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25810}, DOI={10.1609/aaai.v37i5.25810}, abstractNote={Taxonomy expansion is the process of incorporating a large number of additional nodes (i.e., \u2019\u2019queries\u2019\u2019) into an existing taxonomy (i.e., \u2019\u2019seed\u2019\u2019), with the most important step being the selection of appropriate positions for each query.\nEnormous efforts have been made by exploring the seed\u2019s structure.\nHowever, existing approaches are deficient in their mining of structural information in two ways: poor modeling of the hierarchical semantics and failure to capture directionality of the is-a relation.\nThis paper seeks to address these issues by explicitly denoting each node as the combination of inherited feature (i.e., structural part) and incremental feature (i.e., supplementary part).\nSpecifically, the inherited feature originates from \u2019\u2019parent\u2019\u2019 nodes and is weighted by an inheritance factor.\nWith this node representation, the hierarchy of semantics in taxonomies (i.e., the inheritance and accumulation of features from \u2019\u2019parent\u2019\u2019 to \u2019\u2019child\u2019\u2019) could be embodied.\nAdditionally, based on this representation, the directionality of the is-a relation could be easily translated into the irreversible inheritance of features.\nInspired by the Darmois-Skitovich Theorem, we implement this irreversibility by a non-Gaussian constraint on the supplementary feature.\nA log-likelihood learning objective is further utilized to optimize the proposed model (dubbed DNG), whereby the required non-Gaussianity is also theoretically ensured.\nExtensive experimental results on two real-world datasets verify the superiority of DNG relative to several strong baselines.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhai, Songlin and Wang, Weiqing and Li, Yuanfang and Meng, Yuan}, year={2023}, month={Jun.}, pages={6593-6601} 
}", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25810/25582", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25810", + "pdf_size": 557793, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=334864276803385005&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "seu.edu.cn;monash.edu;monash.edu;seu.edu.cn", + "email": "seu.edu.cn;monash.edu;monash.edu;seu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "Southeast University;Monash University", + "aff_unique_dep": "School of Computer Science and Engineering;Faculty of Information Technology", + "aff_unique_url": "https://www.seu.edu.cn/;https://www.monash.edu", + "aff_unique_abbr": "SEU;Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26770", + "title": "DPAUC: Differentially Private AUC Computation in Federated Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Federated learning (FL) has gained significant attention recently as a privacy-enhancing tool to jointly train a machine learning model by multiple participants. \nThe prior work on FL has mostly studied how to protect label privacy during model training. However, model evaluation in FL might also lead to the potential leakage of private label information.\nIn this work, we propose an evaluation algorithm that can accurately compute the widely used AUC (area under the curve) metric when using the label differential privacy (DP) in FL. Through extensive experiments, we show our algorithms can compute accurate AUCs compared to the ground truth. 
The code is available at https://github.com/bytedance/fedlearner/tree/master/example/privacy/DPAUC", + "primary_area": "safe and robust ai", + "author": "Jiankai Sun; Xin Yang; Yuanshun Yao; Junyuan Xie; Di Wu; Chong Wang", + "authorids": "", + "aff": "ByteDance Inc.; ByteDance Inc.; ByteDance Inc.; ByteDance Ltd; ByteDance Ltd; Apple", + "bibtex": "@article{Sun_Yang_Yao_Xie_Wu_Wang_2023, title={DPAUC: Differentially Private AUC Computation in Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26770}, DOI={10.1609/aaai.v37i12.26770}, abstractNote={Federated learning (FL) has gained significant attention recently as a privacy-enhancing tool to jointly train a machine learning model by multiple participants. The prior work on FL has mostly studied how to protect label privacy during model training. However, model evaluation in FL might also lead to the potential leakage of private label information.\nIn this work, we propose an evaluation algorithm that can accurately compute the widely used AUC (area under the curve) metric when using the label differential privacy (DP) in FL. Through extensive experiments, we show our algorithms can compute accurate AUCs compared to the ground truth. 
The code is available at https://github.com/bytedance/fedlearner/tree/master/example/privacy/DPAUC}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Jiankai and Yang, Xin and Yao, Yuanshun and Xie, Junyuan and Wu, Di and Wang, Chong}, year={2023}, month={Jun.}, pages={15170-15178} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26770/26542", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26770", + "pdf_size": 782053, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17587190325497937079&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;apple.com", + "email": "bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;apple.com", + "github": "https://github.com/bytedance/fedlearner/tree/master/example/privacy/DPAUC", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "ByteDance;Apple Inc.", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bytedance.com;https://www.apple.com", + "aff_unique_abbr": "ByteDance;Apple", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25430", + "title": "DPText-DETR: Towards Better Scene Text Detection with Dynamic Points in Transformer", + "track": "main", + "status": "Technical", + "abstract": "Recently, Transformer-based methods, which predict polygon points or Bezier curve control points for localizing texts, are popular in scene text detection. However, these methods built upon detection transformer framework might achieve sub-optimal training efficiency and performance due to coarse positional query modeling. 
In addition, the point label form exploited in previous works implies the reading order of humans, which impedes the detection robustness from our observation. To address these challenges, this paper proposes a concise Dynamic Point Text DEtection TRansformer network, termed DPText-DETR. In detail, DPText-DETR directly leverages explicit point coordinates to generate position queries and dynamically updates them in a progressive way. Moreover, to improve the spatial inductive bias of non-local self-attention in Transformer, we present an Enhanced Factorized Self-Attention module which provides point queries within each instance with circular shape guidance. Furthermore, we design a simple yet effective positional label form to tackle the side effect of the previous form. To further evaluate the impact of different label forms on the detection robustness in real-world scenario, we establish an Inverse-Text test set containing 500 manually labeled images. Extensive experiments prove the high training efficiency, robustness, and state-of-the-art performance of our method on popular benchmarks. 
The code and the Inverse-Text test set are available at https://github.com/ymy-k/DPText-DETR.", + "primary_area": "computer vision iii", + "author": "Maoyuan Ye; Jing Zhang; Shanshan Zhao; Juhua Liu; Bo Du; Dacheng Tao", + "authorids": "", + "aff": "Research Center for Graphic Communication, Printing and Packaging, Institute of Artificial Intelligence, Wuhan University; The University of Sydney; JD Explore Academy; National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University; Research Center for Graphic Communication, Printing and Packaging, Institute of Artificial Intelligence, Wuhan University; The University of Sydney+JD Explore Academy", + "bibtex": "@article{Ye_Zhang_Zhao_Liu_Du_Tao_2023, title={DPText-DETR: Towards Better Scene Text Detection with Dynamic Points in Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25430}, DOI={10.1609/aaai.v37i3.25430}, abstractNote={Recently, Transformer-based methods, which predict polygon points or Bezier curve control points for localizing texts, are popular in scene text detection. However, these methods built upon detection transformer framework might achieve sub-optimal training efficiency and performance due to coarse positional query modeling. In addition, the point label form exploited in previous works implies the reading order of humans, which impedes the detection robustness from our observation. To address these challenges, this paper proposes a concise Dynamic Point Text DEtection TRansformer network, termed DPText-DETR. In detail, DPText-DETR directly leverages explicit point coordinates to generate position queries and dynamically updates them in a progressive way. 
Moreover, to improve the spatial inductive bias of non-local self-attention in Transformer, we present an Enhanced Factorized Self-Attention module which provides point queries within each instance with circular shape guidance. Furthermore, we design a simple yet effective positional label form to tackle the side effect of the previous form. To further evaluate the impact of different label forms on the detection robustness in real-world scenario, we establish an Inverse-Text test set containing 500 manually labeled images. Extensive experiments prove the high training efficiency, robustness, and state-of-the-art performance of our method on popular benchmarks. The code and the Inverse-Text test set are available at https://github.com/ymy-k/DPText-DETR.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Maoyuan and Zhang, Jing and Zhao, Shanshan and Liu, Juhua and Du, Bo and Tao, Dacheng}, year={2023}, month={Jun.}, pages={3241-3249} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25430/25202", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25430", + "pdf_size": 1949641, + "gs_citation": 85, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2170285609951401184&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "whu.edu.cn;sydney.edu.au;gmail.com;whu.edu.cn;whu.edu.cn;gmail.com", + "email": "whu.edu.cn;sydney.edu.au;gmail.com;whu.edu.cn;whu.edu.cn;gmail.com", + "github": "https://github.com/ymy-k/DPText-DETR", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;0;1+2", + "aff_unique_norm": "Wuhan University;University of Sydney;JD Explore Academy", + "aff_unique_dep": "Research Center for Graphic Communication, Printing and Packaging, Institute of Artificial Intelligence;;", + "aff_unique_url": "http://www.whu.edu.cn/;https://www.sydney.edu.au;", + "aff_unique_abbr": ";USYD;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;1;0;0;1", + "aff_country_unique": "China;Australia;" + }, + { + "id": "article-25261", + "title": "DQ-DETR: Dual Query Detection Transformer for Phrase Extraction and Grounding", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we study the problem of visual grounding by considering both phrase extraction and grounding (PEG). In contrast to the previous phrase-known-at-test setting, PEG requires a model to extract phrases from text and locate objects from image simultaneously, which is a more practical setting in real applications. As phrase extraction can be regarded as a 1D text segmentation problem, we formulate PEG as a dual detection problem and propose a novel DQ-DETR model, which introduces dual queries to probe different features from image and text for object prediction and phrase mask prediction. Each pair of dual queries are designed to have shared positional parts but different content parts. Such a design effectively alleviates the difficulty of modality alignment between image and text (in contrast to a single query design) and empowers Transformer decoder to leverage phrase mask-guided attention to improve the performance. To evaluate the performance of PEG, we also propose a new metric CMAP (cross-modal average precision), analogous to the AP metric in object detection. The new metric overcomes the ambiguity of Recall@1 in many-box-to-one-phrase cases in phrase grounding. As a result, our PEG pre-trained DQ-DETR establishes new state-of-the-art results on all visual grounding benchmarks with a ResNet-101 backbone. For example, it achieves 91.04% and 83.51% in terms of recall rate on RefCOCO testA and testB with a ResNet-101 backbone.", + "primary_area": "computer vision ii", + "author": "Shilong Liu; Shijia Huang; Feng Li; Hao Zhang; Yaoyuan Liang; Hang Su; Jun Zhu; Lei Zhang", + "authorids": "", + "aff": "Dept. of CST, BNRist Center, Inst. 
for AI, Tsinghua-Bosch Joint Center for ML, Tsinghua University + International Digital Economy Academy (IDEA); The Chinese University of Hong Kong; International Digital Economy Academy (IDEA) + The Hong Kong University of Science and Technology; International Digital Economy Academy (IDEA) + The Hong Kong University of Science and Technology; Tsinghua-Berkeley Shenzhen Institute, Tsinghua University; Dept. of CST, BNRist Center, Inst. for AI, Tsinghua-Bosch Joint Center for ML, Tsinghua University; Dept. of CST, BNRist Center, Inst. for AI, Tsinghua-Bosch Joint Center for ML, Tsinghua University; International Digital Economy Academy (IDEA)", + "bibtex": "@article{Liu_Huang_Li_Zhang_Liang_Su_Zhu_Zhang_2023, title={DQ-DETR: Dual Query Detection Transformer for Phrase Extraction and Grounding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25261}, DOI={10.1609/aaai.v37i2.25261}, abstractNote={In this paper, we study the problem of visual grounding by considering both phrase extraction and grounding (PEG). In contrast to the previous phrase-known-at-test setting, PEG requires a model to extract phrases from text and locate objects from image simultaneously, which is a more practical setting in real applications. As phrase extraction can be regarded as a 1D text segmentation problem, we formulate PEG as a dual detection problem and propose a novel DQ-DETR model, which introduces dual queries to probe different features from image and text for object prediction and phrase mask prediction. Each pair of dual queries are designed to have shared positional parts but different content parts. Such a design effectively alleviates the difficulty of modality alignment between image and text (in contrast to a single query design) and empowers Transformer decoder to leverage phrase mask-guided attention to improve the performance. 
To evaluate the performance of PEG, we also propose a new metric CMAP (cross-modal average precision), analogous to the AP metric in object detection. The new metric overcomes the ambiguity of Recall@1 in many-box-to-one-phrase cases in phrase grounding. As a result, our PEG pre-trained DQ-DETR establishes new state-of-the-art results on all visual grounding benchmarks with a ResNet-101 backbone. For example, it achieves 91.04% and 83.51% in terms of recall rate on RefCOCO testA and testB with a ResNet-101 backbone.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Shilong and Huang, Shijia and Li, Feng and Zhang, Hao and Liang, Yaoyuan and Su, Hang and Zhu, Jun and Zhang, Lei}, year={2023}, month={Jun.}, pages={1728-1736} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25261/25033", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25261", + "pdf_size": 436566, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18250852612715290956&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;cse.cuhk.edu.hk;connect.ust.hk;connect.ust.hk;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn;mail.tsinghua.edu.cn;idea.edu.cn", + "email": "mails.tsinghua.edu.cn;cse.cuhk.edu.hk;connect.ust.hk;connect.ust.hk;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn;mail.tsinghua.edu.cn;idea.edu.cn", + "github": "https://github.com/IDEA-Research/DQ-DETR", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;2;1+3;1+3;0;0;0;1", + "aff_unique_norm": "Tsinghua University;International Digital Economy Academy;The Chinese University of Hong Kong;Hong Kong University of Science and Technology", + "aff_unique_dep": "Dept. 
of CST;;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;;https://www.cuhk.edu.hk;https://www.ust.hk", + "aff_unique_abbr": "Tsinghua;IDEA;CUHK;HKUST", + "aff_campus_unique_index": ";;;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26332", + "title": "DRGCN: Dynamic Evolving Initial Residual for Deep Graph Convolutional Networks", + "track": "main", + "status": "Technical", + "abstract": "Graph convolutional networks (GCNs) have been proved to be very practical to handle various graph-related tasks. It has attracted considerable research interest to study deep GCNs, due to their potential superior performance compared with shallow ones. However, simply increasing network depth will, on the contrary, hurt the performance due to the over-smoothing problem. Adding residual connection is proved to be effective for learning deep convolutional neural networks (deep CNNs), it is not trivial when applied to deep GCNs. Recent works proposed an initial residual mechanism that did alleviate the over-smoothing problem in deep GCNs. However, according to our study, their algorithms are quite sensitive to different datasets. In their setting, the personalization (dynamic) and correlation (evolving) of how residual applies are ignored. To this end, we propose a novel model called Dynamic evolving initial Residual Graph Convolutional Network (DRGCN). Firstly, we use a dynamic block for each node to adaptively fetch information from the initial representation. Secondly, we use an evolving block to model the residual evolving pattern between layers. Our experimental results show that our model effectively relieves the problem of over-smoothing in deep GCNs and outperforms the state-of-the-art (SOTA) methods on various benchmark datasets. Moreover, we develop a mini-batch version of DRGCN which can be applied to large-scale data. 
Coupling with several fair training techniques, our model reaches new SOTA results on the large-scale ogbn-arxiv dataset of Open Graph Benchmark (OGB). Our reproducible code is available on GitHub.", + "primary_area": "machine learning iv", + "author": "Lei Zhang; Xiaodong Yan; Jianshan He; Ruopeng Li; Wei Chu", + "authorids": "", + "aff": "Ant Group, Beijing, China; Ant Group, Beijing, China; Ant Group, Beijing, China; Ant Group, Beijing, China; Ant Group, Beijing, China", + "bibtex": "@article{Zhang_Yan_He_Li_Chu_2023, title={DRGCN: Dynamic Evolving Initial Residual for Deep Graph Convolutional Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26332}, DOI={10.1609/aaai.v37i9.26332}, abstractNote={Graph convolutional networks (GCNs) have been proved to be very practical to handle various graph-related tasks. It has attracted considerable research interest to study deep GCNs, due to their potential superior performance compared with shallow ones. However, simply increasing network depth will, on the contrary, hurt the performance due to the over-smoothing problem. Adding residual connection is proved to be effective for learning deep convolutional neural networks (deep CNNs), it is not trivial when applied to deep GCNs. Recent works proposed an initial residual mechanism that did alleviate the over-smoothing problem in deep GCNs. However, according to our study, their algorithms are quite sensitive to different datasets. In their setting, the personalization (dynamic) and correlation (evolving) of how residual applies are ignored. To this end, we propose a novel model called Dynamic evolving initial Residual Graph Convolutional Network (DRGCN). Firstly, we use a dynamic block for each node to adaptively fetch information from the initial representation. Secondly, we use an evolving block to model the residual evolving pattern between layers. 
Our experimental results show that our model effectively relieves the problem of over-smoothing in deep GCNs and outperforms the state-of-the-art (SOTA) methods on various benchmark datasets. Moreover, we develop a mini-batch version of DRGCN which can be applied to large-scale data. Coupling with several fair training techniques, our model reaches new SOTA results on the large-scale ogbn-arxiv dataset of Open Graph Benchmark (OGB). Our reproducible code is available on GitHub.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Lei and Yan, Xiaodong and He, Jianshan and Li, Ruopeng and Chu, Wei}, year={2023}, month={Jun.}, pages={11254-11261} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26332/26104", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26332", + "pdf_size": 5826628, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3795352095701027635&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com", + "email": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Ant Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.antgroup.com", + "aff_unique_abbr": "Ant Group", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27067", + "title": "DUCK: A Drone-Urban Cyber-Defense Framework Based on Pareto-Optimal Deontic Logic Agents", + "track": "demonstrations", + "status": "Technical", + "abstract": "Drone based terrorist attacks are increasing daily. It is not expected to be long before drones are used to carry out terror attacks in urban areas. 
We have developed the DUCK multi-agent testbed that security agencies can use to simulate drone-based attacks by diverse actors and develop a combination of surveillance camera, drone, and cyber defenses against them.", + "primary_area": "", + "author": "Tonmoay Deb; J\u00fcrgen Dix; Mingi Jeong; Cristian Molinaro; Andrea Pugliese; Alberto Quattrini Li; Eugene Santos, Jr; V.S. Subrahmanian; Shanchieh Yang; Youzhi Zhang", + "authorids": "", + "aff": "Department of Computer Science, Northwestern University, Evanston, IL, United States; Department of Informatics, Technical University of Clausthal, Clausthal, Germany; Department of Computer Science, Dartmouth College, Hanover, NH, United States; Department of Informatics, Modeling, Electronics and System Engineering, University of Calabria, Rende, Italy; Department of Informatics, Modeling, Electronics and System Engineering, University of Calabria, Rende, Italy; Department of Computer Science, Dartmouth College, Hanover, NH, United States; Department of Computer Science, Dartmouth College, Hanover, NH, United States; Department of Computer Science, Northwestern University, Evanston, IL, United States; Department of Computer Engineering, Rochester Institute of Technology, Rochester, NY, United States; CAIR, Hong Kong Institute of Science & Innovation, Hong Kong", + "bibtex": "@article{Deb_Dix_Jeong_Molinaro_Pugliese_Quattrini Li_Santos, Jr_Subrahmanian_Yang_Zhang_2024, title={DUCK: A Drone-Urban Cyber-Defense Framework Based on Pareto-Optimal Deontic Logic Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27067}, DOI={10.1609/aaai.v37i13.27067}, abstractNote={Drone based terrorist attacks are increasing daily. It is not expected to be long before drones are used to carry out terror attacks in urban areas. 
We have developed the DUCK multi-agent testbed that security agencies can use to simulate drone-based attacks by diverse actors and develop a combination of surveillance camera, drone, and cyber defenses against them.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deb, Tonmoay and Dix, J\u00fcrgen and Jeong, Mingi and Molinaro, Cristian and Pugliese, Andrea and Quattrini Li, Alberto and Santos, Jr, Eugene and Subrahmanian, V.S. and Yang, Shanchieh and Zhang, Youzhi}, year={2024}, month={Jul.}, pages={16425-16427} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27067/26839", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27067", + "pdf_size": 9822721, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:c5Rw7Q54PjAJ:scholar.google.com/&scioq=DUCK:+A+Drone-Urban+Cyber-Defense+Framework+Based+on+Pareto-Optimal+Deontic+Logic+Agents&hl=en&as_sdt=0,44", + "gs_version_total": 4, + "aff_domain": "northwestern.edu; ; ; ; ; ; ; ; ; ", + "email": "northwestern.edu; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "https://sites.northwestern.edu/nsail/projects/duck/", + "author_num": 10, + "aff_unique_index": "0;1;2;3;3;2;2;0;4;5", + "aff_unique_norm": "Northwestern University;Technical University of Clausthal;Dartmouth College;University of Calabria;Rochester Institute of Technology;Hong Kong Institute of Science & Innovation", + "aff_unique_dep": "Department of Computer Science;Department of Informatics;Department of Computer Science;Department of Informatics, Modeling, Electronics and System Engineering;Department of Computer Engineering;CAIR", + "aff_unique_url": "https://www.northwestern.edu;https://www.tu-clausthal.de;https://dartmouth.edu;https://www.unical.it;https://www.rit.edu;", + "aff_unique_abbr": "NU;;Dartmouth;;RIT;", + "aff_campus_unique_index": "0;1;2;3;3;2;2;0;4;5", + "aff_campus_unique": "Evanston;Clausthal;Hanover;Rende;Rochester;Hong Kong", + 
"aff_country_unique_index": "0;1;0;2;2;0;0;0;0;3", + "aff_country_unique": "United States;Germany;Italy;China" + }, + { + "id": "article-25114", + "title": "DUET: Cross-Modal Semantic Grounding for Contrastive Zero-Shot Learning", + "track": "main", + "status": "Technical", + "abstract": "Zero-shot learning (ZSL) aims to predict unseen classes whose samples have never appeared during training. One of the most effective and widely used semantic information for zero-shot image classification are attributes which are annotations for class-level visual characteristics. However, the current methods often fail to discriminate those subtle visual distinctions between images due to not only the shortage of fine-grained annotations, but also the attribute imbalance and co-occurrence. In this paper, we present a transformer-based end-to-end ZSL method named DUET, which integrates latent semantic knowledge from the pre-trained language models (PLMs) via a self-supervised multi-modal learning paradigm. Specifically, we (1) developed a cross-modal semantic grounding network to investigate the model's capability of disentangling semantic attributes from the images; (2) applied an attribute-level contrastive learning strategy to further enhance the model's discrimination on fine-grained visual characteristics against the attribute co-occurrence and imbalance; (3) proposed a multi-task learning policy for considering multi-model objectives. We find that our DUET can achieve state-of-the-art performance on three standard ZSL benchmarks and a knowledge graph equipped ZSL benchmark. Its components are effective and its predictions are interpretable.", + "primary_area": "computer vision i", + "author": "Zhuo Chen; Yufeng Huang; Jiaoyan Chen; Yuxia Geng; Wen Zhang; Yin Fang; Jeff Z. 
Pan; Huajun Chen", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University + Donghai Laboratory, Zhoushan 316021, China + Alibaba-Zhejiang University Joint Institute of Frontier Technologies; School of Software Technology, Zhejiang University + Alibaba-Zhejiang University Joint Institute of Frontier Technologies; Department of Computer Science, The University of Manchester; College of Computer Science and Technology, Zhejiang University + Alibaba-Zhejiang University Joint Institute of Frontier Technologies; School of Software Technology, Zhejiang University + Alibaba-Zhejiang University Joint Institute of Frontier Technologies; College of Computer Science and Technology, Zhejiang University + Alibaba-Zhejiang University Joint Institute of Frontier Technologies; School of Informatics, The University of Edinburgh; College of Computer Science and Technology, Zhejiang University + Donghai Laboratory, Zhoushan 316021, China + Alibaba-Zhejiang University Joint Institute of Frontier Technologies", + "bibtex": "@article{Chen_Huang_Chen_Geng_Zhang_Fang_Z. Pan_Chen_2023, title={DUET: Cross-Modal Semantic Grounding for Contrastive Zero-Shot Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25114}, DOI={10.1609/aaai.v37i1.25114}, abstractNote={Zero-shot learning (ZSL) aims to predict unseen classes whose samples have never appeared during training. One of the most effective and widely used semantic information for zero-shot image classification are attributes which are annotations for class-level visual characteristics. However, the current methods often fail to discriminate those subtle visual distinctions between images due to not only the shortage of fine-grained annotations, but also the attribute imbalance and co-occurrence. 
In this paper, we present a transformer-based end-to-end ZSL method named DUET, which integrates latent semantic knowledge from the pre-trained language models (PLMs) via a self-supervised multi-modal learning paradigm. Specifically, we (1) developed a cross-modal semantic grounding network to investigate the model\u2019s capability of disentangling semantic attributes from the images; (2) applied an attribute-level contrastive learning strategy to further enhance the model\u2019s discrimination on fine-grained visual characteristics against the attribute co-occurrence and imbalance; (3) proposed a multi-task learning policy for considering multi-model objectives. We find that our DUET can achieve state-of-the-art performance on three standard ZSL benchmarks and a knowledge graph equipped ZSL benchmark. Its components are effective and its predictions are interpretable.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Zhuo and Huang, Yufeng and Chen, Jiaoyan and Geng, Yuxia and Zhang, Wen and Fang, Yin and Z. 
Pan, Jeff and Chen, Huajun}, year={2023}, month={Jun.}, pages={405-413} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25114/24886", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25114", + "pdf_size": 4734356, + "gs_citation": 73, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4130550769570287612&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "zju.edu.cn;zju.edu.cn;manchester.ac.uk;zju.edu.cn;zju.edu.cn;zju.edu.cn;ed.ac.uk;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;manchester.ac.uk;zju.edu.cn;zju.edu.cn;zju.edu.cn;ed.ac.uk;zju.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1+0;0+0;2;0+0;0+0;0+0;3;0+1+0", + "aff_unique_norm": "Zhejiang University;Donghai Laboratory;The University of Manchester;The University of Edinburgh", + "aff_unique_dep": "College of Computer Science and Technology;;Department of Computer Science;School of Informatics", + "aff_unique_url": "http://www.zju.edu.cn;;https://www.manchester.ac.uk;https://www.ed.ac.uk", + "aff_unique_abbr": "ZJU;;UoM;Edinburgh", + "aff_campus_unique_index": ";;;;;1;", + "aff_campus_unique": ";Edinburgh", + "aff_country_unique_index": "0+0+0;0+0;1;0+0;0+0;0+0;1;0+0+0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-27060", + "title": "Dagster: Parallel Structured Search", + "track": "demonstrations", + "status": "Technical", + "abstract": "We demonstrate Dagster, a system that implements a new approach to scheduling interdependent (Boolean) SAT search activities in high-performance computing (HPC) environments.\nOur system takes as input a set of disjunctive clauses (i.e., DIMACS CNF) and a labelled directed acyclic graph (DAG) structure describing how the clauses are decomposed into a set of interrelated problems.\nComponent problems are solved using standard systematic backtracking search, which may optionally be coupled to (stochastic dynamic) local search and/or 
clause-strengthening processes.\nWe demonstrate Dagster using a new Graph Maximal Determinant combinatorial case study. This demonstration paper presents a new case study, and is adjunct to the longer accepted manuscript at the Pacific Rim International Conference on Artificial Intelligence (2022).", + "primary_area": "", + "author": "Mark Alexander Burgess; Charles Gretton; Josh Milthorpe; Luke Croak; Thomas Willingham; Alwen Tiu", + "authorids": "", + "aff": "School of Computing, Australian National University, Canberra, Australia, 2600; School of Computing, Australian National University, Canberra, Australia, 2600; School of Computing, Australian National University, Canberra, Australia, 2600; School of Computing, Australian National University, Canberra, Australia, 2600; School of Computing, Australian National University, Canberra, Australia, 2600; School of Computing, Australian National University, Canberra, Australia, 2600", + "bibtex": "@article{Burgess_Gretton_Milthorpe_Croak_Willingham_Tiu_2024, title={Dagster: Parallel Structured Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27060}, DOI={10.1609/aaai.v37i13.27060}, abstractNote={We demonstrate Dagster, a system that implements a new approach to scheduling interdependent (Boolean) SAT search activities in high-performance computing (HPC) environments.\nOur system takes as input a set of disjunctive clauses (i.e., DIMACS CNF) and a labelled directed acyclic graph (DAG) structure describing how the clauses are decomposed into a set of interrelated problems.\nComponent problems are solved using standard systematic backtracking search, which may optionally be coupled to (stochastic dynamic) local search and/or clause-strengthening processes.\nWe demonstrate Dagster using a new Graph Maximal Determinant combinatorial case study. 
This demonstration paper presents a new case study, and is adjunct to the longer accepted manuscript at the Pacific Rim International Conference on Artificial Intelligence (2022).}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Burgess, Mark Alexander and Gretton, Charles and Milthorpe, Josh and Croak, Luke and Willingham, Thomas and Tiu, Alwen}, year={2024}, month={Jul.}, pages={16404-16406} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27060/26832", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27060", + "pdf_size": 150843, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:r-WUyXhzCbsJ:scholar.google.com/&scioq=Dagster:+Parallel+Structured+Search&hl=en&as_sdt=0,11", + "gs_version_total": 3, + "aff_domain": "anu.edu.au;anu.edu.au;anu.edu.au;defence.gov.au;anu.edu.au;anu.edu.au", + "email": "anu.edu.au;anu.edu.au;anu.edu.au;defence.gov.au;anu.edu.au;anu.edu.au", + "github": "https://github.com/ANU-HPC/dagster", + "project": "Zenodo(Burgess 2022)", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Australian National University", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.anu.edu.au", + "aff_unique_abbr": "ANU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Canberra", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25161", + "title": "DarkFeat: Noise-Robust Feature Detector and Descriptor for Extremely Low-Light RAW Images", + "track": "main", + "status": "Technical", + "abstract": "Low-light visual perception, such as SLAM or SfM at night, has received increasing attention, in which keypoint detection and local feature description play an important role. 
Both handcraft designs and machine learning methods have been widely studied for local feature detection and description, however, the performance of existing methods degrades in the extreme low-light scenarios in a certain degree, due to the low signal-to-noise ratio in images. To address this challenge, images in RAW format that retain more raw sensing information have been considered in recent works with a denoise-then-detect scheme. However, existing denoising methods are still insufficient for RAW images and heavily time-consuming, which limits the practical applications of such scheme. In this paper, we propose DarkFeat, a deep learning model which directly detects and describes local features from extreme low-light RAW images in an end-to-end manner. A novel noise robustness map and selective suppression constraints are proposed to effectively mitigate the influence of noise and extract more reliable keypoints. Furthermore, a customized pipeline of synthesizing dataset containing low-light RAW image matching pairs is proposed to extend end-to-end training. Experimental results show that DarkFeat achieves state-of-the-art performance on both indoor and outdoor parts of the challenging MID benchmark, outperforms the denoise-then-detect methods and significantly reduces computational costs up to 70%. 
Code is available at https://github.com/THU-LYJ-Lab/DarkFeat.", + "primary_area": "computer vision i", + "author": "Yuze He; Yubin Hu; Wang Zhao; Jisheng Li; Yong-Jin Liu; Yuxing Han; Jiangtao Wen", + "authorids": "", + "aff": "Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Research Institute of Tsinghua University in Shenzhen; Eastern Institute for Advanced Study", + "bibtex": "@article{He_Hu_Zhao_Li_Liu_Han_Wen_2023, title={DarkFeat: Noise-Robust Feature Detector and Descriptor for Extremely Low-Light RAW Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25161}, DOI={10.1609/aaai.v37i1.25161}, abstractNote={Low-light visual perception, such as SLAM or SfM at night, has received increasing attention, in which keypoint detection and local feature description play an important role. Both handcraft designs and machine learning methods have been widely studied for local feature detection and description, however, the performance of existing methods degrades in the extreme low-light scenarios in a certain degree, due to the low signal-to-noise ratio in images. To address this challenge, images in RAW format that retain more raw sensing information have been considered in recent works with a denoise-then-detect scheme. However, existing denoising methods are still insufficient for RAW images and heavily time-consuming, which limits the practical applications of such scheme. In this paper, we propose DarkFeat, a deep learning model which directly detects and describes local features from extreme low-light RAW images in an end-to-end manner. A novel noise robustness map and selective suppression constraints are proposed to effectively mitigate the influence of noise and extract more reliable keypoints. Furthermore, a customized pipeline of synthesizing dataset containing low-light RAW image matching pairs is proposed to extend end-to-end training. 
Experimental results show that DarkFeat achieves state-of-the-art performance on both indoor and outdoor parts of the challenging MID benchmark, outperforms the denoise-then-detect methods and significantly reduces computational costs up to 70%. Code is available at https://github.com/THU-LYJ-Lab/DarkFeat.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Yuze and Hu, Yubin and Zhao, Wang and Li, Jisheng and Liu, Yong-Jin and Han, Yuxing and Wen, Jiangtao}, year={2023}, month={Jun.}, pages={826-834} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25161/24933", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25161", + "pdf_size": 18846329, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14071988381297612487&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;sz.tsinghua.edu.cn;eias.ac.cn", + "email": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;sz.tsinghua.edu.cn;eias.ac.cn", + "github": "https://github.com/THU-LYJ-Lab/DarkFeat", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;1", + "aff_unique_norm": "Tsinghua University;Eastern Institute for Advanced Study", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "THU;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25447", + "title": "Darwinian Model Upgrades: Model Evolving with Selective Compatibility", + "track": "main", + "status": "Technical", + "abstract": "The traditional model upgrading paradigm for retrieval requires recomputing all gallery embeddings before deploying the new model (dubbed as \"backfilling\"), which is quite expensive and time-consuming considering 
billions of instances in industrial applications. BCT presents the first step towards backward-compatible model upgrades to get rid of backfilling. It is workable but leaves the new model in a dilemma between new feature discriminativeness and new-to-old compatibility due to the undifferentiated compatibility constraints. In this work, we propose Darwinian Model Upgrades (DMU), which disentangle the inheritance and variation in the model evolving with selective backward compatibility and forward adaptation, respectively. The old-to-new heritable knowledge is measured by old feature discriminativeness, and the gallery features, especially those of poor quality, are evolved in a lightweight manner to become more adaptive in the new latent space. We demonstrate the superiority of DMU through comprehensive experiments on large-scale landmark retrieval and face recognition benchmarks. DMU effectively alleviates the new-to-new degradation at the same time improving new-to-old compatibility, rendering a more proper model upgrading paradigm in large-scale retrieval systems.Code: https://github.com/TencentARC/OpenCompatible.", + "primary_area": "computer vision iii", + "author": "Binjie Zhang; Shupeng Su; Yixiao Ge; Xuyuan Xu; Yexin Wang; Chun Yuan; Mike Zheng Shou; Ying Shan", + "authorids": "", + "aff": "ARC Lab, Tencent PCG+National University of Singapore+Tsinghua University; ARC Lab, Tencent PCG; ARC Lab, Tencent PCG; AI Technology Center of Tencent Video; AI Technology Center of Tencent Video; Tsinghua University; National University of Singapore; ARC Lab, Tencent PCG", + "bibtex": "@article{Zhang_Su_Ge_Xu_Wang_Yuan_Shou_Shan_2023, title={Darwinian Model Upgrades: Model Evolving with Selective Compatibility}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25447}, DOI={10.1609/aaai.v37i3.25447}, abstractNote={The traditional model upgrading paradigm for retrieval requires recomputing all gallery embeddings before deploying the new model (dubbed as 
\"backfilling\"), which is quite expensive and time-consuming considering billions of instances in industrial applications. BCT presents the first step towards backward-compatible model upgrades to get rid of backfilling. It is workable but leaves the new model in a dilemma between new feature discriminativeness and new-to-old compatibility due to the undifferentiated compatibility constraints. In this work, we propose Darwinian Model Upgrades (DMU), which disentangle the inheritance and variation in the model evolving with selective backward compatibility and forward adaptation, respectively. The old-to-new heritable knowledge is measured by old feature discriminativeness, and the gallery features, especially those of poor quality, are evolved in a lightweight manner to become more adaptive in the new latent space. We demonstrate the superiority of DMU through comprehensive experiments on large-scale landmark retrieval and face recognition benchmarks. DMU effectively alleviates the new-to-new degradation at the same time improving new-to-old compatibility, rendering a more proper model upgrading paradigm in large-scale retrieval systems.Code: https://github.com/TencentARC/OpenCompatible.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Binjie and Su, Shupeng and Ge, Yixiao and Xu, Xuyuan and Wang, Yexin and Yuan, Chun and Shou, Mike Zheng and Shan, Ying}, year={2023}, month={Jun.}, pages={3393-3400} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25447/25219", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25447", + "pdf_size": 431408, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15273449851897787264&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "example.com;example.com;example.com;example.com;example.com;example.com;example.com;example.com", + "email": 
"example.com;example.com;example.com;example.com;example.com;example.com;example.com;example.com", + "github": "https://github.com/TencentARC/OpenCompatible", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1+2;0;0;3;3;2;1;0", + "aff_unique_norm": "Tencent;National University of Singapore;Tsinghua University;Tencent Video", + "aff_unique_dep": "ARC Lab;;;AI Technology Center", + "aff_unique_url": "https://www.tencent.com;https://www.nus.edu.sg;https://www.tsinghua.edu.cn;https://v.qq.com", + "aff_unique_abbr": "Tencent;NUS;THU;Tencent Video", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1+0;0;0;0;0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26348", + "title": "Data Imputation with Iterative Graph Reconstruction", + "track": "main", + "status": "Technical", + "abstract": "Effective data imputation demands rich latent ``structure\" discovery capabilities from ``plain\" tabular data. Recent advances in graph neural networks-based data imputation solutions show their structure learning potentials by translating tabular data as bipartite graphs. However, due to a lack of relations between samples, they treat all samples equally which is against one important observation: ``similar sample should give more information about missing values.\" This paper presents a novel Iterative graph Generation and Reconstruction framework for Missing data imputation(IGRM). Instead of treating all samples equally, we introduce the concept: ``friend networks\" to represent different relations among samples. To generate an accurate friend network with missing data, an end-to-end friend network reconstruction solution is designed to allow for continuous friend network optimization during imputation learning. The representation of the optimized friend network, in turn, is used to further optimize the data imputation process with differentiated message passing. 
Experiment results on eight benchmark datasets show that IGRM yields 39.13% lower mean absolute error compared with nine baselines and 9.04% lower than the second-best. Our code is available at https://github.com/G-AILab/IGRM.", + "primary_area": "machine learning iv", + "author": "Jiajun Zhong; Ning Gui; Weiwei Ye", + "authorids": "", + "aff": "School of Computer Science and Engineering, Central South University, China; School of Computer Science and Engineering, Central South University, China; School of Computer Science and Engineering, Central South University, China", + "bibtex": "@article{Zhong_Gui_Ye_2023, title={Data Imputation with Iterative Graph Reconstruction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26348}, DOI={10.1609/aaai.v37i9.26348}, abstractNote={Effective data imputation demands rich latent ``structure\" discovery capabilities from ``plain\" tabular data. Recent advances in graph neural networks-based data imputation solutions show their structure learning potentials by translating tabular data as bipartite graphs. However, due to a lack of relations between samples, they treat all samples equally which is against one important observation: ``similar sample should give more information about missing values.\" This paper presents a novel Iterative graph Generation and Reconstruction framework for Missing data imputation(IGRM). Instead of treating all samples equally, we introduce the concept: ``friend networks\" to represent different relations among samples. To generate an accurate friend network with missing data, an end-to-end friend network reconstruction solution is designed to allow for continuous friend network optimization during imputation learning. The representation of the optimized friend network, in turn, is used to further optimize the data imputation process with differentiated message passing. 
Experiment results on eight benchmark datasets show that IGRM yields 39.13% lower mean absolute error compared with nine baselines and 9.04% lower than the second-best. Our code is available at https://github.com/G-AILab/IGRM.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhong, Jiajun and Gui, Ning and Ye, Weiwei}, year={2023}, month={Jun.}, pages={11399-11407} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26348/26120", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26348", + "pdf_size": 3906122, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12539360407709381698&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "csu.edu.cn;csu.edu.cn;qq.com", + "email": "csu.edu.cn;csu.edu.cn;qq.com", + "github": "https://github.com/G-AILab/IGRM", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Central South University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.csu.edu.cn", + "aff_unique_abbr": "CSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26886", + "title": "Data Labeling for Machine Learning Engineers: Project-Based Curriculum and Data-Centric Competitions", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "The process of training and evaluating machine learning (ML) models relies on high-quality and timely annotated datasets. While a significant portion of academic and industrial research is focused on creating new ML methods, these communities rely on open datasets and benchmarks. However, practitioners often face issues with unlabeled and unavailable data specific to their domain. 
We believe that building scalable and sustainable processes for collecting data of high quality for ML is a complex skill that needs focused development. To fill the need for this competency, we created a semester course on Data Collection and Labeling for Machine Learning, integrated into a bachelor program that trains data analysts and ML engineers. The course design and delivery illustrate how to overcome the challenge of putting university students with a theoretical background in mathematics, computer science, and physics through a program that is substantially different from their educational habits. Our goal was to motivate students to focus on practicing and mastering a skill that was considered unnecessary to their work. We created a system of inverse ML competitions that showed the students how high-quality and relevant data affect their work with ML models, and their mindset changed completely in the end. Project-based learning with increasing complexity of conditions at each stage helped to raise the satisfaction index of students accustomed to difficult challenges. During the course, our invited industry practitioners drew on their first-hand experience with data, which helped us avoid overtheorizing and made the course highly applicable to the students\u2019 future career paths.", + "primary_area": "", + "author": "Anastasia Zhdanovskaya; Daria Baidakova; Dmitry Ustalov", + "authorids": "", + "aff": "Toloka; Toloka; Toloka", + "bibtex": "@article{Zhdanovskaya_Baidakova_Ustalov_2024, title={Data Labeling for Machine Learning Engineers: Project-Based Curriculum and Data-Centric Competitions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26886}, DOI={10.1609/aaai.v37i13.26886}, abstractNote={The process of training and evaluating machine learning (ML) models relies on high-quality and timely annotated datasets. 
While a significant portion of academic and industrial research is focused on creating new ML methods, these communities rely on open datasets and benchmarks. However, practitioners often face issues with unlabeled and unavailable data specific to their domain. We believe that building scalable and sustainable processes for collecting data of high quality for ML is a complex skill that needs focused development. To fill the need for this competency, we created a semester course on Data Collection and Labeling for Machine Learning, integrated into a bachelor program that trains data analysts and ML engineers. The course design and delivery illustrate how to overcome the challenge of putting university students with a theoretical background in mathematics, computer science, and physics through a program that is substantially different from their educational habits. Our goal was to motivate students to focus on practicing and mastering a skill that was considered unnecessary to their work. We created a system of inverse ML competitions that showed the students how high-quality and relevant data affect their work with ML models, and their mindset changed completely in the end. Project-based learning with increasing complexity of conditions at each stage helped to raise the satisfaction index of students accustomed to difficult challenges. 
During the course, our invited industry practitioners drew on their first-hand experience with data, which helped us avoid overtheorizing and made the course highly applicable to the students\u2019 future career paths.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhdanovskaya, Anastasia and Baidakova, Daria and Ustalov, Dmitry}, year={2024}, month={Jul.}, pages={15886-15893} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26886/26658", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26886", + "pdf_size": 409580, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7819923499169580086&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "toloka.ai;toloka.ai;toloka.ai", + "email": "toloka.ai;toloka.ai;toloka.ai", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Toloka", + "aff_unique_dep": "", + "aff_unique_url": "https://toloka.yandex.com", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Russia" + }, + { + "id": "article-26863", + "title": "Data-Driven Machine Learning Models for a Multi-Objective Flapping Fin Unmanned Underwater Vehicle Control System", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Flapping-fin unmanned underwater vehicle (UUV) propulsion systems provide high maneuverability for naval tasks such as surveillance and terrain exploration. Recent work has explored the use of time-series neural network surrogate models to predict thrust from vehicle design and fin kinematics. We develop a search-based inverse model that leverages a kinematics-to-thrust neural network model for control system design. 
Our inverse model finds a set of fin kinematics with the multi-objective goal of reaching a target thrust and creating a smooth kinematic transition between flapping cycles. We demonstrate how a control system integrating this inverse model can make online, cycle-to-cycle adjustments to prioritize different system objectives.", + "primary_area": "emerging applications of ai", + "author": "Julian Lee; Kamal Viswanath; Alisha Sharma; Jason Geder; Marius Pruessner; Brian Zhou", + "authorids": "", + "aff": "Yale University; Naval Research Laboratory; Naval Research Laboratory; Naval Research Laboratory; Naval Research Laboratory; Thomas Jefferson High School for Science and Technology", + "bibtex": "@article{Lee_Viswanath_Sharma_Geder_Pruessner_Zhou_2024, title={Data-Driven Machine Learning Models for a Multi-Objective Flapping Fin Unmanned Underwater Vehicle Control System}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26863}, DOI={10.1609/aaai.v37i13.26863}, abstractNote={Flapping-fin unmanned underwater vehicle (UUV) propulsion systems provide high maneuverability for naval tasks such as surveillance and terrain exploration. Recent work has explored the use of time-series neural network surrogate models to predict thrust from vehicle design and fin kinematics. We develop a search-based inverse model that leverages a kinematics-to-thrust neural network model for control system design. Our inverse model finds a set of fin kinematics with the multi-objective goal of reaching a target thrust and creating a smooth kinematic transition between flapping cycles. 
We demonstrate how a control system integrating this inverse model can make online, cycle-to-cycle adjustments to prioritize different system objectives.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Julian and Viswanath, Kamal and Sharma, Alisha and Geder, Jason and Pruessner, Marius and Zhou, Brian}, year={2024}, month={Jul.}, pages={15703-15709} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26863/26635", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26863", + "pdf_size": 2191341, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17622945635446593080&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "yale.edu;nrl.navy.mil;nrl.navy.mil;nrl.navy.mil;nrl.navy.mil;tjhsst.edu", + "email": "yale.edu;nrl.navy.mil;nrl.navy.mil;nrl.navy.mil;nrl.navy.mil;tjhsst.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;2", + "aff_unique_norm": "Yale University;Naval Research Laboratory;Thomas Jefferson High School for Science and Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.yale.edu;https://www.nrl.navy.mil;https://www.tjhsst.edu/", + "aff_unique_abbr": "Yale;NRL;TJHSST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25302", + "title": "Data-Efficient Image Quality Assessment with Attention-Panel Decoder", + "track": "main", + "status": "Technical", + "abstract": "Blind Image Quality Assessment (BIQA) is a fundamental task in computer vision, which however remains unresolved due to the complex distortion conditions and diversified image contents. To confront this challenge, we in this paper propose a novel BIQA pipeline based on the Transformer architecture, which achieves an efficient quality-aware feature representation with much fewer data. 
More specifically, we consider the traditional fine-tuning in BIQA as an interpretation of the pre-trained model. In this way, we further introduce a Transformer decoder to refine the perceptual information of the CLS token from different perspectives. This enables our model to establish the quality-aware feature manifold efficiently while attaining a strong generalization capability. Meanwhile, inspired by the subjective evaluation behaviors of human, we introduce a novel attention panel mechanism, which improves the model performance and reduces the prediction uncertainty simultaneously. The proposed BIQA method maintains a light-weight design with only one layer of the decoder, yet extensive experiments on eight standard BIQA datasets (both synthetic and authentic) demonstrate its superior performance to the state-of-the-art BIQA methods, i.e., achieving the SRCC values of 0.875 (vs. 0.859 in LIVEC) and 0.980 (vs. 0.969 in LIVE). Checkpoints, logs and code will be available at https://github.com/narthchin/DEIQT.", + "primary_area": "computer vision ii", + "author": "Guanyi Qin; Runze Hu; Yutao Liu; Xiawu Zheng; Haotian Liu; Xiu Li; Yan Zhang", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen 518055, China; School of Information and Electronics, Beijing Institute of Technology, Beijing 100086, China; School of Computer Science and Technology, Ocean University of China, Qingdao 266100, China; Peng Cheng Laboratory, Shenzhen 518066, China; Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen 518055, China; Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen 518055, China; Media Analytics and Computing Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, Xiamen 361005, China", + "bibtex": "@article{Qin_Hu_Liu_Zheng_Liu_Li_Zhang_2023, title={Data-Efficient Image Quality Assessment with Attention-Panel Decoder}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25302}, DOI={10.1609/aaai.v37i2.25302}, abstractNote={Blind Image Quality Assessment (BIQA) is a fundamental task in computer vision, which however remains unresolved due to the complex distortion conditions and diversified image contents. To confront this challenge, we in this paper propose a novel BIQA pipeline based on the Transformer architecture, which achieves an efficient quality-aware feature representation with much fewer data. More specifically, we consider the traditional fine-tuning in BIQA as an interpretation of the pre-trained model. In this way, we further introduce a Transformer decoder to refine the perceptual information of the CLS token from different perspectives. This enables our model to establish the quality-aware feature manifold efficiently while attaining a strong generalization capability. Meanwhile, inspired by the subjective evaluation behaviors of human, we introduce a novel attention panel mechanism, which improves the model performance and reduces the prediction uncertainty simultaneously. The proposed BIQA method maintains a light-weight design with only one layer of the decoder, yet extensive experiments on eight standard BIQA datasets (both synthetic and authentic) demonstrate its superior performance to the state-of-the-art BIQA methods, i.e., achieving the SRCC values of 0.875 (vs. 0.859 in LIVEC) and 0.980 (vs. 0.969 in LIVE). 
Checkpoints, logs and code will be available at https://github.com/narthchin/DEIQT.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Guanyi and Hu, Runze and Liu, Yutao and Zheng, Xiawu and Liu, Haotian and Li, Xiu and Zhang, Yan}, year={2023}, month={Jun.}, pages={2091-2100} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25302/25074", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25302", + "pdf_size": 7779680, + "gs_citation": 74, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14676244128333454912&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "mails.tsinghua.edu.cn;gmail.com;ouc.edu.cn;pcl.ac.cn;mails.tsinghua.edu.cn;sz.tsinghua.edu.cn;gmail.com", + "email": "mails.tsinghua.edu.cn;gmail.com;ouc.edu.cn;pcl.ac.cn;mails.tsinghua.edu.cn;sz.tsinghua.edu.cn;gmail.com", + "github": "https://github.com/narthchin/DEIQT", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;0;0;4", + "aff_unique_norm": "Tsinghua University;Beijing Institute of Technology;Ocean University of China;Peng Cheng Laboratory;Xiamen University", + "aff_unique_dep": "Shenzhen International Graduate School;School of Information and Electronics;School of Computer Science and Technology;;Department of Artificial Intelligence", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.bit.edu.cn;http://www.ouc.edu.cn;;http://www.xmu.edu.cn", + "aff_unique_abbr": "THU;BIT;OUC;;XMU", + "aff_campus_unique_index": "0;1;2;0;0;0;3", + "aff_campus_unique": "Shenzhen;Beijing;Qingdao;Xiamen", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26694", + "title": "Daycare Matching in Japan: Transfers and Siblings", + "track": "aaai special track", + "status": "Technical", + "abstract": "In this paper, we study a daycare matching problem in Japan and report the design and implementation of a new centralized algorithm, 
which is going to be deployed in one municipality in the Tokyo metropolis. There are two features that make this market different from the classical hospital-doctor matching problem: i) some children are initially enrolled and prefer to be transferred to other daycare centers; ii) one family may be associated with two or more children and is allowed to submit preferences over combinations of daycare centers. We revisit some well-studied properties including individual rationality, non-wastefulness, as well as stability, and generalize them to this new setting. We design an algorithm based on integer programming (IP) that captures these properties and conduct experiments on five real-life data sets provided by three municipalities. Experimental results show that i) our algorithm performs at least as well as currently used methods in terms of numbers of matched children and blocking coalition; ii) we can find a stable outcome for all instances, although the existence of such an outcome is not guaranteed in theory.", + "primary_area": "ai for social impact", + "author": "Zhaohong Sun; Yoshihiro Takenami; Daisuke Moriwaki; Yoji Tomita; Makoto Yokoo", + "authorids": "", + "aff": "AI Lab, CyberAgent Inc, Japan; AI Lab, CyberAgent Inc, Japan; AI Lab, CyberAgent Inc, Japan; AI Lab, CyberAgent Inc, Japan; Kyushu University, Japan", + "bibtex": "@article{Sun_Takenami_Moriwaki_Tomita_Yokoo_2023, title={Daycare Matching in Japan: Transfers and Siblings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26694}, DOI={10.1609/aaai.v37i12.26694}, abstractNote={In this paper, we study a daycare matching problem in Japan and report the design and implementation of a new centralized algorithm, which is going to be deployed in one municipality in the Tokyo metropolis. 
There are two features that make this market different from the classical hospital-doctor matching problem: i) some children are initially enrolled and prefer to be transferred to other daycare centers; ii) one family may be associated with two or more children and is allowed to submit preferences over combinations of daycare centers. We revisit some well-studied properties including individual rationality, non-wastefulness, as well as stability, and generalize them to this new setting. We design an algorithm based on integer programming (IP) that captures these properties and conduct experiments on five real-life data sets provided by three municipalities. Experimental results show that i) our algorithm performs at least as well as currently used methods in terms of numbers of matched children and blocking coalition; ii) we can find a stable outcome for all instances, although the existence of such an outcome is not guaranteed in theory.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Zhaohong and Takenami, Yoshihiro and Moriwaki, Daisuke and Tomita, Yoji and Yokoo, Makoto}, year={2023}, month={Jun.}, pages={14487-14495} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26694/26466", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26694", + "pdf_size": 151365, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6666145393305856674&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "CyberAgent Inc;Kyushu University", + "aff_unique_dep": "AI Lab;", + "aff_unique_url": "https://www.cyberagent.co.jp;https://www.kyushu-u.ac.jp", + "aff_unique_abbr": "CyberAgent;Kyushu U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Japan" + 
}, + { + "id": "article-25355", + "title": "De-biased Teacher: Rethinking IoU Matching for Semi-supervised Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Most of the recent research in semi-supervised object detection follows the pseudo-labeling paradigm evolved from the semi-supervised image classification task. However, the training paradigm of the two-stage object detector inevitably makes the pseudo-label learning process for unlabeled images full of bias. Specifically, the IoU matching scheme used for selecting and labeling candidate boxes is based on the assumption that the matching source~(ground truth) is accurate enough in terms of the number of objects, object position and object category. Obviously, pseudo-labels generated for unlabeled images cannot satisfy such a strong assumption, which makes the produced training proposals extremely unreliable and thus severely spoil the follow-up training. To de-bias the training proposals generated by the pseudo-label-based IoU matching, we propose a general framework -- De-biased Teacher, which abandons both the IoU matching and pseudo labeling processes by directly generating favorable training proposals for consistency regularization between the weak/strong augmented image pairs. Moreover, a distribution-based refinement scheme is designed to eliminate the scattered class predictions of significantly low values for higher efficiency. Extensive experiments demonstrate that the proposed De-biased Teacher consistently outperforms other state-of-the-art methods on the MS-COCO and PASCAL VOC benchmarks. 
Source codes are available at https://github.com/wkfdb/De-biased-Teracher.", + "primary_area": "computer vision ii", + "author": "Kuo Wang; Jingyu Zhuang; Guanbin Li; Chaowei Fang; Lechao Cheng; Liang Lin; Fan Zhou", + "authorids": "", + "aff": "School of Computer Science and Engineering, Research Institute of Sun Yat-sen University in Shenzhen, Sun Yat-sen University, Guangzhou, China; School of Computer Science and Engineering, Research Institute of Sun Yat-sen University in Shenzhen, Sun Yat-sen University, Guangzhou, China; School of Computer Science and Engineering, Research Institute of Sun Yat-sen University in Shenzhen, Sun Yat-sen University, Guangzhou, China+\u2020; School of Artificial Intelligence, Xidian University, Xi\u2019an, China; Zhejiang Lab, Zhejiang, China; School of Computer Science and Engineering, Research Institute of Sun Yat-sen University in Shenzhen, Sun Yat-sen University, Guangzhou, China; School of Computer Science and Engineering, Research Institute of Sun Yat-sen University in Shenzhen, Sun Yat-sen University, Guangzhou, China+\u2020", + "bibtex": "@article{Wang_Zhuang_Li_Fang_Cheng_Lin_Zhou_2023, title={De-biased Teacher: Rethinking IoU Matching for Semi-supervised Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25355}, DOI={10.1609/aaai.v37i2.25355}, abstractNote={Most of the recent research in semi-supervised object detection follows the pseudo-labeling paradigm evolved from the semi-supervised image classification task. However, the training paradigm of the two-stage object detector inevitably makes the pseudo-label learning process for unlabeled images full of bias. Specifically, the IoU matching scheme used for selecting and labeling candidate boxes is based on the assumption that the matching source (ground truth) is accurate enough in terms of the number of objects, object position and object category. 
Obviously, pseudo-labels generated for unlabeled images cannot satisfy such a strong assumption, which makes the produced training proposals extremely unreliable and thus severely spoil the follow-up training. To de-bias the training proposals generated by the pseudo-label-based IoU matching, we propose a general framework -- De-biased Teacher, which abandons both the IoU matching and pseudo labeling processes by directly generating favorable training proposals for consistency regularization between the weak/strong augmented image pairs. Moreover, a distribution-based refinement scheme is designed to eliminate the scattered class predictions of significantly low values for higher efficiency. Extensive experiments demonstrate that the proposed De-biased Teacher consistently outperforms other state-of-the-art methods on the MS-COCO and PASCAL VOC benchmarks. Source codes are available at https://github.com/wkfdb/De-biased-Teracher.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Kuo and Zhuang, Jingyu and Li, Guanbin and Fang, Chaowei and Cheng, Lechao and Lin, Liang and Zhou, Fan}, year={2023}, month={Jun.}, pages={2573-2580} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25355/25127", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25355", + "pdf_size": 912393, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10596355441399169180&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn;outlook.com;zhejianglab.com;ieee.org;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn;outlook.com;zhejianglab.com;ieee.org;mail.sysu.edu.cn", + "github": "https://github.com/wkfdb/De-biased-Teracher", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;2;3;0;0", + "aff_unique_norm": "Sun Yat-sen University;;Xidian University;Zhejiang Lab", + 
"aff_unique_dep": "School of Computer Science and Engineering;;School of Arti\ufb01cial Intelligence;", + "aff_unique_url": "http://www.sysu.edu.cn;;http://www.xidian.edu.cn/;", + "aff_unique_abbr": "SYSU;;Xidian;", + "aff_campus_unique_index": "0;0;0;2;0;0", + "aff_campus_unique": "Shenzhen;;Xi'an", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26550", + "title": "DeAR: A Deep-Learning-Based Audio Re-recording Resilient Watermarking", + "track": "main", + "status": "Technical", + "abstract": "Audio watermarking is widely used for leaking source tracing. The robustness of the watermark determines the traceability of the algorithm. With the development of digital technology, audio re-recording (AR) has become an efficient and covert means to steal secrets. AR process could drastically destroy the watermark signal while preserving the original information. This puts forward a new requirement for audio watermarking at this stage, that is, to be robust to AR distortions. Unfortunately, none of the existing algorithms can effectively resist AR attacks due to the complexity of the AR process. To address this limitation, this paper proposes DeAR, a deep-learning-based audio re-recording resistant watermarking. Inspired by DNN-based image watermarking, we pioneer a deep learning framework for audio carriers, based on which the watermark signal can be effectively embedded and extracted. Meanwhile, in order to resist the AR attack, we delicately analyze the distortions that occurred in the AR process and design the corresponding distortion layer to cooperate with the proposed watermarking framework. Extensive experiments show that the proposed algorithm can resist not only common electronic channel distortions but also AR distortions. 
Under the premise of high-quality embedding (SNR=25.86dB), in the case of a common re-recording distance (20cm), the algorithm can effectively achieve an average bit recovery accuracy of 98.55%.", + "primary_area": "speech natural language processing", + "author": "Chang Liu; Jie Zhang; Han Fang; Zehua Ma; Weiming Zhang; Nenghai Yu", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China+University of Waterloo; National University of Singapore; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Liu_Zhang_Fang_Ma_Zhang_Yu_2023, title={DeAR: A Deep-Learning-Based Audio Re-recording Resilient Watermarking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26550}, DOI={10.1609/aaai.v37i11.26550}, abstractNote={Audio watermarking is widely used for leaking source tracing. The robustness of the watermark determines the traceability of the algorithm. With the development of digital technology, audio re-recording (AR) has become an efficient and covert means to steal secrets. AR process could drastically destroy the watermark signal while preserving the original information. This puts forward a new requirement for audio watermarking at this stage, that is, to be robust to AR distortions. Unfortunately, none of the existing algorithms can effectively resist AR attacks due to the complexity of the AR process. To address this limitation, this paper proposes DeAR, a deep-learning-based audio re-recording resistant watermarking. Inspired by DNN-based image watermarking, we pioneer a deep learning framework for audio carriers, based on which the watermark signal can be effectively embedded and extracted. 
Meanwhile, in order to resist the AR attack, we delicately analyze the distortions that occurred in the AR process and design the corresponding distortion layer to cooperate with the proposed watermarking framework. Extensive experiments show that the proposed algorithm can resist not only common electronic channel distortions but also AR distortions. Under the premise of high-quality embedding (SNR=25.86dB), in the case of a common re-recording distance (20cm), the algorithm can effectively achieve an average bit recovery accuracy of 98.55%.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Chang and Zhang, Jie and Fang, Han and Ma, Zehua and Zhang, Weiming and Yu, Nenghai}, year={2023}, month={Jun.}, pages={13201-13209} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26550/26322", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26550", + "pdf_size": 1251569, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12328791446792312848&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.ustc.edu.cn;gmail.com;nus.edu.sg;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;gmail.com;nus.edu.sg;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;2;0;0;0", + "aff_unique_norm": "University of Science and Technology of China;University of Waterloo;National University of Singapore", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://uwaterloo.ca;https://www.nus.edu.sg", + "aff_unique_abbr": "USTC;UW;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;2;0;0;0", + "aff_country_unique": "China;Canada;Singapore" + }, + { + "id": "article-26288", + "title": "DeCOM: Decomposed Policy for Constrained Cooperative Multi-Agent Reinforcement Learning", + "track": "main", + "status": 
"Technical", + "abstract": "In recent years, multi-agent reinforcement learning (MARL) has presented impressive performance in various applications. However, physical limitations, budget restrictions, and many other factors usually impose constraints on a multi-agent system (MAS), which cannot be handled by traditional MARL frameworks. Specifically, this paper focuses on constrained MASes where agents work cooperatively to maximize the expected team-average return under various constraints on expected team-average costs, and develops a constrained cooperative MARL framework, named DeCOM, for such MASes. In particular, DeCOM decomposes the policy of each agent into two modules, which empowers information sharing among agents to achieve better cooperation. In addition, with such modularization, the training algorithm of DeCOM separates the original constrained optimization into an unconstrained optimization on reward and a constraints satisfaction problem on costs. DeCOM then iteratively solves these problems in a computationally efficient manner, which makes DeCOM highly scalable. We also provide theoretical guarantees on the convergence of DeCOM's policy update algorithm. 
Finally, we conduct extensive experiments to show the effectiveness of DeCOM with various types of costs in both moderate-scale and large-scale (with 500 agents) environments that originate from real-world applications.", + "primary_area": "machine learning iv", + "author": "Zhaoxing Yang; Haiming Jin; Rong Ding; Haoyi You; Guiyun Fan; Xinbing Wang; Chenghu Zhou", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Yang_Jin_Ding_You_Fan_Wang_Zhou_2023, title={DeCOM: Decomposed Policy for Constrained Cooperative Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26288}, DOI={10.1609/aaai.v37i9.26288}, abstractNote={In recent years, multi-agent reinforcement learning (MARL) has presented impressive performance in various applications. However, physical limitations, budget restrictions, and many other factors usually impose constraints on a multi-agent system (MAS), which cannot be handled by traditional MARL frameworks. Specifically, this paper focuses on constrained MASes where agents work cooperatively to maximize the expected team-average return under various constraints on expected team-average costs, and develops a constrained cooperative MARL framework, named DeCOM, for such MASes. In particular, DeCOM decomposes the policy of each agent into two modules, which empowers information sharing among agents to achieve better cooperation. In addition, with such modularization, the training algorithm of DeCOM separates the original constrained optimization into an unconstrained optimization on reward and a constraints satisfaction problem on costs. DeCOM then iteratively solves these problems in a computationally efficient manner, which makes DeCOM highly scalable. 
We also provide theoretical guarantees on the convergence of DeCOM\u2019s policy update algorithm. Finally, we conduct extensive experiments to show the effectiveness of DeCOM with various types of costs in both moderate-scale and large-scale (with 500 agents) environments that originate from real-world applications.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Zhaoxing and Jin, Haiming and Ding, Rong and You, Haoyi and Fan, Guiyun and Wang, Xinbing and Zhou, Chenghu}, year={2023}, month={Jun.}, pages={10861-10870} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26288/26060", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26288", + "pdf_size": 1884365, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1991029663432420519&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26271", + "title": "DeFL: Defending against Model Poisoning Attacks in Federated Learning via Critical Learning Periods Awareness", + "track": "main", + "status": "Technical", + "abstract": "Federated learning (FL) is known to be susceptible to model poisoning attacks in which malicious clients hamper the accuracy of the global model by sending manipulated model updates to the central server during the FL training process. 
Existing defenses mainly focus on Byzantine-robust FL aggregations, and largely ignore the impact of the underlying deep neural network (DNN) that is used to FL training. Inspired by recent findings on critical learning periods (CLP) in DNNs, where small gradient errors have irrecoverable impact on the final model accuracy, we propose a new defense, called a CLP-aware defense against poisoning of FL (DeFL). The key idea of DeFL is to measure fine-grained differences between DNN model updates via an easy-to-compute federated gradient norm vector (FGNV) metric. Using FGNV, DeFL simultaneously detects malicious clients and identifies CLP, which in turn is leveraged to guide the adaptive removal of detected malicious clients from aggregation. As a result, DeFL not only mitigates model poisoning attacks on the global model but also is robust to detection errors. Our extensive experiments on three benchmark datasets demonstrate that DeFL produces significant performance gain over conventional defenses against state-of-the-art model poisoning attacks.", + "primary_area": "machine learning iv", + "author": "Gang Yan; Hao Wang; Xu Yuan; Jian Li", + "authorids": "", + "aff": "SUNY-Binghamton University; Louisiana State University; University of Louisiana at Lafayette; SUNY-Binghamton University", + "bibtex": "@article{Yan_Wang_Yuan_Li_2023, title={DeFL: Defending against Model Poisoning Attacks in Federated Learning via Critical Learning Periods Awareness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26271}, DOI={10.1609/aaai.v37i9.26271}, abstractNote={Federated learning (FL) is known to be susceptible to model poisoning attacks in which malicious clients hamper the accuracy of the global model by sending manipulated model updates to the central server during the FL training process. Existing defenses mainly focus on Byzantine-robust FL aggregations, and largely ignore the impact of the underlying deep neural network (DNN) that is used to FL training. 
Inspired by recent findings on critical learning periods (CLP) in DNNs, where small gradient errors have irrecoverable impact on the final model accuracy, we propose a new defense, called a CLP-aware defense against poisoning of FL (DeFL). The key idea of DeFL is to measure fine-grained differences between DNN model updates via an easy-to-compute federated gradient norm vector (FGNV) metric. Using FGNV, DeFL simultaneously detects malicious clients and identifies CLP, which in turn is leveraged to guide the adaptive removal of detected malicious clients from aggregation. As a result, DeFL not only mitigates model poisoning attacks on the global model but also is robust to detection errors. Our extensive experiments on three benchmark datasets demonstrate that DeFL produces significant performance gain over conventional defenses against state-of-the-art model poisoning attacks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Gang and Wang, Hao and Yuan, Xu and Li, Jian}, year={2023}, month={Jun.}, pages={10711-10719} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26271/26043", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26271", + "pdf_size": 343989, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15570901578735579588&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "binghamton.edu;lsu.edu;louisiana.edu;binghamton.edu", + "email": "binghamton.edu;lsu.edu;louisiana.edu;binghamton.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Binghamton University;Louisiana State University;University of Louisiana at Lafayette", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.binghamton.edu;https://www.lsu.edu;https://www.louisiana.edu", + "aff_unique_abbr": "Binghamton;LSU;ULL", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Binghamton;;Lafayette", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25411", + "title": "DeMT: Deformable Mixer Transformer for Multi-Task Learning of Dense Prediction", + "track": "main", + "status": "Technical", + "abstract": "Convolution neural networks (CNNs) and Transformers have their own advantages and both have been widely used for dense prediction in multi-task learning (MTL). Most of the current studies on MTL solely rely on CNN or Transformer. In this work, we present a novel MTL model by combining both merits of deformable CNN and query-based Transformer for multi-task learning of dense prediction. Our method, named DeMT, is based on a simple and effective encoder-decoder architecture (i.e., deformable mixer encoder and task-aware transformer decoder). First, the deformable mixer encoder contains two types of operators: the channel-aware mixing operator leveraged to allow communication among different channels (i.e., efficient channel location mixing), and the spatial-aware deformable operator with deformable convolution applied to efficiently sample more informative spatial locations (i.e., deformed features). Second, the task-aware transformer decoder consists of the task interaction block and task query block. The former is applied to capture task interaction features via self-attention. The latter leverages the deformed features and task-interacted features to generate the corresponding task-specific feature through a query-based Transformer for corresponding task predictions. Extensive experiments on two dense image prediction datasets, NYUD-v2 and PASCAL-Context, demonstrate that our model uses fewer GFLOPs and significantly outperforms current Transformer- and CNN-based competitive models on a variety of metrics. 
The code is available at https://github.com/yangyangxu0/DeMT.", + "primary_area": "computer vision iii", + "author": "Yangyang Xu; Yibo Yang; Lefei Zhang", + "authorids": "", + "aff": "National Engineering Research Center for Multimedia Software, School of Computer Science, Wuhan University, China+Hubei Luojia Laboratory, China; JD Explore Academy, China; National Engineering Research Center for Multimedia Software, School of Computer Science, Wuhan University, China", + "bibtex": "@article{Xu_Yang_Zhang_2023, title={DeMT: Deformable Mixer Transformer for Multi-Task Learning of Dense Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25411}, DOI={10.1609/aaai.v37i3.25411}, abstractNote={Convolution neural networks (CNNs) and Transformers have their own advantages and both have been widely used for dense prediction in multi-task learning (MTL). Most of the current studies on MTL solely rely on CNN or Transformer. In this work, we present a novel MTL model by combining both merits of deformable CNN and query-based Transformer for multi-task learning of dense prediction. Our method, named DeMT, is based on a simple and effective encoder-decoder architecture (i.e., deformable mixer encoder and task-aware transformer decoder). First, the deformable mixer encoder contains two types of operators: the channel-aware mixing operator leveraged to allow communication among different channels (i.e., efficient channel location mixing), and the spatial-aware deformable operator with deformable convolution applied to efficiently sample more informative spatial locations (i.e., deformed features). Second, the task-aware transformer decoder consists of the task interaction block and task query block. The former is applied to capture task interaction features via self-attention. 
The latter leverages the deformed features and task-interacted features to generate the corresponding task-specific feature through a query-based Transformer for corresponding task predictions. Extensive experiments on two dense image prediction datasets, NYUD-v2 and PASCAL-Context, demonstrate that our model uses fewer GFLOPs and significantly outperforms current Transformer- and CNN-based competitive models on a variety of metrics. The code is available at https://github.com/yangyangxu0/DeMT.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Yangyang and Yang, Yibo and Zhang, Lefei}, year={2023}, month={Jun.}, pages={3072-3080} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25411/25183", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25411", + "pdf_size": 580430, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5637839806221384660&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "whu.edu.cn;pku.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;pku.edu.cn;whu.edu.cn", + "github": "https://github.com/yangyangxu0/DeMT", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0", + "aff_unique_norm": "Wuhan University;Hubei Luojia Laboratory;JD Explore Academy", + "aff_unique_dep": "School of Computer Science;;", + "aff_unique_url": "http://www.whu.edu.cn;;", + "aff_unique_abbr": "WHU;;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25496", + "title": "Debiased Fine-Tuning for Vision-Language Models by Prompt Regularization", + "track": "main", + "status": "Technical", + "abstract": "We present a new paradigm for fine-tuning large-scale vision-language pre-trained models on downstream task, dubbed Prompt Regularization (ProReg). 
Different from traditional fine-tuning which easily overfits to the downstream task data, ProReg uses the prediction by prompting the pretrained model to regularize the fine-tuning. The motivation is: by prompting the large model \u201ca photo of a [CLASS]\u201d, the fill-in answer is only dependent on the pretraining encyclopedic knowledge while independent of the task data distribution, which is usually biased. Specifically, given a training sample prediction during fine-tuning, we first calculate its Kullback-Leibler loss of the prompt prediction and Cross-Entropy loss of the ground-truth label, and then combine them with a proposed sample-wise adaptive trade- off weight, which automatically adjusts the transfer between the pretrained and downstream domains. On various out-of-distribution benchmarks, we show the consistently strong performance of ProReg compared with conventional fine-tuning, zero-shot prompt, prompt tuning, and other state-of-the-art methods.", + "primary_area": "computer vision iii", + "author": "Beier Zhu; Yulei Niu; Saeil Lee; Minhoe Hur; Hanwang Zhang", + "authorids": "", + "aff": "Nanyang Technological University; Columbia University; HMGICS AIR Center; AIRS Company, Hyundai Motor Group; Nanyang Technological University", + "bibtex": "@article{Zhu_Niu_Lee_Hur_Zhang_2023, title={Debiased Fine-Tuning for Vision-Language Models by Prompt Regularization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25496}, DOI={10.1609/aaai.v37i3.25496}, abstractNote={We present a new paradigm for fine-tuning large-scale vision-language pre-trained models on downstream task, dubbed Prompt Regularization (ProReg). Different from traditional fine-tuning which easily overfits to the downstream task data, ProReg uses the prediction by prompting the pretrained model to regularize the fine-tuning. 
The motivation is: by prompting the large model \u201ca photo of a [CLASS]\u201d, the fill-in answer is only dependent on the pretraining encyclopedic knowledge while independent of the task data distribution, which is usually biased. Specifically, given a training sample prediction during fine-tuning, we first calculate its Kullback-Leibler loss of the prompt prediction and Cross-Entropy loss of the ground-truth label, and then combine them with a proposed sample-wise adaptive trade- off weight, which automatically adjusts the transfer between the pretrained and downstream domains. On various out-of-distribution benchmarks, we show the consistently strong performance of ProReg compared with conventional fine-tuning, zero-shot prompt, prompt tuning, and other state-of-the-art methods.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Beier and Niu, Yulei and Lee, Saeil and Hur, Minhoe and Zhang, Hanwang}, year={2023}, month={Jun.}, pages={3834-3842} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25496/25268", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25496", + "pdf_size": 1148814, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2495281730957276544&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff_domain": "e.ntu.edu.sg;gmail.com;hmgics.com;hyundai.com;ntu.edu.sg", + "email": "e.ntu.edu.sg;gmail.com;hmgics.com;hyundai.com;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "Nanyang Technological University;Columbia University;HMGICS AIR Center;Hyundai Motor Group", + "aff_unique_dep": ";;HMGICS AIR Center;AIRS Company", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.columbia.edu;;https://www.hyundai.com", + "aff_unique_abbr": "NTU;Columbia;;HMG", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;2;0", + 
"aff_country_unique": "Singapore;United States;South Korea" + }, + { + "id": "article-27000", + "title": "Debiasing Intrinsic Bias and Application Bias Jointly via Invariant Risk Minimization (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Demographic biases and social stereotypes are common in pretrained language models (PLMs), while the fine-tuning in downstream applications can also produce new biases or amplify the impact of the original biases. Existing works separate the debiasing from the fine-tuning procedure, which results in a gap between intrinsic bias and application bias. In this work, we propose a debiasing framework CauDebias to eliminate both biases, which directly combines debiasing with fine-tuning and can be applied for any PLMs in downstream tasks. We distinguish the bias-relevant (non-causal factors) and label-relevant (causal factors) parts in sentences from a causal invariant perspective. Specifically, we perform intervention on non-causal factors in different demographic groups, and then devise an invariant risk minimization loss to trade-off performance between bias mitigation and task accuracy. 
Experimental results on three downstream tasks show that our CauDebias can remarkably reduce biases in PLMs while minimizing the impact on downstream tasks.", + "primary_area": "", + "author": "Yuzhou Mao; Liu Yu; Yi Yang; Fan Zhou; Ting Zhong", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; Hong Kong University of Science and Technology; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China", + "bibtex": "@article{Mao_Yu_Yang_Zhou_Zhong_2024, title={Debiasing Intrinsic Bias and Application Bias Jointly via Invariant Risk Minimization (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27000}, DOI={10.1609/aaai.v37i13.27000}, abstractNote={Demographic biases and social stereotypes are common in pretrained language models (PLMs), while the fine-tuning in downstream applications can also produce new biases or amplify the impact of the original biases. Existing works separate the debiasing from the fine-tuning procedure, which results in a gap between intrinsic bias and application bias. In this work, we propose a debiasing framework CauDebias to eliminate both biases, which directly combines debiasing with fine-tuning and can be applied for any PLMs in downstream tasks. We distinguish the bias-relevant (non-causal factors) and label-relevant (causal factors) parts in sentences from a causal invariant perspective. Specifically, we perform intervention on non-causal factors in different demographic groups, and then devise an invariant risk minimization loss to trade-off performance between bias mitigation and task accuracy. 
Experimental results on three downstream tasks show that our CauDebias can remarkably reduce biases in PLMs while minimizing the impact on downstream tasks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Yuzhou and Yu, Liu and Yang, Yi and Zhou, Fan and Zhong, Ting}, year={2024}, month={Jul.}, pages={16280-16281} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27000/26772", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27000", + "pdf_size": 168300, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1116734180380434206&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "outlook.com;std.uestc.edu.cn;ust.hk;uestc.edu.cn;uestc.edu.cn", + "email": "outlook.com;std.uestc.edu.cn;ust.hk;uestc.edu.cn;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.ust.hk", + "aff_unique_abbr": "UESTC;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26234", + "title": "Decentralized Riemannian Algorithm for Nonconvex Minimax Problems", + "track": "main", + "status": "Technical", + "abstract": "The minimax optimization over Riemannian manifolds (possibly nonconvex constraints) has been actively applied to solve many problems, such as robust dimensionality reduction and deep neural networks with orthogonal weights (Stiefel manifold). Although many optimization algorithms for minimax problems have been developed in the Euclidean setting, it is difficult to convert them into Riemannian cases, and algorithms for nonconvex minimax problems with nonconvex constraints are even rare. 
On the other hand, to address the big data challenges, decentralized (serverless) training techniques have recently been emerging since they can reduce communications overhead and avoid the bottleneck problem on the server node. Nonetheless, the algorithm for decentralized Riemannian minimax problems has not been studied. In this paper, we study the distributed nonconvex-strongly-concave minimax optimization problem over the Stiefel manifold and propose both deterministic and stochastic minimax methods. The Steifel manifold is a non-convex set. The global function is represented as the finite sum of local functions. For the deterministic setting, we propose DRGDA and prove that our deterministic method achieves a gradient complexity of O( epsilon(-2)) under mild conditions. For the stochastic setting, we propose DRSGDA and prove that our stochastic method achieves a gradient complexity of O( epsilon(-4)). The DRGDA and DRSGDA are the first algorithms for distributed minimax optimization with nonconvex constraints with exact convergence. 
Extensive experimental results on the Deep Neural Networks (DNNs) training over the Stiefel manifold demonstrate the efficiency of our algorithms.", + "primary_area": "machine learning iv", + "author": "Xidong Wu; Zhengmian Hu; Heng Huang", + "authorids": "", + "aff": "Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States; Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States; Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States", + "bibtex": "@article{Wu_Hu_Huang_2023, title={Decentralized Riemannian Algorithm for Nonconvex Minimax Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26234}, DOI={10.1609/aaai.v37i9.26234}, abstractNote={The minimax optimization over Riemannian manifolds (possibly nonconvex constraints) has been actively applied to solve many problems, such as robust dimensionality reduction and deep neural networks with orthogonal weights (Stiefel manifold). Although many optimization algorithms for minimax problems have been developed in the Euclidean setting, it is difficult to convert them into Riemannian cases, and algorithms for nonconvex minimax problems with nonconvex constraints are even rare. On the other hand, to address the big data challenges, decentralized (serverless) training techniques have recently been emerging since they can reduce communications overhead and avoid the bottleneck problem on the server node. Nonetheless, the algorithm for decentralized Riemannian minimax problems has not been studied. In this paper, we study the distributed nonconvex-strongly-concave minimax optimization problem over the Stiefel manifold and propose both deterministic and stochastic minimax methods. The Steifel manifold is a non-convex set. The global function is represented as the finite sum of local functions. 
For the deterministic setting, we propose DRGDA and prove that our deterministic method achieves a gradient complexity of O( epsilon(-2)) under mild conditions. For the stochastic setting, we propose DRSGDA and prove that our stochastic method achieves a gradient complexity of O( epsilon(-4)). The DRGDA and DRSGDA are the first algorithms for distributed minimax optimization with nonconvex constraints with exact convergence. Extensive experimental results on the Deep Neural Networks (DNNs) training over the Stiefel manifold demonstrate the efficiency of our algorithms.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Xidong and Hu, Zhengmian and Huang, Heng}, year={2023}, month={Jun.}, pages={10370-10378} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26234/26006", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26234", + "pdf_size": 488752, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7219243174295169248&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "outlook.com;gmail.com;gmail.com", + "email": "outlook.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Pittsburgh", + "aff_unique_dep": "Electrical and Computer Engineering", + "aff_unique_url": "https://www.pitt.edu", + "aff_unique_abbr": "Pitt", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26251", + "title": "Decentralized Stochastic Multi-Player Multi-Armed Walking Bandits", + "track": "main", + "status": "Technical", + "abstract": "Multi-player multi-armed bandit is an increasingly relevant decision-making problem, motivated by applications to cognitive radio systems. 
Most research for this problem focuses exclusively on the settings that players have full access to all arms and receive no reward when pulling the same arm. Hence all players solve the same bandit problem with the goal of maximizing their cumulative reward. However, these settings neglect several important factors in many real-world applications, where players have limited access to a dynamic local subset of arms (i.e., an arm could sometimes be ``walking'' and not accessible to the player). To this end, this paper proposes a multi-player multi-armed walking bandits model, aiming to address aforementioned modeling issues. The goal now is to maximize the reward, however, players can only pull arms from the local subset and only collect a full reward if no other players pull the same arm. We adopt Upper Confidence Bound (UCB) to deal with the exploration-exploitation tradeoff and employ distributed optimization techniques to properly handle collisions. By carefully integrating these two techniques, we propose a decentralized algorithm with near-optimal guarantee on the regret, and can be easily implemented to obtain competitive empirical performance.", + "primary_area": "machine learning iv", + "author": "Guojun Xiong; Jian Li", + "authorids": "", + "aff": "SUNY-Binghamton University; SUNY-Binghamton University", + "bibtex": "@article{Xiong_Li_2023, title={Decentralized Stochastic Multi-Player Multi-Armed Walking Bandits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26251}, DOI={10.1609/aaai.v37i9.26251}, abstractNote={Multi-player multi-armed bandit is an increasingly relevant decision-making problem, motivated by applications to cognitive radio systems. Most research for this problem focuses exclusively on the settings that players have full access to all arms and receive no reward when pulling the same arm. Hence all players solve the same bandit problem with the goal of maximizing their cumulative reward. 
However, these settings neglect several important factors in many real-world applications, where players have limited access to a dynamic local subset of arms (i.e., an arm could sometimes be ``walking\u2019\u2019 and not accessible to the player). To this end, this paper proposes a multi-player multi-armed walking bandits model, aiming to address aforementioned modeling issues. The goal now is to maximize the reward, however, players can only pull arms from the local subset and only collect a full reward if no other players pull the same arm. We adopt Upper Confidence Bound (UCB) to deal with the exploration-exploitation tradeoff and employ distributed optimization techniques to properly handle collisions. By carefully integrating these two techniques, we propose a decentralized algorithm with near-optimal guarantee on the regret, and can be easily implemented to obtain competitive empirical performance.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiong, Guojun and Li, Jian}, year={2023}, month={Jun.}, pages={10528-10536} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26251/26023", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26251", + "pdf_size": 325854, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11717671519463461020&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "binghamton.edu;binghamton.edu", + "email": "binghamton.edu;binghamton.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Binghamton University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.binghamton.edu", + "aff_unique_abbr": "Binghamton", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Binghamton", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25649", + "title": "Decision-Making Context Interaction Network for 
Click-Through Rate Prediction", + "track": "main", + "status": "Technical", + "abstract": "Click-through rate (CTR) prediction is crucial in recommendation and online advertising systems. Existing methods usually model user behaviors, while ignoring the informative context which influences the user to make a click decision, e.g., click pages and pre-ranking candidates that inform inferences about user interests, leading to suboptimal performance. In this paper, we propose a Decision-Making Context Interaction Network (DCIN), which deploys a carefully designed Context Interaction Unit (CIU) to learn decision-making contexts and thus benefits CTR prediction. In addition, the relationship between different decision-making context sources is explored by the proposed Adaptive Interest Aggregation Unit (AIAU) to improve CTR prediction further. In the experiments on public and industrial datasets, DCIN significantly outperforms the state-of-the-art methods. Notably, the model has obtained the improvement of CTR+2.9%/CPM+2.1%/GMV+1.5% for online A/B testing and served the main traffic of Meituan Waimai advertising system.", + "primary_area": "domain s of application", + "author": "Xiang Li; Shuwei Chen; Jian Dong; Jin Zhang; Yongkang Wang; Xingxing Wang; Dong Wang", + "authorids": "", + "aff": "Meituan, Beijing, China; Meituan, Beijing, China; Meituan, Beijing, China; Meituan, Beijing, China; Meituan, Beijing, China; Meituan, Beijing, China; Meituan, Beijing, China", + "bibtex": "@article{Li_Chen_Dong_Zhang_Wang_Wang_Wang_2023, title={Decision-Making Context Interaction Network for Click-Through Rate Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25649}, DOI={10.1609/aaai.v37i4.25649}, abstractNote={Click-through rate (CTR) prediction is crucial in recommendation and online advertising systems. 
Existing methods usually model user behaviors, while ignoring the informative context which influences the user to make a click decision, e.g., click pages and pre-ranking candidates that inform inferences about user interests, leading to suboptimal performance. In this paper, we propose a Decision-Making Context Interaction Network (DCIN), which deploys a carefully designed Context Interaction Unit (CIU) to learn decision-making contexts and thus benefits CTR prediction. In addition, the relationship between different decision-making context sources is explored by the proposed Adaptive Interest Aggregation Unit (AIAU) to improve CTR prediction further. In the experiments on public and industrial datasets, DCIN significantly outperforms the state-of-the-art methods. Notably, the model has obtained the improvement of CTR+2.9%/CPM+2.1%/GMV+1.5% for online A/B testing and served the main traffic of Meituan Waimai advertising system.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xiang and Chen, Shuwei and Dong, Jian and Zhang, Jin and Wang, Yongkang and Wang, Xingxing and Wang, Dong}, year={2023}, month={Jun.}, pages={5195-5202} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25649/25421", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25649", + "pdf_size": 4822181, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3119520589835121524&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com", + "email": "meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Meituan", + "aff_unique_dep": "", + "aff_unique_url": "https://www.meituan.com", + "aff_unique_abbr": "Meituan", + "aff_campus_unique_index": "0;0;0;0;0;0;0", 
+ "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25102", + "title": "Deconstructed Generation-Based Zero-Shot Model", + "track": "main", + "status": "Technical", + "abstract": "Recent research on Generalized Zero-Shot Learning (GZSL) has focused primarily on generation-based methods. However, current literature has overlooked the fundamental principles of these methods and has made limited progress in a complex manner. In this paper, we aim to deconstruct the generator-classifier framework and provide guidance for its improvement and extension. We begin by breaking down the generator-learned unseen class distribution into class-level and instance-level distributions. Through our analysis of the role of these two types of distributions in solving the GZSL problem, we generalize the focus of the generation-based approach, emphasizing the importance of (i) attribute generalization in generator learning and (ii) independent classifier learning with partially biased data. We present a simple method based on this analysis that outperforms SotAs on four public GZSL datasets, demonstrating the validity of our deconstruction. Furthermore, our proposed method remains effective even without a generative model, representing a step towards simplifying the generator-classifier structure. Our code is available at https://github.com/cdb342/DGZ.", + "primary_area": "computer vision i", + "author": "Dubing Chen; Yuming Shen; Haofeng Zhang; Philip H.S. 
Torr", + "authorids": "", + "aff": "Nanjing University of Science and Technology; University of Oxford; Nanjing University of Science and Technology; University of Oxford", + "bibtex": "@article{Chen_Shen_Zhang_Torr_2023, title={Deconstructed Generation-Based Zero-Shot Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25102}, DOI={10.1609/aaai.v37i1.25102}, abstractNote={Recent research on Generalized Zero-Shot Learning (GZSL) has focused primarily on generation-based methods. However, current literature has overlooked the fundamental principles of these methods and has made limited progress in a complex manner. In this paper, we aim to deconstruct the generator-classifier framework and provide guidance for its improvement and extension. We begin by breaking down the generator-learned unseen class distribution into class-level and instance-level distributions. Through our analysis of the role of these two types of distributions in solving the GZSL problem, we generalize the focus of the generation-based approach, emphasizing the importance of (i) attribute generalization in generator learning and (ii) independent classifier learning with partially biased data. We present a simple method based on this analysis that outperforms SotAs on four public GZSL datasets, demonstrating the validity of our deconstruction. Furthermore, our proposed method remains effective even without a generative model, representing a step towards simplifying the generator-classifier structure. 
Our code is available at https://github.com/cdb342/DGZ.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Dubing and Shen, Yuming and Zhang, Haofeng and Torr, Philip H.S.}, year={2023}, month={Jun.}, pages={295-303} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25102/24874", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25102", + "pdf_size": 793592, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6807514132910163806&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "njust.edu.cn;gmail.com;njust.edu.cn;eng.ox.ac.uk", + "email": "njust.edu.cn;gmail.com;njust.edu.cn;eng.ox.ac.uk", + "github": "https://github.com/cdb342/DGZ", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Nanjing University of Science and Technology;University of Oxford", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.nust.edu.cn/;https://www.ox.ac.uk", + "aff_unique_abbr": "NUST;Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25922", + "title": "Decorate the Newcomers: Visual Domain Prompt for Continual Test Time Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Continual Test-Time Adaptation (CTTA) aims to adapt the source model to continually changing unlabeled target domains without access to the source data. Existing methods mainly focus on model-based adaptation in a self-training manner, such as predicting pseudo labels for new domain datasets. Since pseudo labels are noisy and unreliable, these methods suffer from catastrophic forgetting and error accumulation when dealing with dynamic data distributions. 
Motivated by the prompt learning in NLP, in this paper, we propose to learn an image-layer visual domain prompt for target domains while having the source model parameters frozen. During testing, the changing target datasets can be adapted to the source model by reformulating the input data with the learned visual prompts. Specifically, we devise two types of prompts, i.e., domains-specific prompts and domains-agnostic prompts, to extract current domain knowledge and maintain the domain-shared knowledge in the continual adaptation. Furthermore, we design a homeostasis-based adaptation strategy to suppress domain-sensitive parameters in domain-invariant prompts to learn domain-shared knowledge more effectively. This transition from the model-dependent paradigm to the model-free one enables us to bypass the catastrophic forgetting and error accumulation problems. Experiments show that our proposed method achieves significant performance gains over state-of-the-art methods on four widely-used benchmarks, including CIFAR-10C, CIFAR-100C, ImageNet-C, and VLCS datasets.", + "primary_area": "machine learning i", + "author": "Yulu Gan; Yan Bai; Yihang Lou; Xianzheng Ma; Renrui Zhang; Nian Shi; Lin Luo", + "authorids": "", + "aff": "Peking University; Huawei Technology; Wuhan University; Aerospace Information Research Institute, Chinese Academy of Sciences; Peking University; Aerospace Information Research Institute, Chinese Academy of Sciences; Peking University", + "bibtex": "@article{Gan_Bai_Lou_Ma_Zhang_Shi_Luo_2023, title={Decorate the Newcomers: Visual Domain Prompt for Continual Test Time Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25922}, DOI={10.1609/aaai.v37i6.25922}, abstractNote={Continual Test-Time Adaptation (CTTA) aims to adapt the source model to continually changing unlabeled target domains without access to the source data. 
Existing methods mainly focus on model-based adaptation in a self-training manner, such as predicting pseudo labels for new domain datasets. Since pseudo labels are noisy and unreliable, these methods suffer from catastrophic forgetting and error accumulation when dealing with dynamic data distributions. Motivated by the prompt learning in NLP, in this paper, we propose to learn an image-layer visual domain prompt for target domains while having the source model parameters frozen. During testing, the changing target datasets can be adapted to the source model by reformulating the input data with the learned visual prompts. Specifically, we devise two types of prompts, i.e., domains-specific prompts and domains-agnostic prompts, to extract current domain knowledge and maintain the domain-shared knowledge in the continual adaptation. Furthermore, we design a homeostasis-based adaptation strategy to suppress domain-sensitive parameters in domain-invariant prompts to learn domain-shared knowledge more effectively. This transition from the model-dependent paradigm to the model-free one enables us to bypass the catastrophic forgetting and error accumulation problems. 
Experiments show that our proposed method achieves significant performance gains over state-of-the-art methods on four widely-used benchmarks, including CIFAR-10C, CIFAR-100C, ImageNet-C, and VLCS datasets.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gan, Yulu and Bai, Yan and Lou, Yihang and Ma, Xianzheng and Zhang, Renrui and Shi, Nian and Luo, Lin}, year={2023}, month={Jun.}, pages={7595-7603} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25922/25694", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25922", + "pdf_size": 1393741, + "gs_citation": 100, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13306153535919523224&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn; ; ; ;", + "email": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn; ; ; ;", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;0;3;0", + "aff_unique_norm": "Peking University;Huawei;Wuhan University;Chinese Academy of Sciences", + "aff_unique_dep": ";;;Aerospace Information Research Institute", + "aff_unique_url": "http://www.pku.edu.cn;https://www.huawei.com;http://www.whu.edu.cn/;http://www.cas.ac.cn", + "aff_unique_abbr": "Peking U;Huawei;WHU;CAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26950", + "title": "Deep Anomaly Detection and Search via Reinforcement Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Semi-supervised anomaly detection is a data mining task which aims at learning features from partially-labeled datasets. We propose Deep Anomaly Detection and Search (DADS) with reinforcement learning. During the training process, the agent searches for possible anomalies in unlabeled dataset to enhance performance. 
Empirically, we compare DADS with several methods in the settings of leveraging known anomalies to detect both other known and unknown anomalies. Results show that DADS achieves good performance.", + "primary_area": "", + "author": "Chao Chen; Dawei Wang; Feng Mao; Zongzhang Zhang; Yang Yu", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; Alibaba Group, Hangzhou 310052, China; Alibaba Group, Hangzhou 310052, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Chen_Wang_Mao_Zhang_Yu_2024, title={Deep Anomaly Detection and Search via Reinforcement Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26950}, DOI={10.1609/aaai.v37i13.26950}, abstractNote={Semi-supervised anomaly detection is a data mining task which aims at learning features from partially-labeled datasets. We propose Deep Anomaly Detection and Search (DADS) with reinforcement learning. During the training process, the agent searches for possible anomalies in unlabeled dataset to enhance performance. Empirically, we compare DADS with several methods in the settings of leveraging known anomalies to detect both other known and unknown anomalies. 
Results show that DADS achieves good performance.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Chao and Wang, Dawei and Mao, Feng and Zhang, Zongzhang and Yu, Yang}, year={2024}, month={Jul.}, pages={16180-16181} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26950/26722", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26950", + "pdf_size": 306875, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18273534743948585731&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;alibaba-inc.com;alibaba-inc.com;nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;alibaba-inc.com;alibaba-inc.com;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;0", + "aff_unique_norm": "Nanjing University;Alibaba Group", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "Nanjing U;Alibaba", + "aff_campus_unique_index": "0;1;1;0;0", + "aff_campus_unique": "Nanjing;Hangzhou", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26214", + "title": "Deep Attentive Model for Knowledge Tracing", + "track": "main", + "status": "Technical", + "abstract": "Knowledge Tracing (KT) is a crucial task in the field of online education, since it aims to predict students' performance on exercises based on their learning history. One typical solution for knowledge tracing is to combine the classic models in educational psychology, such as Item Response Theory (IRT) and Cognitive Diagnosis (CD), with Deep Neural Networks (DNN) technologies. 
In this solution, a student and related exercises are mapped into feature vectors based on the student's performance at the current time step, however, it does not consider the impact of historical behavior sequences, and the relationships between historical sequences and students. In this paper, we develop DAKTN, a novel model which assimilates the historical sequences to tackle this challenge for better knowledge tracing. To be specific, we apply a pooling layer to incorporate the student behavior sequence in the embedding layer. After that, we further design a local activation unit, which can adaptively calculate the representation vectors by taking the relevance of historical sequences into consideration with respect to candidate student and exercises. Through experimental results on three real-world datasets, DAKTN significantly outperforms state-of-the-art baseline models. We also present the reasonableness of DAKTN by ablation testing.", + "primary_area": "machine learning iii", + "author": "Xinping Wang; Liangyu Chen; Min Zhang", + "authorids": "", + "aff": "East China Normal University; East China Normal University; East China Normal University", + "bibtex": "@article{Wang_Chen_Zhang_2023, title={Deep Attentive Model for Knowledge Tracing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26214}, DOI={10.1609/aaai.v37i8.26214}, abstractNote={Knowledge Tracing (KT) is a crucial task in the field of online education, since it aims to predict students\u2019 performance on exercises based on their learning history. One typical solution for knowledge tracing is to combine the classic models in educational psychology, such as Item Response Theory (IRT) and Cognitive Diagnosis (CD), with Deep Neural Networks (DNN) technologies. 
In this solution, a student and related exercises are mapped into feature vectors based on the student\u2019s performance at the current time step, however, it does not consider the impact of historical behavior sequences, and the relationships between historical sequences and students. In this paper, we develop DAKTN, a novel model which assimilates the historical sequences to tackle this challenge for better knowledge tracing. To be specific, we apply a pooling layer to incorporate the student behavior sequence in the embedding layer. After that, we further design a local activation unit, which can adaptively calculate the representation vectors by taking the relevance of historical sequences into consideration with respect to candidate student and exercises. Through experimental results on three real-world datasets, DAKTN significantly outperforms state-of-the-art baseline models. We also present the reasonableness of DAKTN by ablation testing.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Xinping and Chen, Liangyu and Zhang, Min}, year={2023}, month={Jun.}, pages={10192-10199} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26214/25986", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26214", + "pdf_size": 587291, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11936802496026932020&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "sei.ecnu.edu.cn; ; ", + "email": "sei.ecnu.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25090", + "title": "Deep Digging into the 
Generalization of Self-Supervised Monocular Depth Estimation", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised monocular depth estimation has been widely studied recently. Most of the work has focused on improving performance on benchmark datasets, such as KITTI, but has offered a few experiments on generalization performance. In this paper, we investigate the backbone networks (e.g., CNNs, Transformers, and CNN-Transformer hybrid models) toward the generalization of monocular depth estimation. We first evaluate state-of-the-art models on diverse public datasets, which have never been seen during the network training. Next, we investigate the effects of texture-biased and shape-biased representations using the various texture-shifted datasets that we generated. We observe that Transformers exhibit a strong shape bias and CNNs do a strong texture-bias. We also find that shape-biased models show better generalization performance for monocular depth estimation compared to texture-biased models. Based on these observations, we newly design a CNN-Transformer hybrid network with a multi-level adaptive feature fusion module, called MonoFormer. The design intuition behind MonoFormer is to increase shape bias by employing Transformers while compensating for the weak locality bias of Transformers by adaptively fusing multi-level representations. Extensive experiments show that the proposed method achieves state-of-the-art performance with various public datasets. 
Our method also shows the best generalization ability among the competitive methods.", + "primary_area": "computer vision i", + "author": "Jinwoo Bae; Sungho Moon; Sunghoon Im", + "authorids": "", + "aff": "Department of Electrical Engineering and Computer Science, DGIST, Daegu, Korea; Department of Electrical Engineering and Computer Science, DGIST, Daegu, Korea; Department of Electrical Engineering and Computer Science, DGIST, Daegu, Korea", + "bibtex": "@article{Bae_Moon_Im_2023, title={Deep Digging into the Generalization of Self-Supervised Monocular Depth Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25090}, DOI={10.1609/aaai.v37i1.25090}, abstractNote={Self-supervised monocular depth estimation has been widely studied recently. Most of the work has focused on improving performance on benchmark datasets, such as KITTI, but has offered a few experiments on generalization performance. In this paper, we investigate the backbone networks (e.g., CNNs, Transformers, and CNN-Transformer hybrid models) toward the generalization of monocular depth estimation. We first evaluate state-of-the-art models on diverse public datasets, which have never been seen during the network training. Next, we investigate the effects of texture-biased and shape-biased representations using the various texture-shifted datasets that we generated. We observe that Transformers exhibit a strong shape bias and CNNs do a strong texture-bias. We also find that shape-biased models show better generalization performance for monocular depth estimation compared to texture-biased models. Based on these observations, we newly design a CNN-Transformer hybrid network with a multi-level adaptive feature fusion module, called MonoFormer. The design intuition behind MonoFormer is to increase shape bias by employing Transformers while compensating for the weak locality bias of Transformers by adaptively fusing multi-level representations. 
Extensive experiments show that the proposed method achieves state-of-the-art performance with various public datasets. Our method also shows the best generalization ability among the competitive methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bae, Jinwoo and Moon, Sungho and Im, Sunghoon}, year={2023}, month={Jun.}, pages={187-196} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25090/24862", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25090", + "pdf_size": 9069372, + "gs_citation": 100, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8985207188672815430&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "dgist.ac.kr;dgist.ac.kr;dgist.ac.kr", + "email": "dgist.ac.kr;dgist.ac.kr;dgist.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Daegu Gyeongbuk Institute of Science and Technology", + "aff_unique_dep": "Department of Electrical Engineering and Computer Science", + "aff_unique_url": "https://www.dgist.ac.kr", + "aff_unique_abbr": "DGIST", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Daegu", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25475", + "title": "Deep Equilibrium Models for Snapshot Compressive Imaging", + "track": "main", + "status": "Technical", + "abstract": "The ability of snapshot compressive imaging (SCI) systems to efficiently capture high-dimensional (HD) data has led to an inverse problem, which consists of recovering the HD signal from the compressed and noisy measurement. While reconstruction algorithms grow fast to solve it with the recent advances of deep learning, the fundamental issue of accurate and stable recovery remains. To this end, we propose deep equilibrium models (DEQ) for video SCI, fusing data-driven regularization and stable convergence in a theoretically sound manner. 
Each equilibrium model implicitly learns a nonexpansive operator and analytically computes the fixed point, thus enabling unlimited iterative steps and infinite network depth with only a constant memory requirement in training and testing. Specifically, we demonstrate how DEQ can be applied to two existing models for video SCI reconstruction: recurrent neural networks (RNN) and Plug-and-Play (PnP) algorithms. On a variety of datasets and real data, both quantitative and qualitative evaluations of our results demonstrate the effectiveness and stability of our proposed method. The code and models are available at: https://github.com/IndigoPurple/DEQSCI.", + "primary_area": "computer vision iii", + "author": "Yaping Zhao; Siming Zheng; Xin Yuan", + "authorids": "", + "aff": "Westlake University, Hangzhou, China + The University of Hong Kong, Pokfulam, Hong Kong SAR, China; Computer Network Information Center, Chinese Academy of Science, Beijing, China + University of Chinese Academy of Sciences, Beijing, China; Westlake University, Hangzhou, China", + "bibtex": "@article{Zhao_Zheng_Yuan_2023, title={Deep Equilibrium Models for Snapshot Compressive Imaging}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25475}, DOI={10.1609/aaai.v37i3.25475}, abstractNote={The ability of snapshot compressive imaging (SCI) systems to efficiently capture high-dimensional (HD) data has led to an inverse problem, which consists of recovering the HD signal from the compressed and noisy measurement. While reconstruction algorithms grow fast to solve it with the recent advances of deep learning, the fundamental issue of accurate and stable recovery remains. To this end, we propose deep equilibrium models (DEQ) for video SCI, fusing data-driven regularization and stable convergence in a theoretically sound manner. 
Each equilibrium model implicitly learns a nonexpansive operator and analytically computes the fixed point, thus enabling unlimited iterative steps and infinite network depth with only a constant memory requirement in training and testing. Specifically, we demonstrate how DEQ can be applied to two existing models for video SCI reconstruction: recurrent neural networks (RNN) and Plug-and-Play (PnP) algorithms. On a variety of datasets and real data, both quantitative and qualitative evaluations of our results demonstrate the effectiveness and stability of our proposed method. The code and models are available at: https://github.com/IndigoPurple/DEQSCI.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Yaping and Zheng, Siming and Yuan, Xin}, year={2023}, month={Jun.}, pages={3642-3650} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25475/25247", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25475", + "pdf_size": 5148978, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12817036608794355369&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "connect.hku.hk;cnic.cn;westlake.edu.cn", + "email": "connect.hku.hk;cnic.cn;westlake.edu.cn", + "github": "https://github.com/IndigoPurple/DEQSCI", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2+3;0", + "aff_unique_norm": "Westlake University;The University of Hong Kong;Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": ";;Computer Network Information Center;", + "aff_unique_url": "https://www.westlake.edu.cn;https://www.hku.hk;http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": ";HKU;CAS;UCAS", + "aff_campus_unique_index": "0+1;2+2;0", + "aff_campus_unique": "Hangzhou;Pokfulam;Beijing", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25618", + "title": "Deep Graph 
Structural Infomax", + "track": "main", + "status": "Technical", + "abstract": "In the scene of self-supervised graph learning, Mutual Information (MI) was recently introduced for graph encoding to generate robust node embeddings. A successful representative is Deep Graph Infomax (DGI), which essentially operates on the space of node features but ignores topological structures, and just considers global graph summary. In this paper, we present an effective model called Deep Graph Structural Infomax (DGSI) to learn node representation. We explore to derive the structural mutual information from the perspective of Information Bottleneck (IB), which defines a trade-off between the sufficiency and minimality of representation on the condition of the topological structure preservation. Intuitively, the derived constraints formally maximize the structural mutual information both edge-wise and local neighborhood-wise. Besides, we develop a general framework that incorporates the global representational mutual information, local representational mutual information, and sufficient structural information into the node representation. Essentially, our DGSI extends DGI and could capture more fine-grained semantic information as well as beneficial structural information in a self-supervised manner, thereby improving node representation and further boosting the learning performance. 
Extensive experiments on different types of datasets demonstrate the effectiveness and superiority of the proposed method.", + "primary_area": "data mining and knowledge management", + "author": "Wenting Zhao; Gongping Xu; Zhen Cui; Siqiang Luo; Cheng Long; Tong Zhang", + "authorids": "", + "aff": "Nanjing University of Science and Technology; Nanjing University of Science and Technology; Nanjing University of Science and Technology+*; Nanyang Technological University; Nanyang Technological University; Nanjing University of Science and Technology", + "bibtex": "@article{Zhao_Xu_Cui_Luo_Long_Zhang_2023, title={Deep Graph Structural Infomax}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25618}, DOI={10.1609/aaai.v37i4.25618}, abstractNote={In the scene of self-supervised graph learning, Mutual Information (MI) was recently introduced for graph encoding to generate robust node embeddings. A successful representative is Deep Graph Infomax (DGI), which essentially operates on the space of node features but ignores topological structures, and just considers global graph summary. In this paper, we present an effective model called Deep Graph Structural Infomax (DGSI) to learn node representation. We explore to derive the structural mutual information from the perspective of Information Bottleneck (IB), which defines a trade-off between the sufficiency and minimality of representation on the condition of the topological structure preservation. Intuitively, the derived constraints formally maximize the structural mutual information both edge-wise and local neighborhood-wise. Besides, we develop a general framework that incorporates the global representational mutual information, local representational mutual information, and sufficient structural information into the node representation. 
Essentially, our DGSI extends DGI and could capture more fine-grained semantic information as well as beneficial structural information in a self-supervised manner, thereby improving node representation and further boosting the learning performance. Extensive experiments on different types of datasets demonstrate the effectiveness and superiority of the proposed method.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Wenting and Xu, Gongping and Cui, Zhen and Luo, Siqiang and Long, Cheng and Zhang, Tong}, year={2023}, month={Jun.}, pages={4920-4928} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25618/25390", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25618", + "pdf_size": 871898, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10754572487505957930&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;ntu.edu.sg;ntu.edu.sg;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;ntu.edu.sg;ntu.edu.sg;njust.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;2;2;0", + "aff_unique_norm": "Nanjing University of Science and Technology;;Nanyang Technological University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.nust.edu.cn/;;https://www.ntu.edu.sg", + "aff_unique_abbr": "NUST;;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;2;2;0", + "aff_country_unique": "China;;Singapore" + }, + { + "id": "article-25938", + "title": "Deep Latent Regularity Network for Modeling Stochastic Partial Differential Equations", + "track": "main", + "status": "Technical", + "abstract": "Stochastic partial differential equations (SPDEs) are crucial for modelling dynamics with randomness in many areas including economics, physics, and atmospheric sciences. 
Recently, using deep learning approaches to learn the PDE solution for accelerating PDE simulation becomes increasingly popular. However, SPDEs have two unique properties that require new design on the models. First, the model to approximate the solution of SPDE should be generalizable over both initial conditions and the random sampled forcing term. Second, the random forcing terms usually have poor regularity whose statistics may diverge (e.g., the space-time white noise). To deal with the problems, in this work, we design a deep neural network called \\emph{Deep Latent Regularity Net} (DLR-Net). DLR-Net includes a regularity feature block as the main component, which maps the initial condition and the random forcing term to a set of regularity features. The processing of regularity features is inspired by regularity structure theory and the features provably compose a set of basis to represent the SPDE solution. The regularity features are then fed into a small backbone neural operator to get the output. We conduct experiments on various SPDEs including the dynamic $\\Phi^4_1$ model and the stochastic 2D Navier-Stokes equation to predict their solutions, and the results demonstrate that the proposed DLR-Net can achieve SOTA accuracy compared with the baselines. 
Moreover, the inference time is over 20 times faster than the traditional numerical solver and is comparable with the baseline deep learning models.", + "primary_area": "machine learning i", + "author": "Shiqi Gong; Peiyan Hu; Qi Meng; Yue Wang; Rongchan Zhu; Bingguang Chen; Zhiming Ma; Hao Ni; Tie-Yan Liu", + "authorids": "", + "aff": "Academy of Mathematics and Systems Science, Chinese Academy of Sciences; Academy of Mathematics and Systems Science, Chinese Academy of Sciences; Microsoft Research AI4Science; Microsoft Research AI4Science; Bielefeld University; Academy of Mathematics and Systems Science, Chinese Academy of Sciences; Academy of Mathematics and Systems Science, Chinese Academy of Sciences; Department of Mathematics, University College London+The Alan Turing Institute; Microsoft Research AI4Science", + "bibtex": "@article{Gong_Hu_Meng_Wang_Zhu_Chen_Ma_Ni_Liu_2023, title={Deep Latent Regularity Network for Modeling Stochastic Partial Differential Equations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25938}, DOI={10.1609/aaai.v37i6.25938}, abstractNote={Stochastic partial differential equations (SPDEs) are crucial for modelling dynamics with randomness in many areas including economics, physics, and atmospheric sciences. Recently, using deep learning approaches to learn the PDE solution for accelerating PDE simulation becomes increasingly popular. However, SPDEs have two unique properties that require new design on the models. First, the model to approximate the solution of SPDE should be generalizable over both initial conditions and the random sampled forcing term. Second, the random forcing terms usually have poor regularity whose statistics may diverge (e.g., the space-time white noise). To deal with the problems, in this work, we design a deep neural network called \\emph{Deep Latent Regularity Net} (DLR-Net). 
DLR-Net includes a regularity feature block as the main component, which maps the initial condition and the random forcing term to a set of regularity features. The processing of regularity features is inspired by regularity structure theory and the features provably compose a set of basis to represent the SPDE solution. The regularity features are then fed into a small backbone neural operator to get the output. We conduct experiments on various SPDEs including the dynamic $\\Phi^4_1$ model and the stochastic 2D Navier-Stokes equation to predict their solutions, and the results demonstrate that the proposed DLR-Net can achieve SOTA accuracy compared with the baselines. Moreover, the inference time is over 20 times faster than the traditional numerical solver and is comparable with the baseline deep learning models.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gong, Shiqi and Hu, Peiyan and Meng, Qi and Wang, Yue and Zhu, Rongchan and Chen, Bingguang and Ma, Zhiming and Ni, Hao and Liu, Tie-Yan}, year={2023}, month={Jun.}, pages={7740-7747} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25938/25710", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25938", + "pdf_size": 871556, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=456594819464466555&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.ucas.ac.cn; ;microsoft.com; ; ; ; ;ucl.ac.uk; ", + "email": "mails.ucas.ac.cn; ;microsoft.com; ; ; ; ;ucl.ac.uk; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;1;2;0;0;3+4;1", + "aff_unique_norm": "Chinese Academy of Sciences;Microsoft Research;Bielefeld University;University College London;The Alan Turing Institute", + "aff_unique_dep": "Academy of Mathematics and Systems Science;AI4Science;;Department of Mathematics;", + "aff_unique_url": 
"http://www.amss.cas.cn;https://www.microsoft.com/en-us/research/group/ai4science;https://www.uni-bielefeld.de/;https://www.ucl.ac.uk;https://www.turing.ac.uk", + "aff_unique_abbr": "AMSS;Microsoft Research AI4Science;Uni Bielefeld;UCL;ATI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";London", + "aff_country_unique_index": "0;0;1;1;2;0;0;3+3;1", + "aff_country_unique": "China;United States;Germany;United Kingdom" + }, + { + "id": "article-26933", + "title": "Deep Learning for Medical Prediction in Electronic Health Records", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "The widespread adoption of electronic health records (EHRs) has opened up new opportunities for using deep neural networks to enhance healthcare. However, modeling EHR data can be challenging due to its complex properties, such as missing values, data scarcity in multi-hospital systems, and multimodal irregularity. How to tackle various issues in EHRs for improving medical prediction is challenging and under exploration. I separately illustrate my works to address these issues in EHRs and discuss potential future directions.", + "primary_area": "", + "author": "Xinlu Zhang", + "authorids": "", + "aff": "Department of Computer Science, University of California, Santa Barbara", + "bibtex": "@article{Zhang_2024, title={Deep Learning for Medical Prediction in Electronic Health Records}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26933}, DOI={10.1609/aaai.v37i13.26933}, abstractNote={The widespread adoption of electronic health records (EHRs) has opened up new opportunities for using deep neural networks to enhance healthcare. However, modeling EHR data can be challenging due to its complex properties, such as missing values, data scarcity in multi-hospital systems, and multimodal irregularity. How to tackle various issues in EHRs for improving medical prediction is challenging and under exploration. 
I separately illustrate my works to address these issues in EHRs and discuss potential future directions.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Xinlu}, year={2024}, month={Jul.}, pages={16145-16146} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26933/26705", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26933", + "pdf_size": 56345, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17210823021350403143&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ucsb.edu", + "email": "ucsb.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of California, Santa Barbara", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ucsb.edu", + "aff_unique_abbr": "UCSB", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Santa Barbara", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26706", + "title": "Deep Learning on a Healthy Data Diet: Finding Important Examples for Fairness", + "track": "aaai special track", + "status": "Technical", + "abstract": "Data-driven predictive solutions predominant in commercial applications tend to suffer from biases and stereotypes, which raises equity concerns. Prediction models may discover, use, or amplify spurious correlations based on gender or other protected personal characteristics, thus discriminating against marginalized groups. Mitigating gender bias has become an important research focus in natural language processing (NLP) and is an area where annotated corpora are available. Data augmentation reduces gender bias by adding counterfactual examples to the training dataset. In this work, we show that some of the examples in the augmented dataset can be not important or even harmful to fairness. 
We hence propose a general method for pruning both the factual and counterfactual examples to maximize the model\u2019s fairness as measured by the demographic parity, equality of opportunity, and equality of odds. The fairness achieved by our method surpasses that of data augmentation on three text classification datasets, using no more than half of the examples in the augmented dataset. Our experiments are conducted using models of varying sizes and pre-training settings. WARNING: This work uses language that is offensive in nature.", + "primary_area": "ai for social impact", + "author": "Abdelrahman Zayed; Prasanna Parthasarathi; Gon\u00e7alo Mordido; Hamid Palangi; Samira Shabanian; Sarath Chandar", + "authorids": "", + "aff": "Mila - Quebec AI Institute+Polytechnique Montreal; Mila - Quebec AI Institute+McGill University; Mila - Quebec AI Institute+Polytechnique Montreal; Microsoft Research; Microsoft Research; Mila - Quebec AI Institute+Polytechnique Montreal+Canada CIFAR AI Chair", + "bibtex": "@article{Zayed_Parthasarathi_Mordido_Palangi_Shabanian_Chandar_2023, title={Deep Learning on a Healthy Data Diet: Finding Important Examples for Fairness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26706}, DOI={10.1609/aaai.v37i12.26706}, abstractNote={Data-driven predictive solutions predominant in commercial applications tend to suffer from biases and stereotypes, which raises equity concerns. Prediction models may discover, use, or amplify spurious correlations based on gender or other protected personal characteristics, thus discriminating against marginalized groups. Mitigating gender bias has become an important research focus in natural language processing (NLP) and is an area where annotated corpora are available. Data augmentation reduces gender bias by adding counterfactual examples to the training dataset. In this work, we show that some of the examples in the augmented dataset can be not important or even harmful to fairness. 
We hence propose a general method for pruning both the factual and counterfactual examples to maximize the model\u2019s fairness as measured by the demographic parity, equality of opportunity, and equality of odds. The fairness achieved by our method surpasses that of data augmentation on three text classification datasets, using no more than half of the examples in the augmented dataset. Our experiments are conducted using models of varying sizes and pre-training settings. WARNING: This work uses language that is offensive in nature.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zayed, Abdelrahman and Parthasarathi, Prasanna and Mordido, Gon\u00e7alo and Palangi, Hamid and Shabanian, Samira and Chandar, Sarath}, year={2023}, month={Jun.}, pages={14593-14601} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26706/26478", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26706", + "pdf_size": 507488, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6153819004421162654&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mila.quebec;mila.quebec;mila.quebec;microsoft.com;microsoft.com;mila.quebec", + "email": "mila.quebec;mila.quebec;mila.quebec;microsoft.com;microsoft.com;mila.quebec", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+2;0+1;3;3;0+1+4", + "aff_unique_norm": "Quebec AI Institute;Polytechnique Montreal;McGill University;Microsoft Corporation;Canadian Institute for Advanced Research", + "aff_unique_dep": "AI Institute;;;Microsoft Research;AI Chair", + "aff_unique_url": "https://mila.quebec;https://www.polymtl.ca;https://www.mcgill.ca;https://www.microsoft.com/en-us/research;https://www.cifar.ca", + "aff_unique_abbr": "Mila;PolyMTL;McGill;MSR;CIFAR", + "aff_campus_unique_index": "1;;1;1", + "aff_campus_unique": ";Montreal", + "aff_country_unique_index": "0+0;0+0;0+0;1;1;0+0+0", + 
"aff_country_unique": "Canada;United States" + }, + { + "id": "article-25338", + "title": "Deep Manifold Attack on Point Clouds via Parameter Plane Stretching", + "track": "main", + "status": "Technical", + "abstract": "Adversarial attack on point clouds plays a vital role in evaluating and improving the adversarial robustness of 3D deep learning models. Current attack methods are mainly applied by point perturbation in a non-manifold manner. In this paper, we formulate a novel manifold attack, which deforms the underlying 2-manifold surfaces via parameter plane stretching to generate adversarial point clouds. First, we represent the mapping between the parameter plane and underlying surface using generative-based networks. Second, the stretching is learned in the 2D parameter domain such that the generated 3D point cloud fools a pretrained classifier with minimal geometric distortion. Extensive experiments show that adversarial point clouds generated by manifold attack are smooth, undefendable and transferable, and outperform those samples generated by the state-of-the-art non-manifold ones.", + "primary_area": "computer vision ii", + "author": "Keke Tang; Jianpeng Wu; Weilong Peng; Yawen Shi; Peng Song; Zhaoquan Gu; Zhihong Tian; Wenping Wang", + "authorids": "", + "aff": "Guangzhou University; Guangzhou University; Guangzhou University+Peng Cheng Laboratory; Guangzhou University; Singapore University of Technology and Design; Harbin Institute of Technology (Shenzhen)+Peng Cheng Laboratory; Guangzhou University; Texas A&M University", + "bibtex": "@article{Tang_Wu_Peng_Shi_Song_Gu_Tian_Wang_2023, title={Deep Manifold Attack on Point Clouds via Parameter Plane Stretching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25338}, DOI={10.1609/aaai.v37i2.25338}, abstractNote={Adversarial attack on point clouds plays a vital role in evaluating and improving the adversarial robustness of 3D deep learning models. 
Current attack methods are mainly applied by point perturbation in a non-manifold manner. In this paper, we formulate a novel manifold attack, which deforms the underlying 2-manifold surfaces via parameter plane stretching to generate adversarial point clouds. First, we represent the mapping between the parameter plane and underlying surface using generative-based networks. Second, the stretching is learned in the 2D parameter domain such that the generated 3D point cloud fools a pretrained classifier with minimal geometric distortion. Extensive experiments show that adversarial point clouds generated by manifold attack are smooth, undefendable and transferable, and outperform those samples generated by the state-of-the-art non-manifold ones.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tang, Keke and Wu, Jianpeng and Peng, Weilong and Shi, Yawen and Song, Peng and Gu, Zhaoquan and Tian, Zhihong and Wang, Wenping}, year={2023}, month={Jun.}, pages={2420-2428} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25338/25110", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25338", + "pdf_size": 18831280, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17596314996536979287&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com;gzhu.edu.cn;gmail.com;sutd.edu.sg;hit.edu.cn;gzhu.edu.cn;cs.hku.hk", + "email": "gmail.com;gmail.com;gzhu.edu.cn;gmail.com;sutd.edu.sg;hit.edu.cn;gzhu.edu.cn;cs.hku.hk", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0+1;0;2;3+1;0;4", + "aff_unique_norm": "Guangzhou University;Peng Cheng Laboratory;Singapore University of Technology and Design;Harbin Institute of Technology;Texas A&M University", + "aff_unique_dep": ";;;;", + "aff_unique_url": "http://www.gzhu.edu.cn;http://www.pcl.ac.cn;https://www.sutd.edu.sg;http://en.hhit.edu.cn/;https://www.tamu.edu", + 
"aff_unique_abbr": "GU;PCL;SUTD;HIT;TAMU", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0+0;0;1;0+0;0;2", + "aff_country_unique": "China;Singapore;United States" + }, + { + "id": "article-25409", + "title": "Deep Parametric 3D Filters for Joint Video Denoising and Illumination Enhancement in Video Super Resolution", + "track": "main", + "status": "Technical", + "abstract": "Despite the quality improvement brought by the recent methods, video super-resolution (SR) is still very challenging, especially for videos that are low-light and noisy. The current best solution is to subsequently employ best models of video SR, denoising, and illumination enhancement, but doing so often lowers the image quality, due to the inconsistency between the models. This paper presents a new parametric representation called the Deep Parametric 3D Filters (DP3DF), which incorporates local spatiotemporal information to enable simultaneous denoising, illumination enhancement, and SR efficiently in a single encoder-and-decoder network. Also, a dynamic residual frame is jointly learned with the DP3DF via a shared backbone to further boost the SR quality. We performed extensive experiments, including a large-scale user study, to show our method's effectiveness. Our method consistently surpasses the best state-of-the-art methods on all the challenging real datasets with top PSNR and user ratings, yet having a very fast run time. 
The code is available at https://github.com/xiaogang00/DP3DF.", + "primary_area": "computer vision iii", + "author": "Xiaogang Xu; Ruixing Wang; Chi-Wing Fu; Jiaya Jia", + "authorids": "", + "aff": "Department of Computer Science and Engineering, The Chinese University of Hong Kong+SmartMore; SmartMore; Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong", + "bibtex": "@article{Xu_Wang_Fu_Jia_2023, title={Deep Parametric 3D Filters for Joint Video Denoising and Illumination Enhancement in Video Super Resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25409}, DOI={10.1609/aaai.v37i3.25409}, abstractNote={Despite the quality improvement brought by the recent methods, video super-resolution (SR) is still very challenging, especially for videos that are low-light and noisy. The current best solution is to subsequently employ best models of video SR, denoising, and illumination enhancement, but doing so often lowers the image quality, due to the inconsistency between the models. This paper presents a new parametric representation called the Deep Parametric 3D Filters (DP3DF), which incorporates local spatiotemporal information to enable simultaneous denoising, illumination enhancement, and SR efficiently in a single encoder-and-decoder network. Also, a dynamic residual frame is jointly learned with the DP3DF via a shared backbone to further boost the SR quality. We performed extensive experiments, including a large-scale user study, to show our method\u2019s effectiveness. Our method consistently surpasses the best state-of-the-art methods on all the challenging real datasets with top PSNR and user ratings, yet having a very fast run time. 
The code is available at https://github.com/xiaogang00/DP3DF.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Xiaogang and Wang, Ruixing and Fu, Chi-Wing and Jia, Jiaya}, year={2023}, month={Jun.}, pages={3054-3062} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25409/25181", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25409", + "pdf_size": 16798238, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8739866680505941287&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "cse.cuhk.edu.hk;smartmore.com;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;smartmore.com;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "github": "https://github.com/xiaogang00/DP3DF", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;0;0", + "aff_unique_norm": "The Chinese University of Hong Kong;SmartMore", + "aff_unique_dep": "Department of Computer Science and Engineering;", + "aff_unique_url": "https://www.cuhk.edu.hk;", + "aff_unique_abbr": "CUHK;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hong Kong;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-25073", + "title": "Deep Spiking Neural Networks with High Representation Similarity Model Visual Pathways of Macaque and Mouse", + "track": "main", + "status": "Technical", + "abstract": "Deep artificial neural networks (ANNs) play a major role in modeling the visual pathways of primate and rodent. However, they highly simplify the computational properties of neurons compared to their biological counterparts. Instead, Spiking Neural Networks (SNNs) are more biologically plausible models since spiking neurons encode information with time sequences of spikes, just like biological neurons do. However, there is a lack of studies on visual pathways with deep SNNs models. 
In this study, we model the visual cortex with deep SNNs for the first time, and also with a wide range of state-of-the-art deep CNNs and ViTs for comparison. Using three similarity metrics, we conduct neural representation similarity experiments on three neural datasets collected from two species under three types of stimuli. Based on extensive similarity analyses, we further investigate the functional hierarchy and mechanisms across species. Almost all similarity scores of SNNs are higher than their counterparts of CNNs with an average of 6.6%. Depths of the layers with the highest similarity scores exhibit little differences across mouse cortical regions, but vary significantly across macaque regions, suggesting that the visual processing structure of mice is more regionally homogeneous than that of macaques. Besides, the multi-branch structures observed in some top mouse brain-like neural networks provide computational evidence of parallel processing streams in mice, and the different performance in fitting macaque neural representations under different stimuli exhibits the functional specialization of information processing in macaques. 
Taken together, our study demonstrates that SNNs could serve as promising candidates to better model and explain the functional hierarchy and mechanisms of the visual system.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Liwei Huang; Zhengyu Ma; Liutao Yu; Huihui Zhou; Yonghong Tian", + "authorids": "", + "aff": "National Engineering Research Center of Visual Technology, School of Computer Science, Peking University, China+Department of Networked Intelligence, Peng Cheng Laboratory, China; Department of Networked Intelligence, Peng Cheng Laboratory, China; Department of Networked Intelligence, Peng Cheng Laboratory, China; Department of Networked Intelligence, Peng Cheng Laboratory, China; National Engineering Research Center of Visual Technology, School of Computer Science, Peking University, China+Department of Networked Intelligence, Peng Cheng Laboratory, China", + "bibtex": "@article{Huang_Ma_Yu_Zhou_Tian_2023, title={Deep Spiking Neural Networks with High Representation Similarity Model Visual Pathways of Macaque and Mouse}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25073}, DOI={10.1609/aaai.v37i1.25073}, abstractNote={Deep artificial neural networks (ANNs) play a major role in modeling the visual pathways of primate and rodent. However, they highly simplify the computational properties of neurons compared to their biological counterparts. Instead, Spiking Neural Networks (SNNs) are more biologically plausible models since spiking neurons encode information with time sequences of spikes, just like biological neurons do. However, there is a lack of studies on visual pathways with deep SNNs models. In this study, we model the visual cortex with deep SNNs for the first time, and also with a wide range of state-of-the-art deep CNNs and ViTs for comparison. 
Using three similarity metrics, we conduct neural representation similarity experiments on three neural datasets collected from two species under three types of stimuli. Based on extensive similarity analyses, we further investigate the functional hierarchy and mechanisms across species. Almost all similarity scores of SNNs are higher than their counterparts of CNNs with an average of 6.6%. Depths of the layers with the highest similarity scores exhibit little differences across mouse cortical regions, but vary significantly across macaque regions, suggesting that the visual processing structure of mice is more regionally homogeneous than that of macaques. Besides, the multi-branch structures observed in some top mouse brain-like neural networks provide computational evidence of parallel processing streams in mice, and the different performance in fitting macaque neural representations under different stimuli exhibits the functional specialization of information processing in macaques. Taken together, our study demonstrates that SNNs could serve as promising candidates to better model and explain the functional hierarchy and mechanisms of the visual system.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Liwei and Ma, Zhengyu and Yu, Liutao and Zhou, Huihui and Tian, Yonghong}, year={2023}, month={Jun.}, pages={31-39} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25073/24845", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25073", + "pdf_size": 5282345, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3098800315518904499&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.pku.edu.cn;pcl.ac.cn;pcl.ac.cn;pcl.ac.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pcl.ac.cn;pcl.ac.cn;pcl.ac.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1;0+1", + "aff_unique_norm": "Peking 
University;Peng Cheng Laboratory", + "aff_unique_dep": "School of Computer Science;Department of Networked Intelligence", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "Peking University;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25998", + "title": "Deep Visual Forced Alignment: Learning to Align Transcription with Talking Face Video", + "track": "main", + "status": "Technical", + "abstract": "Forced alignment refers to a technology that time-aligns a given transcription with a corresponding speech. However, as the forced alignment technologies have developed using speech audio, they might fail in alignment when the input speech audio is noise-corrupted or is not accessible. We focus on that there is another component that the speech can be inferred from, the speech video (i.e., talking face video). Since the drawbacks of audio-based forced alignment can be complemented using the visual information when the audio signal is under poor condition, we try to develop a novel video-based forced alignment method. However, different from audio forced alignment, it is challenging to develop a reliable visual forced alignment technology for the following two reasons: 1) Visual Speech Recognition (VSR) has a much lower performance compared to audio-based Automatic Speech Recognition (ASR), and 2) the translation from text to video is not reliable, so the method typically used for building audio forced alignment cannot be utilized in developing visual forced alignment. In order to alleviate these challenges, in this paper, we propose a new method that is appropriate for visual forced alignment, namely Deep Visual Forced Alignment (DVFA). The proposed DVFA can align the input transcription (i.e., sentence) with the talking face video without accessing the speech audio. 
Moreover, by augmenting the alignment task with anomaly case detection, DVFA can detect mismatches between the input transcription and the input video while performing the alignment. Therefore, we can robustly align the text with the talking face video even if there exist error words in the text. Through extensive experiments, we show the effectiveness of the proposed DVFA not only in the alignment task but also in interpreting the outputs of VSR models.", + "primary_area": "machine learning ii", + "author": "Minsu Kim; Chae Won Kim; Yong Man Ro", + "authorids": "", + "aff": "Image and Video Systems Lab, KAIST, South Korea; Image and Video Systems Lab, KAIST, South Korea; Image and Video Systems Lab, KAIST, South Korea", + "bibtex": "@article{Kim_Kim_Ro_2023, title={Deep Visual Forced Alignment: Learning to Align Transcription with Talking Face Video}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25998}, DOI={10.1609/aaai.v37i7.25998}, abstractNote={Forced alignment refers to a technology that time-aligns a given transcription with a corresponding speech. However, as the forced alignment technologies have developed using speech audio, they might fail in alignment when the input speech audio is noise-corrupted or is not accessible. We focus on that there is another component that the speech can be inferred from, the speech video (i.e., talking face video). Since the drawbacks of audio-based forced alignment can be complemented using the visual information when the audio signal is under poor condition, we try to develop a novel video-based forced alignment method. 
However, different from audio forced alignment, it is challenging to develop a reliable visual forced alignment technology for the following two reasons: 1) Visual Speech Recognition (VSR) has a much lower performance compared to audio-based Automatic Speech Recognition (ASR), and 2) the translation from text to video is not reliable, so the method typically used for building audio forced alignment cannot be utilized in developing visual forced alignment. In order to alleviate these challenges, in this paper, we propose a new method that is appropriate for visual forced alignment, namely Deep Visual Forced Alignment (DVFA). The proposed DVFA can align the input transcription (i.e., sentence) with the talking face video without accessing the speech audio. Moreover, by augmenting the alignment task with anomaly case detection, DVFA can detect mismatches between the input transcription and the input video while performing the alignment. Therefore, we can robustly align the text with the talking face video even if there exist error words in the text. 
Through extensive experiments, we show the effectiveness of the proposed DVFA not only in the alignment task but also in interpreting the outputs of VSR models.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Minsu and Kim, Chae Won and Ro, Yong Man}, year={2023}, month={Jun.}, pages={8273-8281} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25998/25770", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25998", + "pdf_size": 500762, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6873487989733618371&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "Image and Video Systems Lab", + "aff_unique_url": "https://www.kaist.edu", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26779", + "title": "DeepGemini: Verifying Dependency Fairness for Deep Neural Network", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deep neural networks (DNNs) have been widely adopted in many decision-making industrial applications. Their fairness issues, i.e., whether there exist unintended biases in the DNN, receive much attention and become critical concerns, which can directly cause negative impacts in our daily life and potentially undermine the fairness of our society, especially with their increasing deployment at an unprecedented speed. 
Recently, some early attempts have been made to provide fairness assurance of DNNs, such as fairness testing, which aims at finding discriminatory samples empirically, and fairness certification, which develops sound but not complete analysis to certify the fairness of DNNs. Nevertheless, how to formally compute discriminatory samples and fairness scores (i.e., the percentage of fair input space), is still largely uninvestigated. In this paper, we propose DeepGemini, a novel fairness formal analysis technique for DNNs, which contains two key components: discriminatory sample discovery and fairness score computation. To uncover discriminatory samples, we encode the fairness of DNNs as safety properties and search for discriminatory samples by means of state-of-the-art verification techniques for DNNs. This reduction enables us to be the first to formally compute discriminatory samples. To compute the fairness score, we develop counterexample guided fairness analysis, which utilizes four heuristics to efficiently approximate a lower bound of fairness score. Extensive experimental evaluations demonstrate the effectiveness and efficiency of DeepGemini on commonly-used benchmarks, and DeepGemini outperforms state-of-the-art DNN fairness certification approaches in terms of both efficiency and scalability.", + "primary_area": "safe and robust ai", + "author": "Xuan Xie; Fuyuan Zhang; Xinwen Hu; Lei Ma", + "authorids": "", + "aff": "University of Alberta, Canada; Kyushu University, Japan; Hunan Normal University, China; University of Alberta, Canada+Kyushu University, Japan+The University of Tokyo, Japan", + "bibtex": "@article{Xie_Zhang_Hu_Ma_2023, title={DeepGemini: Verifying Dependency Fairness for Deep Neural Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26779}, DOI={10.1609/aaai.v37i12.26779}, abstractNote={Deep neural networks (DNNs) have been widely adopted in many decision-making industrial applications. 
Their fairness issues, i.e., whether there exist unintended biases in the DNN, receive much attention and become critical concerns, which can directly cause negative impacts in our daily life and potentially undermine the fairness of our society, especially with their increasing deployment at an unprecedented speed. Recently, some early attempts have been made to provide fairness assurance of DNNs, such as fairness testing, which aims at finding discriminatory samples empirically, and fairness certification, which develops sound but not complete analysis to certify the fairness of DNNs. Nevertheless, how to formally compute discriminatory samples and fairness scores (i.e., the percentage of fair input space), is still largely uninvestigated. In this paper, we propose DeepGemini, a novel fairness formal analysis technique for DNNs, which contains two key components: discriminatory sample discovery and fairness score computation. To uncover discriminatory samples, we encode the fairness of DNNs as safety properties and search for discriminatory samples by means of state-of-the-art verification techniques for DNNs. This reduction enables us to be the first to formally compute discriminatory samples. To compute the fairness score, we develop counterexample guided fairness analysis, which utilizes four heuristics to efficiently approximate a lower bound of fairness score. 
Extensive experimental evaluations demonstrate the effectiveness and efficiency of DeepGemini on commonly-used benchmarks, and DeepGemini outperforms state-of-the-art DNN fairness certification approaches in terms of both efficiency and scalability.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Xuan and Zhang, Fuyuan and Hu, Xinwen and Ma, Lei}, year={2023}, month={Jun.}, pages={15251-15259} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26779/26551", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26779", + "pdf_size": 233882, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11486494331099975117&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0+1+3", + "aff_unique_norm": "University of Alberta;Kyushu University;Hunan Normal University;The University of Tokyo", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ualberta.ca;https://www.kyushu-u.ac.jp;http://www.hnu.edu.cn;https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "UAlberta;Kyushu U;HNU;UTokyo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0+1+1", + "aff_country_unique": "Canada;Japan;China" + }, + { + "id": "article-25658", + "title": "Deepfake Video Detection via Facial Action Dependencies Estimation", + "track": "main", + "status": "Technical", + "abstract": "Deepfake video detection has drawn significant attention from researchers due to the security issues induced by deepfake videos. Unfortunately, most of the existing deepfake detection approaches have not competently modeled the natural structures and movements of human faces. 
In this paper, we formulate the deepfake video detection problem into a graph classification task, and propose a novel paradigm named Facial Action Dependencies Estimation (FADE) for deepfake video detection. We propose a Multi-Dependency Graph Module (MDGM) to capture abundant dependencies among facial action units, and extracts subtle clues in these dependencies. MDGM can be easily integrated into the existing frame-level detection schemes to provide significant performance gains. Extensive experiments demonstrate the superiority of our method against the state-of-the-art methods.", + "primary_area": "domain s of application", + "author": "Lingfeng Tan; Yunhong Wang; Junfu Wang; Liang Yang; Xunxun Chen; Yuanfang Guo", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University, China; School of Computer Science and Engineering, Beihang University, China; School of Computer Science and Engineering, Beihang University, China; School of Artificial Intelligence, Hebei University of Technology, China; CNCERT/CC, Beijing, China; School of Computer Science and Engineering, Beihang University, China + Zhongguancun Laboratory, Beijing, China", + "bibtex": "@article{Tan_Wang_Wang_Yang_Chen_Guo_2023, title={Deepfake Video Detection via Facial Action Dependencies Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25658}, DOI={10.1609/aaai.v37i4.25658}, abstractNote={Deepfake video detection has drawn significant attention from researchers due to the security issues induced by deepfake videos. Unfortunately, most of the existing deepfake detection approaches have not competently modeled the natural structures and movements of human faces. In this paper, we formulate the deepfake video detection problem into a graph classification task, and propose a novel paradigm named Facial Action Dependencies Estimation (FADE) for deepfake video detection. 
We propose a Multi-Dependency Graph Module (MDGM) to capture abundant dependencies among facial action units, and extracts subtle clues in these dependencies. MDGM can be easily integrated into the existing frame-level detection schemes to provide significant performance gains. Extensive experiments demonstrate the superiority of our method against the state-of-the-art methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tan, Lingfeng and Wang, Yunhong and Wang, Junfu and Yang, Liang and Chen, Xunxun and Guo, Yuanfang}, year={2023}, month={Jun.}, pages={5276-5284} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25658/25430", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25658", + "pdf_size": 1195032, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18257557392260542404&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;vip.qq.com;cert.org.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;vip.qq.com;cert.org.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;2;0+3", + "aff_unique_norm": "Beihang University;Hebei University of Technology;China National Cyber Emergency Response Team/Coordination Center;Zhongguancun Laboratory", + "aff_unique_dep": "School of Computer Science and Engineering;School of Artificial Intelligence;;", + "aff_unique_url": "http://www.buaa.edu.cn;;http://www.cncert.org.cn;", + "aff_unique_abbr": "Beihang;;CNCERT/CC;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25125", + "title": "Defending Backdoor Attacks on Vision Transformer via Patch Processing", + "track": "main", + "status": "Technical", + "abstract": "Vision Transformers (ViTs) have a radically different architecture with 
significantly less inductive bias than Convolutional Neural Networks. Along with the improvement in performance, security and robustness of ViTs are also of great importance to study. In contrast to many recent works that exploit the robustness of ViTs against adversarial examples, this paper investigates a representative causative attack, i.e., backdoor. We first examine the vulnerability of ViTs against various backdoor attacks and find that ViTs are also quite vulnerable to existing attacks. However, we observe that the clean-data accuracy and backdoor attack success rate of ViTs respond distinctively to patch transformations before the positional encoding. Then, based on this finding, we propose an effective method for ViTs to defend both patch-based and blending-based trigger backdoor attacks via patch processing.\nThe performances are evaluated on several benchmark datasets, including CIFAR10, GTSRB, and TinyImageNet, which show the proposedds defense is very successful in mitigating backdoor attacks for ViTs. To the best of our knowledge, this paper presents the first defensive strategy that utilizes a unique characteristic of ViTs against backdoor attacks.", + "primary_area": "computer vision i", + "author": "Khoa D. Doan; Yingjie Lao; Peng Yang; Ping Li", + "authorids": "", + "aff": "College of Engineering and Computer Science, VinUniversity; Electrical and Computer Engineering, Clemson University, Clemson, SC 29634, USA; Meta Corporation, Bellevue, WA 98004, USA; LinkedIn Corporation, Bellevue, WA 98004, USA", + "bibtex": "@article{Doan_Lao_Yang_Li_2023, title={Defending Backdoor Attacks on Vision Transformer via Patch Processing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25125}, DOI={10.1609/aaai.v37i1.25125}, abstractNote={Vision Transformers (ViTs) have a radically different architecture with significantly less inductive bias than Convolutional Neural Networks. 
Along with the improvement in performance, security and robustness of ViTs are also of great importance to study. In contrast to many recent works that exploit the robustness of ViTs against adversarial examples, this paper investigates a representative causative attack, i.e., backdoor. We first examine the vulnerability of ViTs against various backdoor attacks and find that ViTs are also quite vulnerable to existing attacks. However, we observe that the clean-data accuracy and backdoor attack success rate of ViTs respond distinctively to patch transformations before the positional encoding. Then, based on this finding, we propose an effective method for ViTs to defend both patch-based and blending-based trigger backdoor attacks via patch processing.\nThe performances are evaluated on several benchmark datasets, including CIFAR10, GTSRB, and TinyImageNet, which show the proposedds defense is very successful in mitigating backdoor attacks for ViTs. To the best of our knowledge, this paper presents the first defensive strategy that utilizes a unique characteristic of ViTs against backdoor attacks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Doan, Khoa D. 
and Lao, Yingjie and Yang, Peng and Li, Ping}, year={2023}, month={Jun.}, pages={506-515} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25125/24897", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25125", + "pdf_size": 433594, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7778854171544576898&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "vinuni.edu.vn;clemson.edu;gmail.com;linkedin.com", + "email": "vinuni.edu.vn;clemson.edu;gmail.com;linkedin.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "VinUniversity;Clemson University;Meta Corporation;LinkedIn Corporation", + "aff_unique_dep": "College of Engineering and Computer Science;Electrical and Computer Engineering;;", + "aff_unique_url": "https://vinuni.edu.vn;https://www.clemson.edu;https://www.meta.com;https://www.linkedin.com", + "aff_unique_abbr": "VinUni;Clemson;Meta;LinkedIn", + "aff_campus_unique_index": "1;2;2", + "aff_campus_unique": ";Clemson;Bellevue", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "Vietnam;United States" + }, + { + "id": "article-25352", + "title": "Defending Black-Box Skeleton-Based Human Activity Classifiers", + "track": "main", + "status": "Technical", + "abstract": "Skeletal motions have been heavily relied upon for human activity recognition (HAR). Recently, a universal vulnerability of skeleton-based HAR has been identified across a variety of classifiers and data, calling for mitigation. To this end, we propose the first black-box defense method for skeleton-based HAR to our best knowledge. 
Our method is featured by full Bayesian treatments of the clean data, the adversaries and the classifier, leading to (1) a new Bayesian Energy-based formulation of robust discriminative classifiers, (2) a new adversary sampling scheme based on natural motion manifolds, and (3) a new post-train Bayesian strategy for black-box defense. We name our framework Bayesian Energy-based Adversarial Training or BEAT. BEAT is straightforward but elegant, which turns vulnerable black-box classifiers into robust ones without sacrificing accuracy. It demonstrates surprising and universal effectiveness across a wide range of skeletal HAR classifiers and datasets, under various attacks. Appendix and code are available.", + "primary_area": "computer vision ii", + "author": "He Wang; Yunfeng Diao; Zichang Tan; Guodong Guo", + "authorids": "", + "aff": "University of Leeds, UK; Hefei University of Technology, Hefei China; Institute of Deep Learning, Baidu Research, Beijing China; Institute of Deep Learning, Baidu Research, Beijing China", + "bibtex": "@article{Wang_Diao_Tan_Guo_2023, title={Defending Black-Box Skeleton-Based Human Activity Classifiers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25352}, DOI={10.1609/aaai.v37i2.25352}, abstractNote={Skeletal motions have been heavily relied upon for human activity recognition (HAR). Recently, a universal vulnerability of skeleton-based HAR has been identified across a variety of classifiers and data, calling for mitigation. To this end, we propose the first black-box defense method for skeleton-based HAR to our best knowledge. Our method is featured by full Bayesian treatments of the clean data, the adversaries and the classifier, leading to (1) a new Bayesian Energy-based formulation of robust discriminative classifiers, (2) a new adversary sampling scheme based on natural motion manifolds, and (3) a new post-train Bayesian strategy for black-box defense. 
We name our framework Bayesian Energy-based Adversarial Training or BEAT. BEAT is straightforward but elegant, which turns vulnerable black-box classifiers into robust ones without sacrificing accuracy. It demonstrates surprising and universal effectiveness across a wide range of skeletal HAR classifiers and datasets, under various attacks. Appendix and code are available.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, He and Diao, Yunfeng and Tan, Zichang and Guo, Guodong}, year={2023}, month={Jun.}, pages={2546-2554} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25352/25124", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25352", + "pdf_size": 4353512, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=391335316725236051&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "leeds.ac.uk;hfut.edu.cn;baidu.com;mail.wvu.edu", + "email": "leeds.ac.uk;hfut.edu.cn;baidu.com;mail.wvu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;2", + "aff_unique_norm": "University of Leeds;Hefei University of Technology;Baidu Research", + "aff_unique_dep": ";;Institute of Deep Learning", + "aff_unique_url": "https://www.leeds.ac.uk;http://www.hfut.edu.cn;https://baidu.com", + "aff_unique_abbr": "Leeds;HUT;Baidu", + "aff_campus_unique_index": "1;2;2", + "aff_campus_unique": ";Hefei;Beijing", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "article-25656", + "title": "Defending against Backdoor Attacks in Natural Language Generation", + "track": "main", + "status": "Technical", + "abstract": "The frustratingly fragile nature of neural network models make current natural language generation (NLG) systems prone to backdoor attacks and generate malicious sequences that could be sexist or offensive. 
Unfortunately, little effort has been invested to how backdoor attacks can affect current NLG models and how to defend against these attacks. In this work, by giving a formal definition of backdoor attack and defense, we investigate this problem on two important NLG tasks, machine translation and dialog generation. Tailored to the inherent nature of NLG models (e.g., producing a sequence of coherent words given contexts), we design defending strategies against attacks. \nWe find that testing the backward probability of generating sources given targets yields effective defense performance against all different types of attacks, and is able to handle the one-to-many issue in many NLG tasks such as dialog generation. We hope that this work can raise the awareness of backdoor risks concealed in deep NLG systems and inspire more future work (both attack and defense) towards this direction.", + "primary_area": "domain s of application", + "author": "Xiaofei Sun; Xiaoya Li; Yuxian Meng; Xiang Ao; Lingjuan Lyu; Jiwei Li; Tianwei Zhang", + "authorids": "", + "aff": "Zhejiang University; Shannon.AI; Chinese Academy of Sciences; Sony AI; Nanyang Technological University; Zhejiang University+Shannon.AI; Nanyang Technological University", + "bibtex": "@article{Sun_Li_Meng_Ao_Lyu_Li_Zhang_2023, title={Defending against Backdoor Attacks in Natural Language Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25656}, DOI={10.1609/aaai.v37i4.25656}, abstractNote={The frustratingly fragile nature of neural network models make current natural language generation (NLG) systems prone to backdoor attacks and generate malicious sequences that could be sexist or offensive. Unfortunately, little effort has been invested to how backdoor attacks can affect current NLG models and how to defend against these attacks. 
In this work, by giving a formal definition of backdoor attack and defense, we investigate this problem on two important NLG tasks, machine translation and dialog generation. Tailored to the inherent nature of NLG models (e.g., producing a sequence of coherent words given contexts), we design defending strategies against attacks. We find that testing the backward probability of generating sources given targets yields effective defense performance against all different types of attacks, and is able to handle the one-to-many issue in many NLG tasks such as dialog generation. We hope that this work can raise the awareness of backdoor risks concealed in deep NLG systems and inspire more future work (both attack and defense) towards this direction.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Xiaofei and Li, Xiaoya and Meng, Yuxian and Ao, Xiang and Lyu, Lingjuan and Li, Jiwei and Zhang, Tianwei}, year={2023}, month={Jun.}, pages={5257-5265} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25656/25428", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25656", + "pdf_size": 146157, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16087447509092631&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn; ; ; ; ;shannonai.com; ", + "email": "zju.edu.cn; ; ; ; ;shannonai.com; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;4;0+1;4", + "aff_unique_norm": "Zhejiang University;Shannon.AI;Chinese Academy of Sciences;Sony;Nanyang Technological University", + "aff_unique_dep": ";;;Sony AI;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.shannon.ai;https://www.cas.cn;https://www.sony.com;https://www.ntu.edu.sg", + "aff_unique_abbr": "ZJU;Shannon.AI;CAS;Sony AI;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;2;3;0+1;3", + 
"aff_country_unique": "China;United States;Japan;Singapore" + }, + { + "id": "article-26758", + "title": "Defending from Physically-Realizable Adversarial Attacks through Internal Over-Activation Analysis", + "track": "aaai special track", + "status": "Technical", + "abstract": "This work presents Z-Mask, an effective and deterministic strategy to improve the adversarial robustness of convolutional networks against physically-realizable adversarial attacks.\nThe presented defense relies on specific Z-score analysis performed on the internal network features to detect and mask the pixels corresponding to adversarial objects in the input image. To this end, spatially contiguous activations are examined in shallow and deep layers to suggest potential adversarial regions. Such proposals are then aggregated through a multi-thresholding mechanism.\nThe effectiveness of Z-Mask is evaluated with an extensive set of experiments carried out on models for semantic segmentation and object detection. 
The evaluation is performed with both digital patches added to the input images and printed patches in the real world.\nThe results confirm that Z-Mask outperforms the state-of-the-art methods in terms of detection accuracy and overall performance of the networks under attack.\nFurthermore, Z-Mask preserves its robustness against defense-aware attacks, making it suitable for safe and secure AI applications.", + "primary_area": "safe and robust ai", + "author": "Giulio Rossolini; Federico Nesti; Fabio Brau; Alessandro Biondi; Giorgio Buttazzo", + "authorids": "", + "aff": "Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa, Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa, Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa, Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa, Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa, Italy", + "bibtex": "@article{Rossolini_Nesti_Brau_Biondi_Buttazzo_2023, title={Defending from Physically-Realizable Adversarial Attacks through Internal Over-Activation Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26758}, DOI={10.1609/aaai.v37i12.26758}, abstractNote={This work presents Z-Mask, an effective and deterministic strategy to improve the adversarial robustness of convolutional networks against physically-realizable adversarial attacks.\nThe presented defense relies on specific Z-score analysis performed on the internal network features to detect and mask the pixels corresponding to adversarial objects in the input image. To this end, spatially contiguous activations are examined in shallow and deep layers to suggest potential adversarial regions. 
Such proposals are then aggregated through a multi-thresholding mechanism.\nThe effectiveness of Z-Mask is evaluated with an extensive set of experiments carried out on models for semantic segmentation and object detection. The evaluation is performed with both digital patches added to the input images and printed patches in the real world.\nThe results confirm that Z-Mask outperforms the state-of-the-art methods in terms of detection accuracy and overall performance of the networks under attack.\nFurthermore, Z-Mask preserves its robustness against defense-aware attacks, making it suitable for safe and secure AI applications.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rossolini, Giulio and Nesti, Federico and Brau, Fabio and Biondi, Alessandro and Buttazzo, Giorgio}, year={2023}, month={Jun.}, pages={15064-15072} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26758/26530", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26758", + "pdf_size": 9655495, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8121144121187503071&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "santannapisa.it;santannapisa.it;santannapisa.it;santannapisa.it;santannapisa.it", + "email": "santannapisa.it;santannapisa.it;santannapisa.it;santannapisa.it;santannapisa.it", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Scuola Superiore Sant\u2019Anna", + "aff_unique_dep": "Department of Excellence in Robotics and AI", + "aff_unique_url": "https://www.sssup.it", + "aff_unique_abbr": "SSSA", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Pisa", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25179", + "title": "Delving Deep into Pixel Alignment Feature for Accurate Multi-View Human Mesh Recovery", + "track": "main", + "status": 
"Technical", + "abstract": "Regression-based methods have shown high efficiency and effectiveness for multi-view human mesh recovery. The key components of a typical regressor lie in the feature extraction of input views and the fusion of multi-view features. In this paper, we present Pixel-aligned Feedback Fusion (PaFF) for accurate yet efficient human mesh recovery from multi-view images. PaFF is an iterative regression framework that performs feature extraction and fusion alternately. At each iteration, PaFF extracts pixel-aligned feedback features from each input view according to the reprojection of the current estimation and fuses them together with respect to each vertex of the downsampled mesh. In this way, our regressor can not only perceive the misalignment status of each view from the feedback features but also correct the mesh parameters more effectively based on the feature fusion on mesh vertices. Additionally, our regressor disentangles the global orientation and translation of the body mesh from the estimation of mesh parameters such that the camera parameters of input views can be better utilized in the regression process. The efficacy of our method is validated in the Human3.6M dataset via comprehensive ablation experiments, where PaFF achieves 33.02 MPJPE and brings significant improvements over the previous best solutions by more than 29%. 
The project page with code and video results can be found at https://kairobo.github.io/PaFF/.", + "primary_area": "computer vision i", + "author": "Kai Jia; Hongwen Zhang; Liang An; Yebin Liu", + "authorids": "", + "aff": "Department of Automation, Tsinghua University, Beijing, China; Department of Automation, Tsinghua University, Beijing, China; Department of Automation, Tsinghua University, Beijing, China; Department of Automation, Tsinghua University, Beijing, China", + "bibtex": "@article{Jia_Zhang_An_Liu_2023, title={Delving Deep into Pixel Alignment Feature for Accurate Multi-View Human Mesh Recovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25179}, DOI={10.1609/aaai.v37i1.25179}, abstractNote={Regression-based methods have shown high efficiency and effectiveness for multi-view human mesh recovery. The key components of a typical regressor lie in the feature extraction of input views and the fusion of multi-view features. In this paper, we present Pixel-aligned Feedback Fusion (PaFF) for accurate yet efficient human mesh recovery from multi-view images. PaFF is an iterative regression framework that performs feature extraction and fusion alternately. At each iteration, PaFF extracts pixel-aligned feedback features from each input view according to the reprojection of the current estimation and fuses them together with respect to each vertex of the downsampled mesh. In this way, our regressor can not only perceive the misalignment status of each view from the feedback features but also correct the mesh parameters more effectively based on the feature fusion on mesh vertices. Additionally, our regressor disentangles the global orientation and translation of the body mesh from the estimation of mesh parameters such that the camera parameters of input views can be better utilized in the regression process. 
The efficacy of our method is validated in the Human3.6M dataset via comprehensive ablation experiments, where PaFF achieves 33.02 MPJPE and brings significant improvements over the previous best solutions by more than 29%. The project page with code and video results can be found at https://kairobo.github.io/PaFF/.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Kai and Zhang, Hongwen and An, Liang and Liu, Yebin}, year={2023}, month={Jun.}, pages={989-997} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25179/24951", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25179", + "pdf_size": 6608657, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3293973517831598514&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "umich.edu;mail.tsinghua.edu.cn;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn", + "email": "umich.edu;mail.tsinghua.edu.cn;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn", + "github": "", + "project": "https://kairobo.github.io/PaFF/", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Department of Automation", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26331", + "title": "Delving into the Adversarial Robustness of Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "In Federated Learning (FL), models are as fragile as centrally trained models against adversarial examples. However, the adversarial robustness of federated learning remains largely unexplored. This paper casts light on the challenge of adversarial robustness of federated learning. 
To facilitate a better understanding of the adversarial vulnerability of the existing FL methods, we conduct comprehensive robustness evaluations on various attacks and adversarial training methods. Moreover, we reveal the negative impacts induced by directly adopting adversarial training in FL, which seriously hurts the test accuracy, especially in non-IID settings. In this work, we propose a novel algorithm called Decision Boundary based Federated Adversarial Training (DBFAT), which consists of two components (local re-weighting and global regularization) to improve both accuracy and robustness of FL systems. Extensive experiments on multiple datasets demonstrate that DBFAT consistently outperforms other baselines under both IID and non-IID settings.", + "primary_area": "machine learning iv", + "author": "Jie Zhang; Bo Li; Chen Chen; Lingjuan Lyu; Shuang Wu; Shouhong Ding; Chao Wu", + "authorids": "", + "aff": "Zhejiang University; Youtu Lab, Tencent; Sony AI; Youtu Lab, Tencent; Youtu Lab, Tencent; Youtu Lab, Tencent; Zhejiang University", + "bibtex": "@article{Zhang_Li_Chen_Lyu_Wu_Ding_Wu_2023, title={Delving into the Adversarial Robustness of Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26331}, DOI={10.1609/aaai.v37i9.26331}, abstractNote={In Federated Learning (FL), models are as fragile as centrally trained models against adversarial examples. However, the adversarial robustness of federated learning remains largely unexplored. This paper casts light on the challenge of adversarial robustness of federated learning. To facilitate a better understanding of the adversarial vulnerability of the existing FL methods, we conduct comprehensive robustness evaluations on various attacks and adversarial training methods. Moreover, we reveal the negative impacts induced by directly adopting adversarial training in FL, which seriously hurts the test accuracy, especially in non-IID settings. 
In this work, we propose a novel algorithm called Decision Boundary based Federated Adversarial Training (DBFAT), which consists of two components (local re-weighting and global regularization) to improve both accuracy and robustness of FL systems. Extensive experiments on multiple datasets demonstrate that DBFAT consistently outperforms other baselines under both IID and non-IID settings.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jie and Li, Bo and Chen, Chen and Lyu, Lingjuan and Wu, Shuang and Ding, Shouhong and Wu, Chao}, year={2023}, month={Jun.}, pages={11245-11253} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26331/26103", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26331", + "pdf_size": 316985, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16470839913423507405&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;tencent.com;sony.com;sony.com;tencent.com;tencent.com;zju.edu.cn", + "email": "zju.edu.cn;tencent.com;sony.com;sony.com;tencent.com;tencent.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;1;1;1;0", + "aff_unique_norm": "Zhejiang University;Tencent;Sony", + "aff_unique_dep": ";Youtu Lab;Sony AI", + "aff_unique_url": "https://www.zju.edu.cn;https://www.tencent.com;https://www.sony.com", + "aff_unique_abbr": "ZJU;Tencent;Sony AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0;0", + "aff_country_unique": "China;Japan" + }, + { + "id": "article-27085", + "title": "Demo Alleviate: Demonstrating Artificial Intelligence Enabled Virtual Assistance for Telehealth: The Mental Health Case", + "track": "demonstrations", + "status": "Technical", + "abstract": "After the pandemic, artificial intelligence (AI) powered support for mental health care has become increasingly important. 
The breadth and complexity of significant challenges required to provide adequate care involve:\n(a) Personalized patient understanding, (b) Safety-constrained and medically validated chatbot patient interactions, and (c) Support for continued feedback-based refinements in design using chatbot-patient interactions. \nWe propose Alleviate, a chatbot designed to assist patients suffering from mental health challenges with personalized care and assist clinicians with understanding their patients better. Alleviate draws from an array of publicly available clinically valid mental-health texts and databases, allowing Alleviate to make medically sound and informed decisions. In addition, Alleviate's modular design and explainable decision-making lends itself to robust and continued feedback-based refinements to its design. In this paper, we explain the different modules of Alleviate and submit a short video demonstrating Alleviate's capabilities to help patients and clinicians understand each other better to facilitate optimal care strategies.", + "primary_area": "", + "author": "Kaushik Roy; Vedant Khandelwal; Raxit Goswami; Nathan Dolbir; Jinendra Malekar; Amit Sheth", + "authorids": "", + "aff": "Artificial Intelligence Institute, University of South Carolina; Artificial Intelligence Institute, University of South Carolina; Artificial Intelligence Institute, University of South Carolina; Artificial Intelligence Institute, University of South Carolina; Artificial Intelligence Institute, University of South Carolina; Artificial Intelligence Institute, University of South Carolina", + "bibtex": "@article{Roy_Khandelwal_Goswami_Dolbir_Malekar_Sheth_2024, title={Demo Alleviate: Demonstrating Artificial Intelligence Enabled Virtual Assistance for Telehealth: The Mental Health Case}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27085}, DOI={10.1609/aaai.v37i13.27085}, abstractNote={After the pandemic, artificial intelligence (AI) powered support for 
mental health care has become increasingly important. The breadth and complexity of significant challenges required to provide adequate care involve:\n(a) Personalized patient understanding, (b) Safety-constrained and medically validated chatbot patient interactions, and (c) Support for continued feedback-based refinements in design using chatbot-patient interactions. We propose Alleviate, a chatbot designed to assist patients suffering from mental health challenges with personalized care and assist clinicians with understanding their patients better. Alleviate draws from an array of publicly available clinically valid mental-health texts and databases, allowing Alleviate to make medically sound and informed decisions. In addition, Alleviate\u2019s modular design and explainable decision-making lends itself to robust and continued feedback-based refinements to its design. In this paper, we explain the different modules of Alleviate and submit a short video demonstrating Alleviate\u2019s capabilities to help patients and clinicians understand each other better to facilitate optimal care strategies.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Roy, Kaushik and Khandelwal, Vedant and Goswami, Raxit and Dolbir, Nathan and Malekar, Jinendra and Sheth, Amit}, year={2024}, month={Jul.}, pages={16479-16481} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27085/26857", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27085", + "pdf_size": 702774, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2530632337934389966&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "email.sc.edu;email.sc.edu;email.sc.edu;email.sc.edu;mailbox.sc.edu;sc.edu", + "email": "email.sc.edu;email.sc.edu;email.sc.edu;email.sc.edu;mailbox.sc.edu;sc.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": 
"University of South Carolina", + "aff_unique_dep": "Artificial Intelligence Institute", + "aff_unique_url": "https://www.sc.edu", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26961", + "title": "Demystify the Gravity Well in the Optimization Landscape (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We provide both empirical and theoretical insights to demystify the gravity well phenomenon in the optimization landscape. We start from describe the problem setup and theoretical results (an escape time lower bound) of the Softmax Gravity Well (SGW) in the literature. Then we move toward the understanding of a recent observation called ASR gravity well. We provide an explanation of why normal distribution with high variance can lead to suboptimal plateaus from an energy function point of view. We also contribute to the empirical insights of curriculum learning by comparison of policy initialization by different normal distributions. Furthermore, we provide the ASR escape time lower bound to understand the ASR gravity well theoretically. 
Future work includes more specific modeling of the reward as a function of time and quantitative evaluation of normal distribution\u2019s influence on policy initialization.", + "primary_area": "", + "author": "Jason Xiaotian Dou; Runxue Bao; Susan Song; Shuran Yang; Yanfu Zhang; Paul Pu Liang; Haiyi Harry Mao", + "authorids": "", + "aff": "University of Pittsburgh; University of Pittsburgh; Carnegie Mellon University; University of California, Berkeley; University of Pittsburgh; Carnegie Mellon University; University of Pittsburgh", + "bibtex": "@article{Dou_Bao_Song_Yang_Zhang_Liang_Mao_2024, title={Demystify the Gravity Well in the Optimization Landscape (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26961}, DOI={10.1609/aaai.v37i13.26961}, abstractNote={We provide both empirical and theoretical insights to demystify the gravity well phenomenon in the optimization landscape. We start from describe the problem setup and theoretical results (an escape time lower bound) of the Softmax Gravity Well (SGW) in the literature. Then we move toward the understanding of a recent observation called ASR gravity well. We provide an explanation of why normal distribution with high variance can lead to suboptimal plateaus from an energy function point of view. We also contribute to the empirical insights of curriculum learning by comparison of policy initialization by different normal distributions. Furthermore, we provide the ASR escape time lower bound to understand the ASR gravity well theoretically. 
Future work includes more specific modeling of the reward as a function of time and quantitative evaluation of normal distribution\u2019s influence on policy initialization.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dou, Jason Xiaotian and Bao, Runxue and Song, Susan and Yang, Shuran and Zhang, Yanfu and Liang, Paul Pu and Mao, Haiyi Harry}, year={2024}, month={Jul.}, pages={16202-16203} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26961/26733", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26961", + "pdf_size": 1218987, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=550770107116727579&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com; ; ; ; ; ; ", + "email": "gmail.com; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;0;1;0", + "aff_unique_norm": "University of Pittsburgh;Carnegie Mellon University;University of California, Berkeley", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.pitt.edu;https://www.cmu.edu;https://www.berkeley.edu", + "aff_unique_abbr": "Pitt;CMU;UC Berkeley", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Berkeley", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26022", + "title": "Demystifying Randomly Initialized Networks for Evaluating Generative Models", + "track": "main", + "status": "Technical", + "abstract": "Evaluation of generative models is mostly based on the comparison between the estimated distribution and the ground truth distribution in a certain feature space. To embed samples into informative features, previous works often use convolutional neural networks optimized for classification, which is criticized by recent studies. Therefore, various feature spaces have been explored to discover alternatives. 
Among them, a surprising approach is to use a randomly initialized neural network for feature embedding. However, the fundamental basis to employ the random features has not been sufficiently justified. In this paper, we rigorously investigate the feature space of models with random weights in comparison to that of trained models. Furthermore, we provide an empirical evidence to choose networks for random features to obtain consistent and reliable results. Our results indicate that the features from random networks can evaluate generative models well similarly to those from trained networks, and furthermore, the two types of features can be used together in a complementary way.", + "primary_area": "machine learning ii", + "author": "Junghyuk Lee; Jun-Hyuk Kim; Jong-Seok Lee", + "authorids": "", + "aff": "Yonsei University, South Korea; Yonsei University, South Korea; Yonsei University, South Korea", + "bibtex": "@article{Lee_Kim_Lee_2023, title={Demystifying Randomly Initialized Networks for Evaluating Generative Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26022}, DOI={10.1609/aaai.v37i7.26022}, abstractNote={Evaluation of generative models is mostly based on the comparison between the estimated distribution and the ground truth distribution in a certain feature space. To embed samples into informative features, previous works often use convolutional neural networks optimized for classification, which is criticized by recent studies. Therefore, various feature spaces have been explored to discover alternatives. Among them, a surprising approach is to use a randomly initialized neural network for feature embedding. However, the fundamental basis to employ the random features has not been sufficiently justified. In this paper, we rigorously investigate the feature space of models with random weights in comparison to that of trained models. 
Furthermore, we provide an empirical evidence to choose networks for random features to obtain consistent and reliable results. Our results indicate that the features from random networks can evaluate generative models well similarly to those from trained networks, and furthermore, the two types of features can be used together in a complementary way.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Junghyuk and Kim, Jun-Hyuk and Lee, Jong-Seok}, year={2023}, month={Jun.}, pages={8482-8490} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26022/25794", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26022", + "pdf_size": 5023210, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=919327016872114309&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff_domain": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr", + "email": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Yonsei University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.yonsei.ac.kr", + "aff_unique_abbr": "Yonsei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26324", + "title": "Denoising Multi-Similarity Formulation: A Self-Paced Curriculum-Driven Approach for Robust Metric Learning", + "track": "main", + "status": "Technical", + "abstract": "Deep Metric Learning (DML) is a group of techniques that aim to measure the similarity between objects through the neural network. Although the number of DML methods has rapidly increased in recent years, most previous studies cannot effectively handle noisy data, which commonly exists in practical applications and often leads to serious performance deterioration. 
To overcome this limitation, in this paper, we build a connection between noisy samples and hard samples in the framework of self-paced learning, and propose a Balanced Self-Paced Metric Learning (BSPML) algorithm with a denoising multi-similarity formulation, where noisy samples are treated as extremely hard samples and adaptively excluded from the model training by sample weighting. Especially, due to the pairwise relationship and a new balance regularization term, the sub-problem w.r.t. sample weights is a nonconvex quadratic function. To efficiently solve this nonconvex quadratic problem, we propose a doubly stochastic projection coordinate gradient algorithm. Importantly, we theoretically prove the convergence not only for the doubly stochastic projection coordinate gradient algorithm, but also for our BSPML algorithm. Experimental results on several standard data sets demonstrate that our BSPML algorithm has better generalization ability and robustness than the state-of-the-art robust DML approaches.", + "primary_area": "machine learning iv", + "author": "Chenkang Zhang; Lei Luo; Bin Gu", + "authorids": "", + "aff": "School of Computer and Software, Nanjing University of Information Science and Technology, P.R.China; School of Computer Science and Engineering, Nanjing University of Science and Technology, P.R.China; School of Computer and Software, Nanjing University of Information Science and Technology, P.R.China + MBZUAI, United Arab Emirates", + "bibtex": "@article{Zhang_Luo_Gu_2023, title={Denoising Multi-Similarity Formulation: A Self-Paced Curriculum-Driven Approach for Robust Metric Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26324}, DOI={10.1609/aaai.v37i9.26324}, abstractNote={Deep Metric Learning (DML) is a group of techniques that aim to measure the similarity between objects through the neural network. 
Although the number of DML methods has rapidly increased in recent years, most previous studies cannot effectively handle noisy data, which commonly exists in practical applications and often leads to serious performance deterioration. To overcome this limitation, in this paper, we build a connection between noisy samples and hard samples in the framework of self-paced learning, and propose a Balanced Self-Paced Metric Learning (BSPML) algorithm with a denoising multi-similarity formulation, where noisy samples are treated as extremely hard samples and adaptively excluded from the model training by sample weighting. Especially, due to the pairwise relationship and a new balance regularization term, the sub-problem w.r.t. sample weights is a nonconvex quadratic function. To efficiently solve this nonconvex quadratic problem, we propose a doubly stochastic projection coordinate gradient algorithm. Importantly, we theoretically prove the convergence not only for the doubly stochastic projection coordinate gradient algorithm, but also for our BSPML algorithm. 
Experimental results on several standard data sets demonstrate that our BSPML algorithm has better generalization ability and robustness than the state-of-the-art robust DML approaches.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Chenkang and Luo, Lei and Gu, Bin}, year={2023}, month={Jun.}, pages={11183-11191} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26324/26096", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26324", + "pdf_size": 786467, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6696353157300437090&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "nuist.edu.cn;gmail.com;gmail.com", + "email": "nuist.edu.cn;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "Nanjing University of Information Science and Technology;Nanjing University of Science and Technology;Mohamed Bin Zayed University of Artificial Intelligence", + "aff_unique_dep": "School of Computer and Software;School of Computer Science and Engineering;", + "aff_unique_url": "http://www.nuist.edu.cn;http://www.nust.edu.cn;https://www.mbzuai.ac.ae", + "aff_unique_abbr": ";NUST;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1", + "aff_country_unique": "China;United Arab Emirates" + }, + { + "id": "article-26508", + "title": "Denoising Pre-training for Machine Translation Quality Estimation with Curriculum Learning", + "track": "main", + "status": "Technical", + "abstract": "Quality estimation (QE) aims to assess the quality of machine translations when reference translations are unavailable. QE plays a crucial role in many real-world applications of machine translation. 
Because labeled QE data are usually limited in scale, recent research, such as DirectQE, pre-trains QE models with pseudo QE data and obtains remarkable performance. However, there tends to be inevitable noise in the pseudo data, hindering models from learning QE accurately. Our study shows that the noise mainly comes from the differences between pseudo and real translation outputs. To handle this problem, we propose CLQE, a denoising pre-training framework for QE based on curriculum learning. More specifically, we propose to measure the degree of noise in the pseudo QE data with some metrics based on statistical or distributional features. With the guidance of these metrics, CLQE gradually pre-trains the QE model using data from cleaner to noisier. Experiments on various benchmarks reveal that CLQE outperforms DirectQE and other strong baselines. We also show that with our framework, pre-training converges faster than directly using the pseudo data. We make our CLQE code available (https://github.com/NJUNLP/njuqe).", + "primary_area": "speech natural language processing", + "author": "Xiang Geng; Yu Zhang; Jiahuan Li; Shujian Huang; Hao Yang; Shimin Tao; Yimeng Chen; Ning Xie; Jiajun Chen", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; Huawei Translation Services Center, Beijing, China; Huawei Translation Services Center, Beijing, China; Huawei Translation Services Center, Beijing, China; Huawei Translation Services Center, Beijing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China", + "bibtex": "@article{Geng_Zhang_Li_Huang_Yang_Tao_Chen_Xie_Chen_2023, title={Denoising 
Pre-training for Machine Translation Quality Estimation with Curriculum Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26508}, DOI={10.1609/aaai.v37i11.26508}, abstractNote={Quality estimation (QE) aims to assess the quality of machine translations when reference translations are unavailable. QE plays a crucial role in many real-world applications of machine translation. Because labeled QE data are usually limited in scale, recent research, such as DirectQE, pre-trains QE models with pseudo QE data and obtains remarkable performance. However, there tends to be inevitable noise in the pseudo data, hindering models from learning QE accurately. Our study shows that the noise mainly comes from the differences between pseudo and real translation outputs. To handle this problem, we propose CLQE, a denoising pre-training framework for QE based on curriculum learning. More specifically, we propose to measure the degree of noise in the pseudo QE data with some metrics based on statistical or distributional features. With the guidance of these metrics, CLQE gradually pre-trains the QE model using data from cleaner to noisier. Experiments on various benchmarks reveal that CLQE outperforms DirectQE and other strong baselines. We also show that with our framework, pre-training converges faster than directly using the pseudo data. 
We make our CLQE code available (https://github.com/NJUNLP/njuqe).}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Geng, Xiang and Zhang, Yu and Li, Jiahuan and Huang, Shujian and Yang, Hao and Tao, Shimin and Chen, Yimeng and Xie, Ning and Chen, Jiajun}, year={2023}, month={Jun.}, pages={12827-12835} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26508/26280", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26508", + "pdf_size": 374320, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16913754588851506380&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;huawei.com;huawei.com;huawei.com;huawei.com;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;huawei.com;huawei.com;huawei.com;huawei.com;nju.edu.cn", + "github": "https://github.com/NJUNLP/njuqe", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;1;1;1;1;0", + "aff_unique_norm": "Nanjing University;Huawei", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;Translation Services Center", + "aff_unique_url": "http://www.nju.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "Nanjing U;Huawei", + "aff_campus_unique_index": "0;0;0;0;1;1;1;1;0", + "aff_campus_unique": "Nanjing;Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25088", + "title": "Denoising after Entropy-Based Debiasing a Robust Training Method for Dataset Bias with Noisy Labels", + "track": "main", + "status": "Technical", + "abstract": "Improperly constructed datasets can result in inaccurate inferences. For instance, models trained on biased datasets perform poorly in terms of generalization (i.e., dataset bias). 
Recent debiasing techniques have successfully achieved generalization performance by underestimating easy-to-learn samples (i.e., bias-aligned samples) and highlighting difficult-to-learn samples (i.e., bias-conflicting samples). However, these techniques may fail owing to noisy labels, because the trained model recognizes noisy labels as difficult-to-learn and thus highlights them. In this study, we find that earlier approaches that used the provided labels to quantify difficulty could be affected by the small proportion of noisy labels. Furthermore, we find that running denoising algorithms before debiasing is ineffective because denoising algorithms reduce the impact of difficult-to-learn samples, including valuable bias-conflicting samples. Therefore, we propose an approach called denoising after entropy-based debiasing, i.e., DENEB, which has three main stages. (1) The prejudice model is trained by emphasizing (bias-aligned, clean) samples, which are selected using a Gaussian Mixture Model. (2) Using the per-sample entropy from the output of the prejudice model, the sampling probability of each sample that is proportional to the entropy is computed. (3) The final model is trained using existing denoising algorithms with the mini-batches constructed by following the computed sampling probability. 
Compared to existing debiasing and denoising algorithms, our method achieves better debiasing performance on multiple benchmarks.", + "primary_area": "computer vision i", + "author": "Sumyeong Ahn; Se-Young Yun", + "authorids": "", + "aff": "Kim Jaechul Graduate School of AI, KAIST, Seoul, Republic of Korea; Kim Jaechul Graduate School of AI, KAIST, Seoul, Republic of Korea", + "bibtex": "@article{Ahn_Yun_2023, title={Denoising after Entropy-Based Debiasing a Robust Training Method for Dataset Bias with Noisy Labels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25088}, DOI={10.1609/aaai.v37i1.25088}, abstractNote={Improperly constructed datasets can result in inaccurate inferences. For instance, models trained on biased datasets perform poorly in terms of generalization (i.e., dataset bias). Recent debiasing techniques have successfully achieved generalization performance by underestimating easy-to-learn samples (i.e., bias-aligned samples) and highlighting difficult-to-learn samples (i.e., bias-conflicting samples). However, these techniques may fail owing to noisy labels, because the trained model recognizes noisy labels as difficult-to-learn and thus highlights them. In this study, we find that earlier approaches that used the provided labels to quantify difficulty could be affected by the small proportion of noisy labels. Furthermore, we find that running denoising algorithms before debiasing is ineffective because denoising algorithms reduce the impact of difficult-to-learn samples, including valuable bias-conflicting samples. Therefore, we propose an approach called denoising after entropy-based debiasing, i.e., DENEB, which has three main stages. (1) The prejudice model is trained by emphasizing (bias-aligned, clean) samples, which are selected using a Gaussian Mixture Model. (2) Using the per-sample entropy from the output of the prejudice model, the sampling probability of each sample that is proportional to the entropy is computed. 
(3) The final model is trained using existing denoising algorithms with the mini-batches constructed by following the computed sampling probability. Compared to existing debiasing and denoising algorithms, our method achieves better debiasing performance on multiple benchmarks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ahn, Sumyeong and Yun, Se-Young}, year={2023}, month={Jun.}, pages={169-177} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25088/24860", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25088", + "pdf_size": 494843, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14154544690690053774&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "Kim Jaechul Graduate School of AI", + "aff_unique_url": "https://www.kaist.edu", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-25415", + "title": "DesNet: Decomposed Scale-Consistent Network for Unsupervised Depth Completion", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised depth completion aims to recover dense depth from the sparse one without using the ground-truth annotation. Although depth measurement obtained from LiDAR is usually sparse, it contains valid and real distance information, i.e., scale-consistent absolute depth values. Meanwhile, scale-agnostic counterparts seek to estimate relative depth and have achieved impressive performance. To leverage both the inherent characteristics, we thus suggest to model scale-consistent depth upon unsupervised scale-agnostic frameworks. 
Specifically, we propose the decomposed scale-consistent learning (DSCL) strategy, which disintegrates the absolute depth into relative depth prediction and global scale estimation, contributing to individual learning benefits. But unfortunately, most existing unsupervised scale-agnostic frameworks heavily suffer from depth holes due to the extremely sparse depth input and weak supervisory signal. To tackle this issue, we introduce the global depth guidance (GDG) module, which attentively propagates dense depth reference into the sparse target via novel dense-to-sparse attention. Extensive experiments show the superiority of our method on outdoor KITTI, ranking 1st and outperforming the best KBNet more than 12% in RMSE. Additionally, our approach achieves state-of-the-art performance on indoor NYUv2 benchmark as well.", + "primary_area": "computer vision iii", + "author": "Zhiqiang Yan; Kun Wang; Xiang Li; Zhenyu Zhang; Jun Li; Jian Yang", + "authorids": "", + "aff": "PCA Lab, Nanjing University of Science and Technology, China; PCA Lab, Nanjing University of Science and Technology, China; PCA Lab, Nanjing University of Science and Technology, China; PCA Lab, Nanjing University of Science and Technology, China; PCA Lab, Nanjing University of Science and Technology, China; PCA Lab, Nanjing University of Science and Technology, China", + "bibtex": "@article{Yan_Wang_Li_Zhang_Li_Yang_2023, title={DesNet: Decomposed Scale-Consistent Network for Unsupervised Depth Completion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25415}, DOI={10.1609/aaai.v37i3.25415}, abstractNote={Unsupervised depth completion aims to recover dense depth from the sparse one without using the ground-truth annotation. Although depth measurement obtained from LiDAR is usually sparse, it contains valid and real distance information, i.e., scale-consistent absolute depth values. 
Meanwhile, scale-agnostic counterparts seek to estimate relative depth and have achieved impressive performance. To leverage both the inherent characteristics, we thus suggest to model scale-consistent depth upon unsupervised scale-agnostic frameworks. Specifically, we propose the decomposed scale-consistent learning (DSCL) strategy, which disintegrates the absolute depth into relative depth prediction and global scale estimation, contributing to individual learning benefits. But unfortunately, most existing unsupervised scale-agnostic frameworks heavily suffer from depth holes due to the extremely sparse depth input and weak supervisory signal. To tackle this issue, we introduce the global depth guidance (GDG) module, which attentively propagates dense depth reference into the sparse target via novel dense-to-sparse attention. Extensive experiments show the superiority of our method on outdoor KITTI, ranking 1st and outperforming the best KBNet more than 12% in RMSE. Additionally, our approach achieves state-of-the-art performance on indoor NYUv2 benchmark as well.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Zhiqiang and Wang, Kun and Li, Xiang and Zhang, Zhenyu and Li, Jun and Yang, Jian}, year={2023}, month={Jun.}, pages={3109-3117} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25415/25187", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25415", + "pdf_size": 1029406, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12249727867843759437&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;foxmail.com;njust.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;foxmail.com;njust.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Nanjing University of Science and Technology", + 
"aff_unique_dep": "PCA Lab", + "aff_unique_url": "http://www.nust.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25992", + "title": "Design Amortization for Bayesian Optimal Experimental Design", + "track": "main", + "status": "Technical", + "abstract": "Bayesian optimal experimental design is a sub-field of statistics focused on developing methods to make efficient use of experimental resources. Any potential design is evaluated in terms of a utility function, such as the (theoretically well-justified) expected information gain (EIG); unfortunately however, under most circumstances the EIG is intractable to evaluate. In this work we build off of successful variational approaches, which optimize a parameterized variational model with respect to bounds on the EIG. Past work focused on learning a new variational model from scratch for each new design considered. Here we present a novel neural architecture that allows experimenters to optimize a single variational model that can estimate the EIG for potentially infinitely many designs. To further improve computational efficiency, we also propose to train the variational model on a significantly cheaper-to-evaluate lower bound, and show empirically that the resulting model provides an excellent guide for more accurate, but expensive to evaluate bounds on the EIG. We demonstrate the effectiveness of our technique on generalized linear models, a class of statistical models that is widely used in the analysis of controlled experiments. 
Experiments show that our method is able to greatly improve accuracy over existing approximation strategies, and achieve these results with far better sample efficiency.", + "primary_area": "machine learning ii", + "author": "Noble Kennamer; Steven Walton; Alexander Ihler", + "authorids": "", + "aff": "Department of Computer Science, University of California Irvine; Department of Computer Science, University of Oregon; Department of Computer Science, University of California Irvine", + "bibtex": "@article{Kennamer_Walton_Ihler_2023, title={Design Amortization for Bayesian Optimal Experimental Design}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25992}, DOI={10.1609/aaai.v37i7.25992}, abstractNote={Bayesian optimal experimental design is a sub-field of statistics focused on developing methods to make efficient use of experimental resources. Any potential design is evaluated in terms of a utility function, such as the (theoretically well-justified) expected information gain (EIG); unfortunately however, under most circumstances the EIG is intractable to evaluate. In this work we build off of successful variational approaches, which optimize a parameterized variational model with respect to bounds on the EIG. Past work focused on learning a new variational model from scratch for each new design considered. Here we present a novel neural architecture that allows experimenters to optimize a single variational model that can estimate the EIG for potentially infinitely many designs. To further improve computational efficiency, we also propose to train the variational model on a significantly cheaper-to-evaluate lower bound, and show empirically that the resulting model provides an excellent guide for more accurate, but expensive to evaluate bounds on the EIG. We demonstrate the effectiveness of our technique on generalized linear models, a class of statistical models that is widely used in the analysis of controlled experiments. 
Experiments show that our method is able to greatly improve accuracy over existing approximation strategies, and achieve these results with far better sample efficiency.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kennamer, Noble and Walton, Steven and Ihler, Alexander}, year={2023}, month={Jun.}, pages={8220-8227} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25992/25764", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25992", + "pdf_size": 854946, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10106575070085656812&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "uci.edu; ; ", + "email": "uci.edu; ; ", + "github": "https://github.com/NobleKennamer/amortized_boed", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of California, Irvine;University of Oregon", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.uci.edu;https://www.uoregon.edu", + "aff_unique_abbr": "UCI;UO", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Irvine;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26872", + "title": "DetAIL: A Tool to Automatically Detect and Analyze Drift in Language", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Machine learning and deep learning-based decision making has become part of today's software. The goal of this work is to ensure that machine learning and deep learning-based systems are as trusted as traditional software. Traditional software is made dependable by following rigorous practice like static analysis, testing, debugging, verifying, and repairing throughout the development and maintenance life-cycle. 
Similarly for machine learning systems, we need to keep these models up to date so that their performance is not compromised. For this, current systems rely on scheduled re-training of these models as new data kicks in. In this work, we propose DetAIL, a tool to measure the data drift that takes place when new data kicks in so that one can adaptively re-train the models whenever re-training is actually required irrespective of schedules. In addition to that, we generate various explanations at sentence level and dataset level to capture why a given payload text has drifted.", + "primary_area": "innovative tools for enabling ai application", + "author": "Nishtha Madaan; Adithya Manjunatha; Hrithik Nambiar; Aviral Goel; Harivansh Kumar; Diptikalyan Saha; Srikanta Bedathur", + "authorids": "", + "aff": "IBM Research India+Indian Institute of Technology Delhi; Birla Institute of Technology and Science, Goa, India; Birla Institute of Technology and Science, Goa, India; Birla Institute of Technology and Science, Goa, India; IBM Watson Openscale, India; IBM Research India; Indian Institute of Technology Delhi, India", + "bibtex": "@article{Madaan_Manjunatha_Nambiar_Goel_Kumar_Saha_Bedathur_2024, title={DetAIL: A Tool to Automatically Detect and Analyze Drift in Language}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26872}, DOI={10.1609/aaai.v37i13.26872}, abstractNote={Machine learning and deep learning-based decision making has become part of today\u2019s software. The goal of this work is to ensure that machine learning and deep learning-based systems are as trusted as traditional software. Traditional software is made dependable by following rigorous practice like static analysis, testing, debugging, verifying, and repairing throughout the development and maintenance life-cycle. Similarly for machine learning systems, we need to keep these models up to date so that their performance is not compromised. 
For this, current systems rely on scheduled re-training of these models as new data kicks in. In this work, we propose DetAIL, a tool to measure the data drift that takes place when new data kicks in so that one can adaptively re-train the models whenever re-training is actually required irrespective of schedules. In addition to that, we generate various explanations at sentence level and dataset level to capture why a given payload text has drifted.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Madaan, Nishtha and Manjunatha, Adithya and Nambiar, Hrithik and Goel, Aviral and Kumar, Harivansh and Saha, Diptikalyan and Bedathur, Srikanta}, year={2024}, month={Jul.}, pages={15767-15773} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26872/26644", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26872", + "pdf_size": 1257818, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15380960095306281833&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "in.ibm.com;goa.bits-pilani.ac.in;goa.bits-pilani.ac.in;goa.bits-pilani.ac.in;in.ibm.com;in.ibm.com;cse.iitd.ac.in", + "email": "in.ibm.com;goa.bits-pilani.ac.in;goa.bits-pilani.ac.in;goa.bits-pilani.ac.in;in.ibm.com;in.ibm.com;cse.iitd.ac.in", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;2;2;3;0;1", + "aff_unique_norm": "IBM Research;Indian Institute of Technology Delhi;Birla Institute of Technology and Science;IBM Watson Openscale", + "aff_unique_dep": "Research;;;", + "aff_unique_url": "https://www.ibm.com/research/in;https://www.iitd.ac.in;https://www.bits-goa.ac.in;https://www.ibm.com/watson", + "aff_unique_abbr": "IBM;IIT Delhi;BITS Goa;IBM Watson", + "aff_campus_unique_index": "1;2;2;2;1", + "aff_campus_unique": ";Delhi;Goa", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26692", + "title": 
"Detecting Anomalous Networks of Opioid Prescribers and Dispensers in Prescription Drug Data", + "track": "aaai special track", + "status": "Technical", + "abstract": "The opioid overdose epidemic represents a serious public health crisis, with fatality rates rising considerably over the past several years. To help address the abuse of prescription opioids, state governments collect data on dispensed prescriptions, yet the use of these data is typically limited to manual searches. In this paper, we propose a novel graph-based framework for detecting anomalous opioid prescribing patterns in state Prescription Drug Monitoring Program (PDMP) data, which could aid governments in deterring opioid diversion and abuse. Specifically, we seek to identify connected networks of opioid prescribers and dispensers who engage in high-risk and possibly illicit activity. We develop and apply a novel extension of the Non-Parametric Heterogeneous Graph Scan (NPHGS) to two years of de-identified PDMP data from the state of Kansas, and find that NPHGS identifies subgraphs that are significantly more anomalous than those detected by other graph-based methods. NPHGS also reveals clusters of potentially illicit activity, which may strengthen state law enforcement and regulatory capabilities. Our paper is the first to demonstrate how prescription data can systematically identify anomalous opioid prescribers and dispensers, as well as illustrating the efficacy of a network-based approach. Additionally, our technical extensions to NPHGS offer both improved flexibility and graph density reduction, enabling the framework to be replicated across jurisdictions and extended to other problem domains.", + "primary_area": "ai for social impact", + "author": "Katie Rosman; Daniel B. 
Neill", + "authorids": "", + "aff": "Machine Learning for Good Laboratory, New York University; Machine Learning for Good Laboratory, New York University", + "bibtex": "@article{Rosman_Neill_2023, title={Detecting Anomalous Networks of Opioid Prescribers and Dispensers in Prescription Drug Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26692}, DOI={10.1609/aaai.v37i12.26692}, abstractNote={The opioid overdose epidemic represents a serious public health crisis, with fatality rates rising considerably over the past several years. To help address the abuse of prescription opioids, state governments collect data on dispensed prescriptions, yet the use of these data is typically limited to manual searches. In this paper, we propose a novel graph-based framework for detecting anomalous opioid prescribing patterns in state Prescription Drug Monitoring Program (PDMP) data, which could aid governments in deterring opioid diversion and abuse. Specifically, we seek to identify connected networks of opioid prescribers and dispensers who engage in high-risk and possibly illicit activity. We develop and apply a novel extension of the Non-Parametric Heterogeneous Graph Scan (NPHGS) to two years of de-identified PDMP data from the state of Kansas, and find that NPHGS identifies subgraphs that are significantly more anomalous than those detected by other graph-based methods. NPHGS also reveals clusters of potentially illicit activity, which may strengthen state law enforcement and regulatory capabilities. Our paper is the first to demonstrate how prescription data can systematically identify anomalous opioid prescribers and dispensers, as well as illustrating the efficacy of a network-based approach. 
Additionally, our technical extensions to NPHGS offer both improved flexibility and graph density reduction, enabling the framework to be replicated across jurisdictions and extended to other problem domains.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rosman, Katie and Neill, Daniel B.}, year={2023}, month={Jun.}, pages={14470-14477} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26692/26464", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26692", + "pdf_size": 186260, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:EE4lcfx9Kw8J:scholar.google.com/&scioq=Detecting+Anomalous+Networks+of+Opioid+Prescribers+and+Dispensers+in+Prescription+Drug+Data&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "nyu.edu;nyu.edu", + "email": "nyu.edu;nyu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "New York University", + "aff_unique_dep": "Machine Learning for Good Laboratory", + "aff_unique_url": "https://www.nyu.edu", + "aff_unique_abbr": "NYU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "New York", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26895", + "title": "Detecting Exclusive Language during Pair Programming", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "Inclusive team participation is one of the most important factors that aids effective collaboration and pair programming. In this paper, we investigated the ability of linguistic features and a transformer-based language model to detect exclusive and inclusive language. The task of detecting exclusive language was approached as a text classification problem. 
We created a research community resource consisting of a dataset of 40,490 labeled utterances obtained from three programming assignments involving 34 students pair programming in a remote environment. This research involves the first successful automated detection of exclusive language during pair programming. Additionally, this is the first work to perform a computational linguistic analysis on the verbal interaction common in the context of inclusive and exclusive language during pair programming.", + "primary_area": "", + "author": "Solomon Ubani; Rodney Nielsen; Helen Li", + "authorids": "", + "aff": "Department of Computer Science and Engineering, University of North Texas, USA; Department of Computer Science and Engineering, University of North Texas, USA; Department of Computer Science and Engineering, University of North Texas, USA", + "bibtex": "@article{Ubani_Nielsen_Li_2024, title={Detecting Exclusive Language during Pair Programming}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26895}, DOI={10.1609/aaai.v37i13.26895}, abstractNote={Inclusive team participation is one of the most important factors that aids effective collaboration and pair programming. In this paper, we investigated the ability of linguistic features and a transformer-based language model to detect exclusive and inclusive language. The task of detecting exclusive language was approached as a text classification problem. We created a research community resource consisting of a dataset of 40,490 labeled utterances obtained from three programming assignments involving 34 students pair programming in a remote environment. This research involves the first successful automated detection of exclusive language during pair programming. 
Additionally, this is the first work to perform a computational linguistic analysis on the verbal interaction common in the context of inclusive and exclusive language during pair programming.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ubani, Solomon and Nielsen, Rodney and Li, Helen}, year={2024}, month={Jul.}, pages={15964-15971} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26895/26667", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26895", + "pdf_size": 348425, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17828254019756352293&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "my.unt.edu;unt.edu;my.unt.edu", + "email": "my.unt.edu;unt.edu;my.unt.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of North Texas", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.unt.edu", + "aff_unique_abbr": "UNT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25623", + "title": "Detecting Multivariate Time Series Anomalies with Zero Known Label", + "track": "main", + "status": "Technical", + "abstract": "Multivariate time series anomaly detection has been extensively studied under the one-class classification setting, where a training dataset with all normal instances is required. However, preparing such a dataset is very laborious since each single data instance should be fully guaranteed to be normal. It is, therefore, desired to explore multivariate time series anomaly detection methods based on the dataset without any label knowledge. 
In this paper, we propose MTGFlow, an unsupervised anomaly detection approach forMultivariate Time series anomaly detection via dynamic Graph and entityaware normalizing Flow, leaning only on a widely accepted hypothesis that abnormal instances exhibit sparse densities than the normal. However, the complex interdependencies among entities and the diverse inherent characteristics of each entity pose significant challenges to density estimation, let alone to detect anomalies based on the estimated possibility distribution. To tackle these problems, we propose to learn the mutual and dynamic relations among entities via a graph structure learning model, which helps to model the accurate distribution of multivariate time series. Moreover, taking account of distinct characteristics of the individual entities, an entity-aware normalizing flow is developed to describe each entity into a parameterized normal distribution, thereby producing fine-grained density estimation. Incorporating these two strategies, MTGFlow achieves superior anomaly detection performance. 
Experiments on five public datasets with seven baselines are conducted, MTGFlow outperforms the SOTA methods by up to 5.0 AUROC%.", + "primary_area": "data mining and knowledge management", + "author": "Qihang Zhou; Jiming Chen; Haoyu Liu; Shibo He; Wenchao Meng", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; Zhejiang University + NetEase Fuxi AI Lab; Zhejiang University + Key Laboratory of Collaborative Sensing and Autonomous Unmanned Systems of Zhejiang Province; Zhejiang University", + "bibtex": "@article{Zhou_Chen_Liu_He_Meng_2023, title={Detecting Multivariate Time Series Anomalies with Zero Known Label}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25623}, DOI={10.1609/aaai.v37i4.25623}, abstractNote={Multivariate time series anomaly detection has been extensively studied under the one-class classification setting, where a training dataset with all normal instances is required. However, preparing such a dataset is very laborious since each single data instance should be fully guaranteed to be normal. It is, therefore, desired to explore multivariate time series anomaly detection methods based on the dataset without any label knowledge. In this paper, we propose MTGFlow, an unsupervised anomaly detection approach forMultivariate Time series anomaly detection via dynamic Graph and entityaware normalizing Flow, leaning only on a widely accepted hypothesis that abnormal instances exhibit sparse densities than the normal. However, the complex interdependencies among entities and the diverse inherent characteristics of each entity pose significant challenges to density estimation, let alone to detect anomalies based on the estimated possibility distribution. To tackle these problems, we propose to learn the mutual and dynamic relations among entities via a graph structure learning model, which helps to model the accurate distribution of multivariate time series. 
Moreover, taking account of distinct characteristics of the individual entities, an entity-aware normalizing flow is developed to describe each entity into a parameterized normal distribution, thereby producing fine-grained density estimation. Incorporating these two strategies, MTGFlow achieves superior anomaly detection performance. Experiments on five public datasets with seven baselines are conducted, MTGFlow outperforms the SOTA methods by up to 5.0 AUROC%.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Qihang and Chen, Jiming and Liu, Haoyu and He, Shibo and Meng, Wenchao}, year={2023}, month={Jun.}, pages={4963-4971} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25623/25395", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25623", + "pdf_size": 612334, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12988091309655552523&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;0+2;0", + "aff_unique_norm": "Zhejiang University;NetEase;Zhejiang Province Key Laboratory of Collaborative Sensing and Autonomous Unmanned Systems", + "aff_unique_dep": ";Fuxi AI Lab;Key Laboratory of Collaborative Sensing and Autonomous Unmanned Systems", + "aff_unique_url": "https://www.zju.edu.cn;https://www.163.com;", + "aff_unique_abbr": "ZJU;NetEase;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25554", + "title": "Detecting Sources of Healthcare Associated Infections", + "track": "main", + "status": "Technical", + "abstract": "Healthcare acquired infections (HAIs) (e.g., Methicillin-resistant Staphylococcus aureus 
infection) have complex transmission pathways, spreading not just via direct person-to-person contacts, but also via contaminated surfaces. Prior work in mathematical epidemiology has led to a class of models \u2013 which we call load sharing models \u2013 that provide a discrete-time, stochastic formalization of HAI-spread on temporal contact networks. The focus of this paper is the source detection problem for the load sharing model. The source detection problem has been studied extensively in SEIR type models, but this prior work does not apply to load sharing models.\nWe show that a natural formulation of the source detection problem for the load sharing model is computationally hard, even to approximate. We then present two alternate formulations that are much more tractable. The tractability of our problems depends crucially on the submodularity of the expected number of infections as a function of the source set. Prior techniques for showing submodularity, such as the \"live graph\" technique are not applicable for the load sharing model and our key technical contribution is to use a more sophisticated \"coupling\" technique to show the submodularity result. We propose algorithms for our two problem formulations by extending existing algorithmic results from submodular optimization and combining these with an expectation propagation heuristic for the load sharing model that leads to orders-of-magnitude speedup. We present experimental results on temporal contact networks based on fine-grained EMR data from three different hospitals. Our results on synthetic outbreaks on these networks show that our algorithms outperform baselines by up to 5.97 times. Furthermore, case studies based on hospital outbreaks of Clostridioides difficile infection show that our algorithms identify clinically meaningful sources.", + "primary_area": "data mining and knowledge management", + "author": "Hankyu Jang; Andrew Fu; Jiaming Cui; Methun Kamruzzaman; B. 
Aditya Prakash; Anil Vullikanti; Bijaya Adhikari; Sriram V. Pemmaraju", + "authorids": "", + "aff": "Department of Computer Science, University of Iowa; Department of Computer Science, University of Virginia; College of Computing, Georgia Institute of Technology; Biocomplexity Institute, University of Virginia; College of Computing, Georgia Institute of Technology; Department of Computer Science, University of Virginia+Biocomplexity Institute, University of Virginia; Department of Computer Science, University of Iowa; Department of Computer Science, University of Iowa", + "bibtex": "@article{Jang_Fu_Cui_Kamruzzaman_Prakash_Vullikanti_Adhikari_Pemmaraju_2023, title={Detecting Sources of Healthcare Associated Infections}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25554}, DOI={10.1609/aaai.v37i4.25554}, abstractNote={Healthcare acquired infections (HAIs) (e.g., Methicillin-resistant Staphylococcus aureus infection) have complex transmission pathways, spreading not just via direct person-to-person contacts, but also via contaminated surfaces. Prior work in mathematical epidemiology has led to a class of models \u2013 which we call load sharing models \u2013 that provide a discrete-time, stochastic formalization of HAI-spread on temporal contact networks. The focus of this paper is the source detection problem for the load sharing model. The source detection problem has been studied extensively in SEIR type models, but this prior work does not apply to load sharing models.\nWe show that a natural formulation of the source detection problem for the load sharing model is computationally hard, even to approximate. We then present two alternate formulations that are much more tractable. The tractability of our problems depends crucially on the submodularity of the expected number of infections as a function of the source set. 
Prior techniques for showing submodularity, such as the \"live graph\" technique are not applicable for the load sharing model and our key technical contribution is to use a more sophisticated \"coupling\" technique to show the submodularity result. We propose algorithms for our two problem formulations by extending existing algorithmic results from submodular optimization and combining these with an expectation propagation heuristic for the load sharing model that leads to orders-of-magnitude speedup. We present experimental results on temporal contact networks based on fine-grained EMR data from three different hospitals. Our results on synthetic outbreaks on these networks show that our algorithms outperform baselines by up to 5.97 times. Furthermore, case studies based on hospital outbreaks of Clostridioides difficile infection show that our algorithms identify clinically meaningful sources.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jang, Hankyu and Fu, Andrew and Cui, Jiaming and Kamruzzaman, Methun and Prakash, B. 
Aditya and Vullikanti, Anil and Adhikari, Bijaya and Pemmaraju, Sriram V.}, year={2023}, month={Jun.}, pages={4347-4355} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25554/25326", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25554", + "pdf_size": 1178773, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4234071562377888580&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": "uiowa.edu;virginia.edu;gatech.edu;virginia.edu;gatech.edu;virginia.edu;uiowa.edu;uiowa.edu", + "email": "uiowa.edu;virginia.edu;gatech.edu;virginia.edu;gatech.edu;virginia.edu;uiowa.edu;uiowa.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;1;2;1+1;0;0", + "aff_unique_norm": "University of Iowa;University of Virginia;Georgia Institute of Technology", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science;College of Computing", + "aff_unique_url": "https://www.uiowa.edu;https://www.virginia.edu;https://www.gatech.edu", + "aff_unique_abbr": "UIowa;UVA;Georgia Tech", + "aff_campus_unique_index": "1;1;", + "aff_campus_unique": ";Atlanta", + "aff_country_unique_index": "0;0;0;0;0;0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26840", + "title": "Detecting VoIP Data Streams: Approaches Using Hidden Representation Learning", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The use of voice-over-IP technology has rapidly expanded over the past several years, and has thus become a significant portion of traffic in the real, complex network environment. Deep packet inspection and middlebox technologies need to analyze call flows in order to perform network management, load-balancing, content monitoring, forensic analysis, and intelligence gathering. 
Because the session setup and management data can be sent on different ports or out of sync with VoIP call data over the Real-time Transport Protocol (RTP) with low latency, inspection software may miss calls or parts of calls. To solve this problem, we engineered two different deep learning models based on hidden representation learning. MAPLE, a matrix-based encoder which transforms packets into an image representation, uses convolutional neural networks to determine RTP packets from data flow. DATE is a density-analysis based tensor encoder which transforms packet data into a three-dimensional point cloud representation. We then perform density-based clustering over the point clouds as latent representations of the data, and classify packets as RTP or non-RTP based on their statistical clustering features. In this research, we show that these tools may allow a data collection and analysis pipeline to begin detecting and buffering RTP streams for later session association, solving the initial drop problem. MAPLE achieves over ninety-nine percent accuracy in RTP/non-RTP detection. 
The results of our experiments show that both models can not only classify RTP versus non-RTP packet streams, but could extend to other network traffic classification problems in real deployments of network analysis pipelines.", + "primary_area": "emerging applications of ai", + "author": "Maya Kapoor; Michael Napolitano; Jonathan Quance; Thomas Moyer; Siddharth Krishnan", + "authorids": "", + "aff": "Defense and Intelligence Sector, Parsons Corporation; Defense and Intelligence Sector, Parsons Corporation; Defense and Intelligence Sector, Parsons Corporation; University of North Carolina at Charlotte; University of North Carolina at Charlotte", + "bibtex": "@article{Kapoor_Napolitano_Quance_Moyer_Krishnan_2024, title={Detecting VoIP Data Streams: Approaches Using Hidden Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26840}, DOI={10.1609/aaai.v37i13.26840}, abstractNote={The use of voice-over-IP technology has rapidly expanded over the past several years, and has thus become a significant portion of traffic in the real, complex network environment. Deep packet inspection and middlebox technologies need to analyze call flows in order to perform network management, load-balancing, content monitoring, forensic analysis, and intelligence gathering. Because the session setup and management data can be sent on different ports or out of sync with VoIP call data over the Real-time Transport Protocol (RTP) with low latency, inspection software may miss calls or parts of calls. To solve this problem, we engineered two different deep learning models based on hidden representation learning. MAPLE, a matrix-based encoder which transforms packets into an image representation, uses convolutional neural networks to determine RTP packets from data flow. DATE is a density-analysis based tensor encoder which transforms packet data into a three-dimensional point cloud representation. 
We then perform density-based clustering over the point clouds as latent representations of the data, and classify packets as RTP or non-RTP based on their statistical clustering features. In this research, we show that these tools may allow a data collection and analysis pipeline to begin detecting and buffering RTP streams for later session association, solving the initial drop problem. MAPLE achieves over ninety-nine percent accuracy in RTP/non-RTP detection. The results of our experiments show that both models can not only classify RTP versus non-RTP packet streams, but could extend to other network traffic classification problems in real deployments of network analysis pipelines.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kapoor, Maya and Napolitano, Michael and Quance, Jonathan and Moyer, Thomas and Krishnan, Siddharth}, year={2024}, month={Jul.}, pages={15519-15527} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26840/26612", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26840", + "pdf_size": 702741, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17866888981328626709&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "parsons.com;parsons.com;parsons.com;uncc.edu;uncc.edu", + "email": "parsons.com;parsons.com;parsons.com;uncc.edu;uncc.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;1", + "aff_unique_norm": "Parsons Corporation;University of North Carolina at Charlotte", + "aff_unique_dep": "Defense and Intelligence Sector;", + "aff_unique_url": "https://www.parsons.com;https://www.uncc.edu", + "aff_unique_abbr": "Parsons;UNCC", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Charlotte", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26551", + "title": "Detecting and Grounding Important Characters in Visual 
Stories", + "track": "main", + "status": "Technical", + "abstract": "Characters are essential to the plot of any story. Establishing the characters before writing a story can improve the clarity of the plot and the overall flow of the narrative. However, previous work on visual storytelling tends to focus on detecting objects in images and discovering relationships between them. In this approach, characters are not distinguished from other objects when they are fed into the generation pipeline. The result is a coherent sequence of events rather than a character-centric story. In order to address this limitation, we introduce the VIST-Character dataset, which provides rich character-centric annotations, including visual and textual co-reference chains and importance ratings for characters. Based on this dataset, we propose two new tasks: important character detection and character grounding in visual stories. For both tasks, we develop simple, unsupervised models based on distributional similarity and pre-trained vision-and-language models. Our new dataset, together with these models, can serve as the foundation for subsequent work on analysing and generating stories from a character-centric perspective.", + "primary_area": "speech natural language processing", + "author": "Danyang Liu; Frank Keller", + "authorids": "", + "aff": "Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh; Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh", + "bibtex": "@article{Liu_Keller_2023, title={Detecting and Grounding Important Characters in Visual Stories}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26551}, DOI={10.1609/aaai.v37i11.26551}, abstractNote={Characters are essential to the plot of any story. Establishing the characters before writing a story can improve the clarity of the plot and the overall flow of the narrative. 
However, previous work on visual storytelling tends to focus on detecting objects in images and discovering relationships between them. In this approach, characters are not distinguished from other objects when they are fed into the generation pipeline. The result is a coherent sequence of events rather than a character-centric story. In order to address this limitation, we introduce the VIST-Character dataset, which provides rich character-centric annotations, including visual and textual co-reference chains and importance ratings for characters. Based on this dataset, we propose two new tasks: important character detection and character grounding in visual stories. For both tasks, we develop simple, unsupervised models based on distributional similarity and pre-trained vision-and-language models. Our new dataset, together with these models, can serve as the foundation for subsequent work on analysing and generating stories from a character-centric perspective.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Danyang and Keller, Frank}, year={2023}, month={Jun.}, pages={13210-13218} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26551/26323", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26551", + "pdf_size": 1905123, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7829082175420381458&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ed.ac.uk;inf.ed.ac.uk", + "email": "ed.ac.uk;inf.ed.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Edinburgh", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.ed.ac.uk", + "aff_unique_abbr": "Edinburgh", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Edinburgh", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26904", + 
"title": "Develop AI Teaching and Learning Resources for Compulsory Education in China", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "Artificial intelligence course has been required to take for compulsory education students in China. However, not all teachers and schools are fully prepared and ready. This is partially because of the lack of adequate teaching and learning resources, which requires a major expenditure of time and effort for schools and teachers to design and develop. To meet the challenge of lacking appropriate resources in teaching and learning AI from grade 1 to grade 9, we developed AI knowledge structure and instructional resources based on Chinese national curriculum for information science and technology. Our comprehensive AI syllabus contains 90 core concepts, 63 learning indicators, and 27 teaching and learning resources, which have been implemented. The resources have been taken as model courses in teacher training programs and an exemplary course has been implemented in primary schools that verified the effectiveness of our resources.", + "primary_area": "", + "author": "Jiachen Song; Jinglei Yu; Li Yan; Linan Zhang; Bei Liu; Yujin Zhang; Yu Lu", + "authorids": "", + "aff": "Advanced Innovation Center for Future Education, Faculty of Education, Beijing Normal University, Beijing, China; Advanced Innovation Center for Future Education, Faculty of Education, Beijing Normal University, Beijing, China; Advanced Innovation Center for Future Education, Faculty of Education, Beijing Normal University, Beijing, China; Liyuan Primary School (Liyuan Education Group), Shenzhen, China; Tencent Technology (Shenzhen) Company Limited, Shenzhen, China; Tencent Technology (Shenzhen) Company Limited, Shenzhen, China; Advanced Innovation Center for Future Education, Faculty of Education, Beijing Normal University, Beijing, China", + "bibtex": "@article{Song_Yu_Yan_Zhang_Liu_Zhang_Lu_2024, 
title={Develop AI Teaching and Learning Resources for Compulsory Education in China}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26904}, DOI={10.1609/aaai.v37i13.26904}, abstractNote={Artificial intelligence course has been required to take for compulsory education students in China. However, not all teachers and schools are fully prepared and ready. This is partially because of the lack of adequate teaching and learning resources, which requires a major expenditure of time and effort for schools and teachers to design and develop. To meet the challenge of lacking appropriate resources in teaching and learning AI from grade 1 to grade 9, we developed AI knowledge structure and instructional resources based on Chinese national curriculum for information science and technology. Our comprehensive AI syllabus contains 90 core concepts, 63 learning indicators, and 27 teaching and learning resources, which have been implemented. The resources have been taken as model courses in teacher training programs and an exemplary course has been implemented in primary schools that verified the effectiveness of our resources.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Jiachen and Yu, Jinglei and Yan, Li and Zhang, Linan and Liu, Bei and Zhang, Yujin and Lu, Yu}, year={2024}, month={Jul.}, pages={16033-16039} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26904/26676", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26904", + "pdf_size": 7009819, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13081394290829107909&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "bnu.edu.cn; ; ; ; ; ;bnu.edu.cn", + "email": "bnu.edu.cn; ; ; ; ; ;bnu.edu.cn", + "github": "", + "project": "https://coding.qq.com/ai/", + "author_num": 7, + "aff_unique_index": "0;0;0;1;2;2;0", + "aff_unique_norm": "Beijing Normal University;Liyuan 
Primary School;Tencent Technology", + "aff_unique_dep": "Faculty of Education;;", + "aff_unique_url": "https://www.bnu.edu.cn;;https://www.tencent.com", + "aff_unique_abbr": "BNU;;Tencent", + "aff_campus_unique_index": "0;0;0;2;2;0", + "aff_campus_unique": "Beijing;;Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26839", + "title": "Developing the Wheel Image Similarity Application with Deep Metric Learning: Hyundai Motor Company Case", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The global automobile market experiences quick changes in design preferences. In response to the demand shifts, manufacturers now try to apply new technologies to bring a novel design to market faster. In this paper, we introduce a novel application that performs a similarity verification task of wheel designs using an AI model and cloud computing technology. At Jan 2022, we successfully implemented the application to the wheel design process of Hyundai Motor Company\u2019s design team and shortened the similarity verification time by 90% to a maximum of 10 minutes. We believe that this study is the first to build a wheel image database and empirically prove that the cross-entropy loss does similar tasks as the pairwise losses do in the embedding space. As a result, we successfully automated Hyundai Motor\u2019s verification task of wheel design similarity. 
With a few clicks, the end-users in Hyundai Motor could take advantage of our application.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Kyung Pyo Kang; Ga Hyeon Jeong; Jeong Hoon Eom; Soon Beom Kwon; Jae Hong Park", + "authorids": "", + "aff": "KyungHee University; KyungHee University; KyungHee University; Hyundai Motor Company; KyungHee University", + "bibtex": "@article{Kang_Jeong_Eom_Kwon_Park_2024, title={Developing the Wheel Image Similarity Application with Deep Metric Learning: Hyundai Motor Company Case}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26839}, DOI={10.1609/aaai.v37i13.26839}, abstractNote={The global automobile market experiences quick changes in design preferences. In response to the demand shifts, manufacturers now try to apply new technologies to bring a novel design to market faster. In this paper, we introduce a novel application that performs a similarity verification task of wheel designs using an AI model and cloud computing technology. At Jan 2022, we successfully implemented the application to the wheel design process of Hyundai Motor Company\u2019s design team and shortened the similarity verification time by 90% to a maximum of 10 minutes. We believe that this study is the first to build a wheel image database and empirically prove that the cross-entropy loss does similar tasks as the pairwise losses do in the embedding space. As a result, we successfully automated Hyundai Motor\u2019s verification task of wheel design similarity. 
With a few clicks, the end-users in Hyundai Motor could take advantage of our application.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kang, Kyung Pyo and Jeong, Ga Hyeon and Eom, Jeong Hoon and Kwon, Soon Beom and Park, Jae Hong}, year={2024}, month={Jul.}, pages={15512-15518} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26839/26611", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26839", + "pdf_size": 900876, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:4tN_4ieZ-r8J:scholar.google.com/&scioq=Developing+the+Wheel+Image+Similarity+Application+with+Deep+Metric+Learning:+Hyundai+Motor+Company+Case&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "khu.ac.kr;khu.ac.kr;khu.ac.kr;hyundai.com;khu.ac.kr", + "email": "khu.ac.kr;khu.ac.kr;khu.ac.kr;hyundai.com;khu.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Kyung Hee University;Hyundai Motor Company", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.khu.ac.kr;https://www.hyundai.com", + "aff_unique_abbr": "KHU;HMC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-27025", + "title": "Development of a Human-Agent Interaction System including Norm and Emotion in an Evacuation Situation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Agent-based modeling and simulation can provide a powerful test environment for crisis management scenarios. Human agent interaction has limitations in representing norms issued by an agent to a human agent that has emotions. In this study, we present an approach to the interaction between a virtual normative agent and a human agent in an evacuation scenario. 
Through simulation comparisons, it is shown that the method used in this study can more fully simulate the real-life outcome of an emergency situation and also improves the authenticity of the agent interaction.", + "primary_area": "", + "author": "Ephraim Sinyabe Pagou; Vivient Corneille Kamla; Igor Tchappi; Amro Najjar", + "authorids": "", + "aff": "University of Ngaoundere, PO Box 454, Cameroon; University of Ngaoundere, PO Box 454, Cameroon; University of Luxembourg, 4365 Esch-sur-Alzette, Luxembourg; Luxembourg Institute of Science and Technology, 4362 Esch-sur-Alzette, Luxembourg", + "bibtex": "@article{Sinyabe Pagou_Kamla_Tchappi_Najjar_2024, title={Development of a Human-Agent Interaction System including Norm and Emotion in an Evacuation Situation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27025}, DOI={10.1609/aaai.v37i13.27025}, abstractNote={Agent-based modeling and simulation can provide a powerful test environment for crisis management scenarios. Human agent interaction has limitations in representing norms issued by an agent to a human agent that has emotions. In this study, we present an approach to the interaction between a virtual normative agent and a human agent in an evacuation scenario. 
Through simulation comparisons, it is shown that the method used in this study can more fully simulate the real-life outcome of an emergency situation and also improves the authenticity of the agent interaction.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sinyabe Pagou, Ephraim and Kamla, Vivient Corneille and Tchappi, Igor and Najjar, Amro}, year={2024}, month={Jul.}, pages={16330-16331} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27025/26797", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27025", + "pdf_size": 205414, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:gAfR5y40i0MJ:scholar.google.com/&scioq=Development+of+a+Human-Agent+Interaction+System+including+Norm+and+Emotion+in+an+Evacuation+Situation+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com;uni.lu;uni.lu", + "email": "gmail.com;gmail.com;uni.lu;uni.lu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "University of Ngaoundere;University of Luxembourg;Luxembourg Institute of Science and Technology", + "aff_unique_dep": ";;", + "aff_unique_url": ";https://wwwen.unil.lu;https://www.list.lu", + "aff_unique_abbr": ";UniLu;LIST", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Esch-sur-Alzette", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "Cameroon;Luxembourg" + }, + { + "id": "article-25934", + "title": "DiFA: Differentiable Feature Acquisition", + "track": "main", + "status": "Technical", + "abstract": "Feature acquisition in predictive modeling is an important task in many practical applications. For example, in patient health prediction, we do not fully observe their personal features and need to dynamically select features to acquire. Our goal is to acquire a small subset of features that maximize prediction performance. 
Recently, some works reformulated feature acquisition as a Markov decision process and applied reinforcement learning (RL) algorithms, where the reward reflects both prediction performance and feature acquisition cost. However, RL algorithms only use zeroth-order information on the reward, which leads to slow empirical convergence, especially when there are many actions (number of features) to consider. For predictive modeling, it is possible to use first-order information on the reward, i.e., gradients, since we are often given an already collected dataset. Therefore, we propose differentiable feature acquisition (DiFA), which uses a differentiable representation of the feature selection policy to enable gradients to flow from the prediction loss to the policy parameters. We conduct extensive experiments on various real-world datasets and show that DiFA significantly outperforms existing feature acquisition methods when the number of features is large.", + "primary_area": "machine learning i", + "author": "Aritra Ghosh; Andrew Lan", + "authorids": "", + "aff": "University of Massachusetts Amherst; University of Massachusetts Amherst", + "bibtex": "@article{Ghosh_Lan_2023, title={DiFA: Differentiable Feature Acquisition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25934}, DOI={10.1609/aaai.v37i6.25934}, abstractNote={Feature acquisition in predictive modeling is an important task in many practical applications. For example, in patient health prediction, we do not fully observe their personal features and need to dynamically select features to acquire. Our goal is to acquire a small subset of features that maximize prediction performance. Recently, some works reformulated feature acquisition as a Markov decision process and applied reinforcement learning (RL) algorithms, where the reward reflects both prediction performance and feature acquisition cost. 
However, RL algorithms only use zeroth-order information on the reward, which leads to slow empirical convergence, especially when there are many actions (number of features) to consider. For predictive modeling, it is possible to use first-order information on the reward, i.e., gradients, since we are often given an already collected dataset. Therefore, we propose differentiable feature acquisition (DiFA), which uses a differentiable representation of the feature selection policy to enable gradients to flow from the prediction loss to the policy parameters. We conduct extensive experiments on various real-world datasets and show that DiFA significantly outperforms existing feature acquisition methods when the number of features is large.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ghosh, Aritra and Lan, Andrew}, year={2023}, month={Jun.}, pages={7705-7713} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25934/25706", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25934", + "pdf_size": 280790, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5467616096797325144&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.umass.edu;cs.umass.edu", + "email": "cs.umass.edu;cs.umass.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Massachusetts Amherst", + "aff_unique_dep": "", + "aff_unique_url": "https://www.umass.edu", + "aff_unique_abbr": "UMass Amherst", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Amherst", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26619", + "title": "Dialogue Rewriting via Skeleton-Guided Generation", + "track": "main", + "status": "Technical", + "abstract": "Dialogue rewriting aims to transform multi-turn, context-dependent dialogues into well-formed, 
context-independent text for most NLP systems. Previous dialogue rewriting benchmarks and systems assume a fluent and informative utterance to rewrite. Unfortunately, dialogue utterances from real-world systems are frequently noisy and with various kinds of errors that can make them almost uninformative. In this paper, we first present Real-world Dialogue Rewriting Corpus (RealDia), a new benchmark to evaluate how well current dialogue rewriting systems can deal with real-world noisy and uninformative dialogue utterances. RealDia contains annotated multi-turn dialogues from real scenes with ASR errors, spelling errors, redundancies and other noises that are ignored by previous dialogue rewriting benchmarks. We show that previous dialogue rewriting approaches are neither effective nor data-efficient to resolve RealDia. Then this paper presents Skeleton-Guided Rewriter (SGR), which can resolve the task of dialogue rewriting via a skeleton-guided generation paradigm. Experiments show that RealDia is a much more challenging benchmark for real-world dialogue rewriting, and SGR can effectively resolve the task and outperform previous approaches by a large margin.", + "primary_area": "speech natural language processing", + "author": "Chunlei Xin; Hongyu Lin; Shan Wu; Xianpei Han; Bo Chen; Wen Dai; Shuai Chen; Bin Wang; Le Sun", + "authorids": "", + "aff": "Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China+State Key Laboratory of Computer Science, Institute of Software, 
Chinese Academy of Sciences, Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China+School of Information Engineering, Minzu University of China, Beijing, China+National Language Resources Monitoring and Research Center for Minority Languages, Beijing, China; Xiaomi AI Lab, Xiaomi Inc., Beijing, China; Xiaomi AI Lab, Xiaomi Inc., Beijing, China; Xiaomi AI Lab, Xiaomi Inc., Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China+State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Xin_Lin_Wu_Han_Chen_Dai_Chen_Wang_Sun_2023, title={Dialogue Rewriting via Skeleton-Guided Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26619}, DOI={10.1609/aaai.v37i11.26619}, abstractNote={Dialogue rewriting aims to transform multi-turn, context-dependent dialogues into well-formed, context-independent text for most NLP systems. Previous dialogue rewriting benchmarks and systems assume a fluent and informative utterance to rewrite. Unfortunately, dialogue utterances from real-world systems are frequently noisy and with various kinds of errors that can make them almost uninformative. In this paper, we first present Real-world Dialogue Rewriting Corpus (RealDia), a new benchmark to evaluate how well current dialogue rewriting systems can deal with real-world noisy and uninformative dialogue utterances. RealDia contains annotated multi-turn dialogues from real scenes with ASR errors, spelling errors, redundancies and other noises that are ignored by previous dialogue rewriting benchmarks. We show that previous dialogue rewriting approaches are neither effective nor data-efficient to resolve RealDia. 
Then this paper presents Skeleton-Guided Rewriter (SGR), which can resolve the task of dialogue rewriting via a skeleton-guided generation paradigm. Experiments show that RealDia is a much more challenging benchmark for real-world dialogue rewriting, and SGR can effectively resolve the task and outperform previous approaches by a large margin.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xin, Chunlei and Lin, Hongyu and Wu, Shan and Han, Xianpei and Chen, Bo and Dai, Wen and Chen, Shuai and Wang, Bin and Sun, Le}, year={2023}, month={Jun.}, pages={13825-13833} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26619/26391", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26619", + "pdf_size": 837015, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5500996778525461621&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff_domain": "iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;xiaomi.com;xiaomi.com;xiaomi.com;iscas.ac.cn", + "email": "iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;xiaomi.com;xiaomi.com;xiaomi.com;iscas.ac.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0;0+1;0+0;0+2+3;4;4;4;0+0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Minzu University of China;National Language Resources Monitoring and Research Center for Minority Languages;Xiaomi Inc.", + "aff_unique_dep": "Institute of Software;;School of Information Engineering;;Xiaomi AI Lab", + "aff_unique_url": "https://www.cas.cn;http://www.ucas.ac.cn;http://www.muc.edu.cn;;https://www.xiaomi.com", + "aff_unique_abbr": "CAS;UCAS;Minzu UC;;Xiaomi", + "aff_campus_unique_index": "0+0;0;0+0;0+0;0+0;0;0;0;0+0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0;0+0;0+0;0+0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26620", + "title": 
"Dialogue State Distillation Network with Inter-slot Contrastive Learning for Dialogue State Tracking", + "track": "main", + "status": "Technical", + "abstract": "In task-oriented dialogue systems, Dialogue State Tracking (DST) aims to extract users' intentions from the dialogue history. Currently, most existing approaches suffer from error propagation and are unable to dynamically select relevant information when utilizing previous dialogue states. Moreover, the relations between the updates of different slots provide vital clues for DST. However, the existing approaches rely only on predefined graphs to indirectly capture the relations. In this paper, we propose a Dialogue State Distillation Network (DSDN) to utilize relevant information of previous dialogue states and migrate the gap of utilization between training and testing. Thus, it can dynamically exploit previous dialogue states and avoid introducing error propagation simultaneously. Further, we propose an inter-slot contrastive learning loss to effectively capture the slot co-update relations from dialogue context. Experiments are conducted on the widely used MultiWOZ 2.0 and MultiWOZ 2.1 datasets. 
The experimental results show that our proposed model achieves the state-of-the-art performance for DST.", + "primary_area": "speech natural language processing", + "author": "Jing Xu; Dandan Song; Chong Liu; Siu Cheung Hui; Fei Li; Qiang Ju; Xiaonan He; Jian Xie", + "authorids": "", + "aff": "School of Computer Science & Technology, Beijing Institute of Technology, Beijing, China; School of Computer Science & Technology, Beijing Institute of Technology, Beijing, China; Baidu Inc., Beijing, China; Nanyang Technological University, Singapore; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China", + "bibtex": "@article{Xu_Song_Liu_Hui_Li_Ju_He_Xie_2023, title={Dialogue State Distillation Network with Inter-slot Contrastive Learning for Dialogue State Tracking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26620}, DOI={10.1609/aaai.v37i11.26620}, abstractNote={In task-oriented dialogue systems, Dialogue State Tracking (DST) aims to extract users\u2019 intentions from the dialogue history. Currently, most existing approaches suffer from error propagation and are unable to dynamically select relevant information when utilizing previous dialogue states. Moreover, the relations between the updates of different slots provide vital clues for DST. However, the existing approaches rely only on predefined graphs to indirectly capture the relations. In this paper, we propose a Dialogue State Distillation Network (DSDN) to utilize relevant information of previous dialogue states and migrate the gap of utilization between training and testing. Thus, it can dynamically exploit previous dialogue states and avoid introducing error propagation simultaneously. Further, we propose an inter-slot contrastive learning loss to effectively capture the slot co-update relations from dialogue context. Experiments are conducted on the widely used MultiWOZ 2.0 and MultiWOZ 2.1 datasets. 
The experimental results show that our proposed model achieves the state-of-the-art performance for DST.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Jing and Song, Dandan and Liu, Chong and Hui, Siu Cheung and Li, Fei and Ju, Qiang and He, Xiaonan and Xie, Jian}, year={2023}, month={Jun.}, pages={13834-13842} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26620/26392", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26620", + "pdf_size": 1048181, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14516898455496726739&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "bit.edu.cn;bit.edu.cn;baidu.com;ntu.edu.sg;baidu.com;baidu.com;baidu.com;baidu.com", + "email": "bit.edu.cn;bit.edu.cn;baidu.com;ntu.edu.sg;baidu.com;baidu.com;baidu.com;baidu.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;2;1;1;1;1", + "aff_unique_norm": "Beijing Institute of Technology;Baidu Inc.;Nanyang Technological University", + "aff_unique_dep": "School of Computer Science & Technology;;", + "aff_unique_url": "http://www.bit.edu.cn;https://www.baidu.com;https://www.ntu.edu.sg", + "aff_unique_abbr": "BIT;Baidu;NTU", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;1;0;0;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25663", + "title": "DiffMD: A Geometric Diffusion Model for Molecular Dynamics Simulations", + "track": "main", + "status": "Technical", + "abstract": "Molecular dynamics (MD) has long been the de facto choice for simulating complex atomistic systems from first principles. Recently deep learning models become a popular way to accelerate MD. 
Notwithstanding, existing models depend on intermediate variables such as the potential energy or force fields to update atomic positions, which requires additional computations to perform back-propagation. To waive this requirement, we propose a novel model called DiffMD by directly estimating the gradient of the log density of molecular conformations. DiffMD relies on a score-based denoising diffusion generative model that perturbs the molecular structure with a conditional noise depending on atomic accelerations and treats conformations at previous timeframes as the prior distribution for sampling. Another challenge of modeling such a conformation generation process is that a molecule is kinetic instead of static, which no prior works have strictly studied. To solve this challenge, we propose an equivariant geometric Transformer as the score function in the diffusion process to calculate corresponding gradients. It incorporates the directions and velocities of atomic motions via 3D spherical Fourier-Bessel representations. With multiple architectural improvements, we outperform state-of-the-art baselines on MD17 and isomers of C7O2H10 datasets. This work contributes to accelerating material and drug discovery.", + "primary_area": "domain s of application", + "author": "Fang Wu; Stan Z. Li", + "authorids": "", + "aff": "AI Research and Innovation Laboratory, School of Engineering, Westlake University+Institute of AI Industry Research, Tsinghua University; AI Research and Innovation Laboratory, School of Engineering, Westlake University", + "bibtex": "@article{Wu_Li_2023, title={DiffMD: A Geometric Diffusion Model for Molecular Dynamics Simulations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25663}, DOI={10.1609/aaai.v37i4.25663}, abstractNote={Molecular dynamics (MD) has long been the de facto choice for simulating complex atomistic systems from first principles. Recently deep learning models become a popular way to accelerate MD. 
Notwithstanding, existing models depend on intermediate variables such as the potential energy or force fields to update atomic positions, which requires additional computations to perform back-propagation. To waive this requirement, we propose a novel model called DiffMD by directly estimating the gradient of the log density of molecular conformations. DiffMD relies on a score-based denoising diffusion generative model that perturbs the molecular structure with a conditional noise depending on atomic accelerations and treats conformations at previous timeframes as the prior distribution for sampling. Another challenge of modeling such a conformation generation process is that a molecule is kinetic instead of static, which no prior works have strictly studied. To solve this challenge, we propose an equivariant geometric Transformer as the score function in the diffusion process to calculate corresponding gradients. It incorporates the directions and velocities of atomic motions via 3D spherical Fourier-Bessel representations. With multiple architectural improvements, we outperform state-of-the-art baselines on MD17 and isomers of C7O2H10 datasets. 
This work contributes to accelerating material and drug discovery.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Fang and Li, Stan Z.}, year={2023}, month={Jun.}, pages={5321-5329} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25663/25435", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25663", + "pdf_size": 375348, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=607184659436412449&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "columbia.edu;westlake.edu.cn", + "email": "columbia.edu;westlake.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0", + "aff_unique_norm": "Westlake University;Tsinghua University", + "aff_unique_dep": "School of Engineering;Institute of AI Industry Research", + "aff_unique_url": "https://www.westlake.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "WU;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25908", + "title": "Diffeomorphic Information Neural Estimation", + "track": "main", + "status": "Technical", + "abstract": "Mutual Information (MI) and Conditional Mutual Information (CMI) are multi-purpose tools from information theory that are able to naturally measure the statistical dependencies between random variables, thus they are usually of central interest in several statistical and machine learning tasks, such as conditional independence testing and representation learning. However, estimating CMI, or even MI, is infamously challenging due the intractable formulation. In this study, we introduce DINE (Diffeomorphic Information Neural Estimator)\u2013a novel approach for estimating CMI of continuous random variables, inspired by the invariance of CMI over diffeomorphic maps. 
We show that the variables of interest can be replaced with appropriate surrogates that follow simpler distributions, allowing the CMI to be efficiently evaluated via analytical solutions. Additionally, we demonstrate the quality of the proposed estimator in comparison with state-of-the-arts in three important tasks, including estimating MI, CMI, as well as its application in conditional independence testing. The empirical evaluations show that DINE consistently outperforms competitors in all tasks and is able to adapt very well to complex and high-dimensional relationships.", + "primary_area": "machine learning i", + "author": "Bao Duong; Thin Nguyen", + "authorids": "", + "aff": "Applied Artificial Intelligence Institute, Deakin University, Australia; Applied Artificial Intelligence Institute, Deakin University, Australia", + "bibtex": "@article{Duong_Nguyen_2023, title={Diffeomorphic Information Neural Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25908}, DOI={10.1609/aaai.v37i6.25908}, abstractNote={Mutual Information (MI) and Conditional Mutual Information (CMI) are multi-purpose tools from information theory that are able to naturally measure the statistical dependencies between random variables, thus they are usually of central interest in several statistical and machine learning tasks, such as conditional independence testing and representation learning. However, estimating CMI, or even MI, is infamously challenging due the intractable formulation. In this study, we introduce DINE (Diffeomorphic Information Neural Estimator)\u2013a novel approach for estimating CMI of continuous random variables, inspired by the invariance of CMI over diffeomorphic maps. We show that the variables of interest can be replaced with appropriate surrogates that follow simpler distributions, allowing the CMI to be efficiently evaluated via analytical solutions. 
Additionally, we demonstrate the quality of the proposed estimator in comparison with state-of-the-arts in three important tasks, including estimating MI, CMI, as well as its application in conditional independence testing. The empirical evaluations show that DINE consistently outperforms competitors in all tasks and is able to adapt very well to complex and high-dimensional relationships.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Duong, Bao and Nguyen, Thin}, year={2023}, month={Jun.}, pages={7468-7475} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25908/25680", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25908", + "pdf_size": 212140, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12166560004429275751&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "deakin.edu.au;deakin.edu.au", + "email": "deakin.edu.au;deakin.edu.au", + "github": "https://github.com/baosws/DINE", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Deakin University", + "aff_unique_dep": "Applied Artificial Intelligence Institute", + "aff_unique_url": "https://www.deakin.edu.au", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26026", + "title": "Differentiable Meta Multigraph Search with Partial Message Propagation on Heterogeneous Information Networks", + "track": "main", + "status": "Technical", + "abstract": "Heterogeneous information networks (HINs) are widely employed for describing real-world data with intricate entities and relationships. To automatically utilize their semantic information, graph neural architecture search has recently been developed for various tasks of HINs. Existing works, on the other hand, show weaknesses in instability and inflexibility. 
To address these issues, we propose a novel method called Partial Message Meta Multigraph search (PMMM) to automatically optimize the neural architecture design on HINs. Specifically, to learn how graph neural networks (GNNs) propagate messages along various types of edges, PMMM adopts an efficient differentiable framework to search for a meaningful meta multigraph, which can capture more flexible and complex semantic relations than a meta graph. The differentiable search typically suffers from performance instability, so we further propose a stable algorithm called partial message search to ensure that the searched meta multigraph consistently surpasses the manually designed meta-structures, i.e., meta-paths. Extensive experiments on six benchmark datasets over two representative tasks, including node classification and recommendation, demonstrate the effectiveness of the proposed method. Our approach outperforms the state-of-the-art heterogeneous GNNs, finds out meaningful meta multigraphs, and is significantly more stable. 
Our code is available at https://github.com/JHL-HUST/PMMM.", + "primary_area": "machine learning ii", + "author": "Chao Li; Hao Xu; Kun He", + "authorids": "", + "aff": "School of Computer Science, Huazhong University of Science and Technology, Wuhan 430074, China + Hopcroft Center on Computing Science, Huazhong University of Science and Technology, Wuhan 430074, China; School of Computer Science, Huazhong University of Science and Technology, Wuhan 430074, China + Hopcroft Center on Computing Science, Huazhong University of Science and Technology, Wuhan 430074, China; School of Computer Science, Huazhong University of Science and Technology, Wuhan 430074, China + Hopcroft Center on Computing Science, Huazhong University of Science and Technology, Wuhan 430074, China", + "bibtex": "@article{Li_Xu_He_2023, title={Differentiable Meta Multigraph Search with Partial Message Propagation on Heterogeneous Information Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26026}, DOI={10.1609/aaai.v37i7.26026}, abstractNote={Heterogeneous information networks (HINs) are widely employed for describing real-world data with intricate entities and relationships. To automatically utilize their semantic information, graph neural architecture search has recently been developed for various tasks of HINs. Existing works, on the other hand, show weaknesses in instability and inflexibility. To address these issues, we propose a novel method called Partial Message Meta Multigraph search (PMMM) to automatically optimize the neural architecture design on HINs. Specifically, to learn how graph neural networks (GNNs) propagate messages along various types of edges, PMMM adopts an efficient differentiable framework to search for a meaningful meta multigraph, which can capture more flexible and complex semantic relations than a meta graph. 
The differentiable search typically suffers from performance instability, so we further propose a stable algorithm called partial message search to ensure that the searched meta multigraph consistently surpasses the manually designed meta-structures, i.e., meta-paths. Extensive experiments on six benchmark datasets over two representative tasks, including node classification and recommendation, demonstrate the effectiveness of the proposed method. Our approach outperforms the state-of-the-art heterogeneous GNNs, finds out meaningful meta multigraphs, and is significantly more stable. Our code is available at https://github.com/JHL-HUST/PMMM.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Chao and Xu, Hao and He, Kun}, year={2023}, month={Jun.}, pages={8518-8526} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26026/25798", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26026", + "pdf_size": 333910, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3077753926342470761&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/JHL-HUST/PMMM", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "0+0;0+0;0+0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25714", + "title": "Differentially Private Condorcet Voting", + "track": "main", + "status": "Technical", + "abstract": "Designing private voting rules is an important and pressing problem for trustworthy democracy. 
In this paper, under the framework of differential privacy, we propose a novel famliy of randomized voting rules based on the well-known Condorcet method, and focus on three classes of voting rules in this family: Laplacian Condorcet method (CMLAP), exponential Condorcet method (CMEXP), and randomized response Condorcet method (CMRR), where \u03bb represents the level of noise. We prove that all of our rules satisfy absolute monotonicity, lexi-participation, probabilistic Pareto efficiency, approximate probabilistic Condorcet criterion, and approximate SD-strategyproofness. In addition, CMRR satisfies (non-approximate) probabilistic Condorcet criterion, while CMLAP and CMEXP satisfy strong lexi-participation. Finally, we regard differential privacy as a voting axiom, and discuss its relations to other axioms.", + "primary_area": "game theory and economic paradigms", + "author": "Zhechen Li; Ao Liu; Lirong Xia; Yongzhi Cao; Hanpin Wang", + "authorids": "", + "aff": "Key Laboratory of High Con\ufb01dence Software Technologies (MOE), School of Computer Science, Peking University, China; Department of Computer Science, Rensselaer Polytechnic Institute; Department of Computer Science, Rensselaer Polytechnic Institute; Key Laboratory of High Con\ufb01dence Software Technologies (MOE), School of Computer Science, Peking University, China; School of Computer Science and Cyber Engineering, Guangzhou University, China + Key Laboratory of High Con\ufb01dence Software Technologies (MOE), School of Computer Science, Peking University, China", + "bibtex": "@article{Li_Liu_Xia_Cao_Wang_2023, title={Differentially Private Condorcet Voting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25714}, DOI={10.1609/aaai.v37i5.25714}, abstractNote={Designing private voting rules is an important and pressing problem for trustworthy democracy. 
In this paper, under the framework of differential privacy, we propose a novel famliy of randomized voting rules based on the well-known Condorcet method, and focus on three classes of voting rules in this family: Laplacian Condorcet method (CMLAP), exponential Condorcet method (CMEXP), and randomized response Condorcet method (CMRR), where \u03bb represents the level of noise. We prove that all of our rules satisfy absolute monotonicity, lexi-participation, probabilistic Pareto efficiency, approximate probabilistic Condorcet criterion, and approximate SD-strategyproofness. In addition, CMRR satisfies (non-approximate) probabilistic Condorcet criterion, while CMLAP and CMEXP satisfy strong lexi-participation. Finally, we regard differential privacy as a voting axiom, and discuss its relations to other axioms.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zhechen and Liu, Ao and Xia, Lirong and Cao, Yongzhi and Wang, Hanpin}, year={2023}, month={Jun.}, pages={5755-5763} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25714/25486", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25714", + "pdf_size": 255285, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6835911744385191090&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;rpi.edu;gmail.com;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;rpi.edu;gmail.com;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;2+0", + "aff_unique_norm": "Peking University;Rensselaer Polytechnic Institute;Guangzhou University", + "aff_unique_dep": "School of Computer Science;Department of Computer Science;School of Computer Science and Cyber Engineering", + "aff_unique_url": "http://www.pku.edu.cn;https://www.rpi.edu;http://www.gzhu.edu.cn", + "aff_unique_abbr": "Peking U;RPI;GZHU", + "aff_campus_unique_index": "1", + 
"aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0;1;1;0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25721", + "title": "Differentially Private Fair Division", + "track": "main", + "status": "Technical", + "abstract": "Fairness and privacy are two important concerns in social decision-making processes such as resource allocation. We study privacy in the fair allocation of indivisible resources using the well-established framework of differential privacy. We present algorithms for approximate envy-freeness and proportionality when two instances are considered to be adjacent if they differ only on the utility of a single agent for a single item. On the other hand, we provide strong negative results for both fairness criteria when the adjacency notion allows the entire utility function of a single agent to change.", + "primary_area": "game theory and economic paradigms", + "author": "Pasin Manurangsi; Warut Suksompong", + "authorids": "", + "aff": "Google Research; National University of Singapore", + "bibtex": "@article{Manurangsi_Suksompong_2023, title={Differentially Private Fair Division}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25721}, DOI={10.1609/aaai.v37i5.25721}, abstractNote={Fairness and privacy are two important concerns in social decision-making processes such as resource allocation. We study privacy in the fair allocation of indivisible resources using the well-established framework of differential privacy. We present algorithms for approximate envy-freeness and proportionality when two instances are considered to be adjacent if they differ only on the utility of a single agent for a single item. 
On the other hand, we provide strong negative results for both fairness criteria when the adjacency notion allows the entire utility function of a single agent to change.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Manurangsi, Pasin and Suksompong, Warut}, year={2023}, month={Jun.}, pages={5814-5822} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25721/25493", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25721", + "pdf_size": 169159, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1639283186268820387&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "; ", + "email": "; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Google;National University of Singapore", + "aff_unique_dep": "Google Research;", + "aff_unique_url": "https://research.google;https://www.nus.edu.sg", + "aff_unique_abbr": "Google Research;NUS", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Mountain View;", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United States;Singapore" + }, + { + "id": "article-25933", + "title": "Differentially Private Heatmaps", + "track": "main", + "status": "Technical", + "abstract": "We consider the task of producing heatmaps from users' aggregated data while protecting their privacy. We give a differentially private (DP) algorithm for this task and demonstrate its advantages over previous algorithms on real-world datasets.\n\nOur core algorithmic primitive is a DP procedure that takes in a set of distributions and produces an output that is close in Earth Mover's Distance (EMD) to the average of the inputs. 
We prove theoretical bounds on the error of our algorithm under a certain sparsity assumption and that these are essentially optimal.", + "primary_area": "machine learning i", + "author": "Badih Ghazi; Junfeng He; Kai Kohlhoff; Ravi Kumar; Pasin Manurangsi; Vidhya Navalpakkam; Nachiappan Valliappan", + "authorids": "", + "aff": "Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA", + "bibtex": "@article{Ghazi_He_Kohlhoff_Kumar_Manurangsi_Navalpakkam_Valliappan_2023, title={Differentially Private Heatmaps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25933}, DOI={10.1609/aaai.v37i6.25933}, abstractNote={We consider the task of producing heatmaps from users\u2019 aggregated data while protecting their privacy. We give a differentially private (DP) algorithm for this task and demonstrate its advantages over previous algorithms on real-world datasets. Our core algorithmic primitive is a DP procedure that takes in a set of distributions and produces an output that is close in Earth Mover\u2019s Distance (EMD) to the average of the inputs. 
We prove theoretical bounds on the error of our algorithm under a certain sparsity assumption and that these are essentially optimal.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ghazi, Badih and He, Junfeng and Kohlhoff, Kai and Kumar, Ravi and Manurangsi, Pasin and Navalpakkam, Vidhya and Valliappan, Nachiappan}, year={2023}, month={Jun.}, pages={7696-7704} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25933/25705", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25933", + "pdf_size": 543034, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=655720521685801354&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;google.com;google.com;gmail.com;google.com;google.com;google.com", + "email": "gmail.com;google.com;google.com;gmail.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26242", + "title": "Differentially Private Learning with Per-Sample Adaptive Clipping", + "track": "main", + "status": "Technical", + "abstract": "Privacy in AI remains a topic that draws attention from researchers and the general public in recent years. As one way to implement privacy-preserving AI, differentially private learning is a framework that enables AI models to use differential privacy (DP). To achieve DP in the learning process, existing algorithms typically limit the magnitude of gradients with a constant clipping, which requires carefully tuned due to its significant impact on model performance. 
As a solution to this issue, latest works NSGD and Auto-S innovatively propose to use normalization instead of clipping to avoid hyperparameter tuning. However, normalization-based approaches like NSGD and Auto-S rely on a monotonic weight function, which imposes excessive weight on small gradient samples and introduces extra deviation to the update. In this paper, we propose a Differentially Private Per-Sample Adaptive Clipping (DP-PSAC) algorithm based on a non-monotonic adaptive weight function, which guarantees privacy without the typical hyperparameter tuning process of using a constant clipping while significantly reducing the deviation between the update and true batch-averaged gradient. We provide a rigorous theoretical convergence analysis and show that with convergence rate at the same order, the proposed algorithm achieves a lower non-vanishing bound, which is maintained over training iterations, compared with NSGD/Auto-S. In addition, through extensive experimental evaluation, we show that DP-PSAC outperforms or matches the state-of-the-art methods on multiple main-stream vision and language tasks.", + "primary_area": "machine learning iv", + "author": "Tianyu Xia; Shuheng Shen; Su Yao; Xinyi Fu; Ke Xu; Xiaolong Xu; Xing Fu", + "authorids": "", + "aff": "School of Software & Microelectronics, Peking University; Tiansuan Lab, Ant Group; Beijing National Research Center for Information Science and Technology (BNRist), Tsinghua University; Tiansuan Lab, Ant Group; Department of Computer Science & Technology, Tsinghua University+Zhongguancun Laboratory, Beijing; Tiansuan Lab, Ant Group; Tiansuan Lab, Ant Group", + "bibtex": "@article{Xia_Shen_Yao_Fu_Xu_Xu_Fu_2023, title={Differentially Private Learning with Per-Sample Adaptive Clipping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26242}, DOI={10.1609/aaai.v37i9.26242}, abstractNote={Privacy in AI remains a topic that draws attention from researchers and the general public in recent 
years. As one way to implement privacy-preserving AI, differentially private learning is a framework that enables AI models to use differential privacy (DP). To achieve DP in the learning process, existing algorithms typically limit the magnitude of gradients with a constant clipping, which requires carefully tuned due to its significant impact on model performance. As a solution to this issue, latest works NSGD and Auto-S innovatively propose to use normalization instead of clipping to avoid hyperparameter tuning. However, normalization-based approaches like NSGD and Auto-S rely on a monotonic weight function, which imposes excessive weight on small gradient samples and introduces extra deviation to the update. In this paper, we propose a Differentially Private Per-Sample Adaptive Clipping (DP-PSAC) algorithm based on a non-monotonic adaptive weight function, which guarantees privacy without the typical hyperparameter tuning process of using a constant clipping while significantly reducing the deviation between the update and true batch-averaged gradient. We provide a rigorous theoretical convergence analysis and show that with convergence rate at the same order, the proposed algorithm achieves a lower non-vanishing bound, which is maintained over training iterations, compared with NSGD/Auto-S. 
In addition, through extensive experimental evaluation, we show that DP-PSAC outperforms or matches the state-of-the-art methods on multiple main-stream vision and language tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xia, Tianyu and Shen, Shuheng and Yao, Su and Fu, Xinyi and Xu, Ke and Xu, Xiaolong and Fu, Xing}, year={2023}, month={Jun.}, pages={10444-10452} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26242/26014", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26242", + "pdf_size": 218509, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5339608576406335663&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.pku.edu.cn;antgroup.com;tsinghua.edu.cn;antgroup.com;tsinghua.edu.cn;antgroup.com;antgroup.com", + "email": "stu.pku.edu.cn;antgroup.com;tsinghua.edu.cn;antgroup.com;tsinghua.edu.cn;antgroup.com;antgroup.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;1;2+3;1;1", + "aff_unique_norm": "Peking University;Ant Group;Tsinghua University;Zhongguancun Laboratory", + "aff_unique_dep": "School of Software & Microelectronics;Tiansuan Lab;Beijing National Research Center for Information Science and Technology (BNRist);", + "aff_unique_url": "http://www.pku.edu.cn;https://www.antgroup.com;https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "PKU;Ant Group;Tsinghua;", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26452", + "title": "Differentially Private Nonlinear Causal Discovery from Numerical Data", + "track": "main", + "status": "Technical", + "abstract": "Recently, several methods such as private ANM, EM-PC and Priv-PC have been proposed to perform differentially private causal discovery in various scenarios including bivariate, multivariate Gaussian 
and categorical cases. However, there is little effort on how to conduct private nonlinear causal discovery from numerical data. This work tries to challenge this problem. To this end, we propose a method to infer nonlinear causal relations from observed numerical data by using regression-based conditional independence test (RCIT) that consists of kernel ridge regression (KRR) and Hilbert-Schmidt independence criterion (HSIC) with permutation approximation. Sensitivity analysis for RCIT is given and a private constraint-based causal discovery framework with differential privacy guarantee is developed. Extensive simulations and real-world experiments for both conditional independence test and causal discovery are conducted, which show that our method is effective in handling nonlinear numerical cases and easy to implement. The source code of our method and data are available at https://github.com/Causality-Inference/PCD.", + "primary_area": "reasoning under uncertainty", + "author": "Hao Zhang; Yewei Xia; Yixin Ren; Jihong Guan; Shuigeng Zhou", + "authorids": "", + "aff": "Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China; Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China; Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China; Department of Computer Science & Technology, Tongji University, China; Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China", + "bibtex": "@article{Zhang_Xia_Ren_Guan_Zhou_2023, title={Differentially Private Nonlinear Causal Discovery from Numerical Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26452}, DOI={10.1609/aaai.v37i10.26452}, abstractNote={Recently, several methods such as private ANM, EM-PC and Priv-PC have been proposed to perform differentially private causal 
discovery in various scenarios including bivariate, multivariate Gaussian and categorical cases. However, there is little effort on how to conduct private nonlinear causal discovery from numerical data. This work tries to challenge this problem. To this end, we propose a method to infer nonlinear causal relations from observed numerical data by using regression-based conditional independence test (RCIT) that consists of kernel ridge regression (KRR) and Hilbert-Schmidt independence criterion (HSIC) with permutation approximation. Sensitivity analysis for RCIT is given and a private constraint-based causal discovery framework with differential privacy guarantee is developed. Extensive simulations and real-world experiments for both conditional independence test and causal discovery are conducted, which show that our method is effective in handling nonlinear numerical cases and easy to implement. The source code of our method and data are available at https://github.com/Causality-Inference/PCD.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Hao and Xia, Yewei and Ren, Yixin and Guan, Jihong and Zhou, Shuigeng}, year={2023}, month={Jun.}, pages={12321-12328} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26452/26224", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26452", + "pdf_size": 258814, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16621132207498672376&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;tongji.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;tongji.edu.cn;fudan.edu.cn", + "github": "https://github.com/Causality-Inference/PCD", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Fudan University;Tongji University", + "aff_unique_dep": "School of Computer Science;Department of Computer Science & 
Technology", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.tongji.edu.cn", + "aff_unique_abbr": "Fudan;Tongji", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26502", + "title": "Diffuser: Efficient Transformers with Multi-Hop Attention Diffusion for Long Sequences", + "track": "main", + "status": "Technical", + "abstract": "Efficient Transformers have been developed for long sequence modeling, due to their subquadratic memory and time complexity. Sparse Transformer is a popular approach to improving the efficiency of Transformers by restricting self-attention to locations specified by the predefined sparse patterns. However, leveraging sparsity may sacrifice expressiveness compared to full-attention, when important token correlations are multiple hops away. To combine advantages of both the efficiency of sparse transformer and the expressiveness of full-attention Transformer, we propose Diffuser, a new state-of-the-art efficient Transformer. Diffuser incorporates all token interactions within one attention layer while maintaining low computation and memory costs. The key idea is to expand the receptive field of sparse attention using Attention Diffusion, which computes multi-hop token correlations based on all paths between corresponding disconnected tokens, besides attention among neighboring tokens. Theoretically, we show the expressiveness of Diffuser as a universal sequence approximator for sequence-to-sequence modeling, and investigate its ability to approximate full-attention by analyzing the graph expander property from the spectral perspective. Experimentally, we investigate the effectiveness of Diffuser with extensive evaluations, including language modeling, image modeling, and Long Range Arena (LRA). 
Evaluation results show that Diffuser achieves improvements by an average of 0.94% on text classification tasks and 2.30% on LRA, with 1.67x memory savings compared to state-of-the-art benchmarks, which demonstrates superior performance of Diffuser in both expressiveness and efficiency aspects.", + "primary_area": "speech natural language processing", + "author": "Aosong Feng; Irene Li; Yuang Jiang; Rex Ying", + "authorids": "", + "aff": "Yale University, New Haven, CT, USA; Yale University, New Haven, CT, USA; Yale University, New Haven, CT, USA; Yale University, New Haven, CT, USA", + "bibtex": "@article{Feng_Li_Jiang_Ying_2023, title={Diffuser: Efficient Transformers with Multi-Hop Attention Diffusion for Long Sequences}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26502}, DOI={10.1609/aaai.v37i11.26502}, abstractNote={Efficient Transformers have been developed for long sequence modeling, due to their subquadratic memory and time complexity. Sparse Transformer is a popular approach to improving the efficiency of Transformers by restricting self-attention to locations specified by the predefined sparse patterns. However, leveraging sparsity may sacrifice expressiveness compared to full-attention, when important token correlations are multiple hops away. To combine advantages of both the efficiency of sparse transformer and the expressiveness of full-attention Transformer, we propose Diffuser, a new state-of-the-art efficient Transformer. Diffuser incorporates all token interactions within one attention layer while maintaining low computation and memory costs. The key idea is to expand the receptive field of sparse attention using Attention Diffusion, which computes multi-hop token correlations based on all paths between corresponding disconnected tokens, besides attention among neighboring tokens. 
Theoretically, we show the expressiveness of Diffuser as a universal sequence approximator for sequence-to-sequence modeling, and investigate its ability to approximate full-attention by analyzing the graph expander property from the spectral perspective. Experimentally, we investigate the effectiveness of Diffuser with extensive evaluations, including language modeling, image modeling, and Long Range Arena (LRA). Evaluation results show that Diffuser achieves improvements by an average of 0.94% on text classification tasks and 2.30% on LRA, with 1.67x memory savings compared to state-of-the-art benchmarks, which demonstrates superior performance of Diffuser in both expressiveness and efficiency aspects.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Feng, Aosong and Li, Irene and Jiang, Yuang and Ying, Rex}, year={2023}, month={Jun.}, pages={12772-12780} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26502/26274", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26502", + "pdf_size": 1729665, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=522302672251218489&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "yale.edu;yale.edu;yale.edu;yale.edu", + "email": "yale.edu;yale.edu;yale.edu;yale.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Yale University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.yale.edu", + "aff_unique_abbr": "Yale", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "New Haven", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26145", + "title": "Diffusing Gaussian Mixtures for Generating Categorical Data", + "track": "main", + "status": "Technical", + "abstract": "Learning a categorical distribution comes with its own set of challenges. 
A successful approach taken by state-of-the-art works is to cast the problem in a continuous domain to take advantage of the impressive performance of the generative models for continuous data. Amongst them are the recently emerging diffusion probabilistic models, which have the observed advantage of generating high-quality samples. Recent advances for categorical generative models have focused on log likelihood improvements. In this work, we propose a generative model for categorical data based on diffusion models with a focus on high-quality sample generation, and propose sampled-based evaluation methods. The efficacy of our method stems from performing diffusion in the continuous domain while having its parameterization informed by the structure of the categorical nature of the target distribution. Our method of evaluation highlights the capabilities and limitations of different generative models for generating categorical data, and includes experiments on synthetic and real-world protein datasets.", + "primary_area": "machine learning iii", + "author": "Florence Regol; Mark Coates", + "authorids": "", + "aff": "Dept. Electrical and Computer Engineering, McGill University; Dept. Electrical and Computer Engineering, McGill University", + "bibtex": "@article{Regol_Coates_2023, title={Diffusing Gaussian Mixtures for Generating Categorical Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26145}, DOI={10.1609/aaai.v37i8.26145}, abstractNote={Learning a categorical distribution comes with its own set of challenges. A successful approach taken by state-of-the-art works is to cast the problem in a continuous domain to take advantage of the impressive performance of the generative models for continuous data. Amongst them are the recently emerging diffusion probabilistic models, which have the observed advantage of generating high-quality samples. Recent advances for categorical generative models have focused on log likelihood improvements. 
In this work, we propose a generative model for categorical data based on diffusion models with a focus on high-quality sample generation, and propose sampled-based evaluation methods. The efficacy of our method stems from performing diffusion in the continuous domain while having its parameterization informed by the structure of the categorical nature of the target distribution. Our method of evaluation highlights the capabilities and limitations of different generative models for generating categorical data, and includes experiments on synthetic and real-world protein datasets.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Regol, Florence and Coates, Mark}, year={2023}, month={Jun.}, pages={9570-9578} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26145/25917", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26145", + "pdf_size": 356466, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6343867850744902834&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.mcgill.ca;mcgill.ca", + "email": "mail.mcgill.ca;mcgill.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "McGill University", + "aff_unique_dep": "Dept. Electrical and Computer Engineering", + "aff_unique_url": "https://www.mcgill.ca", + "aff_unique_abbr": "McGill", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26093", + "title": "Diffusion Models Beat GANs on Topology Optimization", + "track": "main", + "status": "Technical", + "abstract": "Structural topology optimization, which aims to find the optimal physical structure that maximizes mechanical performance, is vital in engineering design applications in aerospace, mechanical, and civil engineering. 
Recently, generative adversarial networks (GANs) have emerged as a popular alternative to traditional iterative topology optimization methods. However, GANs can be challenging to train, have limited generalizability, and often neglect important performance objectives such as mechanical compliance and manufacturability. To address these issues, we propose a new architecture called TopoDiff that uses conditional diffusion models to perform performance-aware and manufacturability-aware topology optimization. Our method introduces a surrogate model-based guidance strategy that actively favors structures with low compliance and good manufacturability. Compared to a state-of-the-art conditional GAN, our approach reduces the average error on physical performance by a factor of eight and produces eleven times fewer infeasible samples. Our work demonstrates the potential of using diffusion models in topology optimization and suggests a general framework for solving engineering optimization problems using external performance with constraint-aware guidance. We provide access to our data, code, and trained models at the following link: https://decode.mit.edu/projects/topodiff/.", + "primary_area": "machine learning iii", + "author": "Fran\u00e7ois Maz\u00e9; Faez Ahmed", + "authorids": "", + "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology", + "bibtex": "@article{Maz\u00e9_Ahmed_2023, title={Diffusion Models Beat GANs on Topology Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26093}, DOI={10.1609/aaai.v37i8.26093}, abstractNote={Structural topology optimization, which aims to find the optimal physical structure that maximizes mechanical performance, is vital in engineering design applications in aerospace, mechanical, and civil engineering. Recently, generative adversarial networks (GANs) have emerged as a popular alternative to traditional iterative topology optimization methods. 
However, GANs can be challenging to train, have limited generalizability, and often neglect important performance objectives such as mechanical compliance and manufacturability. To address these issues, we propose a new architecture called TopoDiff that uses conditional diffusion models to perform performance-aware and manufacturability-aware topology optimization. Our method introduces a surrogate model-based guidance strategy that actively favors structures with low compliance and good manufacturability. Compared to a state-of-the-art conditional GAN, our approach reduces the average error on physical performance by a factor of eight and produces eleven times fewer infeasible samples. Our work demonstrates the potential of using diffusion models in topology optimization and suggests a general framework for solving engineering optimization problems using external performance with constraint-aware guidance. We provide access to our data, code, and trained models at the following link: https://decode.mit.edu/projects/topodiff/.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Maz\u00e9, Fran\u00e7ois and Ahmed, Faez}, year={2023}, month={Jun.}, pages={9108-9116} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26093/25865", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26093", + "pdf_size": 1997923, + "gs_citation": 83, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2268080758993415399&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": "etu.minesparis.psl.eu;mit.edu", + "email": "etu.minesparis.psl.eu;mit.edu", + "github": "", + "project": "https://decode.mit.edu/projects/topodiff/", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://web.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25677", + "title": "Direct Heterogeneous Causal Learning for Resource Allocation Problems in Marketing", + "track": "main", + "status": "Technical", + "abstract": "Marketing is an important mechanism to increase user engagement and improve platform revenue, and heterogeneous causal learning can help develop more effective strategies. Most decision-making problems in marketing can be formulated as resource allocation problems and have been studied for decades. Existing works usually divide the solution procedure into two fully decoupled stages, i.e., machine learning (ML) and operation research (OR) --- the first stage predicts the model parameters and they are fed to the optimization in the second stage. However, the error of the predicted parameters in ML cannot be respected and a series of complex mathematical operations in OR lead to the increased accumulative errors. Essentially, the improved precision on the prediction parameters may not have a positive correlation on the final solution due to the side-effect from the decoupled design.\n\nIn this paper, we propose a novel approach for solving resource allocation problems to mitigate the side-effects. Our key intuition is that we introduce the decision factor to establish a bridge between ML and OR such that the solution can be directly obtained in OR by only performing the sorting or comparison operations on the decision factor. Furthermore, we design a customized loss function that can conduct direct heterogeneous causal learning on the decision factor, an unbiased estimation of which can be guaranteed when the loss convergences. As a case study, we apply our approach to two crucial problems in marketing: the binary treatment assignment problem and the budget allocation problem with multiple treatments. 
Both large-scale simulations and online A/B Tests demonstrate that our approach achieves significant improvement compared with state-of-the-art.", + "primary_area": "domain s of application", + "author": "Hao Zhou; Shaoming Li; Guibin Jiang; Jiaqi Zheng; Dong Wang", + "authorids": "", + "aff": "Meituan, China; Meituan, China; Meituan, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; Meituan, China", + "bibtex": "@article{Zhou_Li_Jiang_Zheng_Wang_2023, title={Direct Heterogeneous Causal Learning for Resource Allocation Problems in Marketing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25677}, DOI={10.1609/aaai.v37i4.25677}, abstractNote={Marketing is an important mechanism to increase user engagement and improve platform revenue, and heterogeneous causal learning can help develop more effective strategies. Most decision-making problems in marketing can be formulated as resource allocation problems and have been studied for decades. Existing works usually divide the solution procedure into two fully decoupled stages, i.e., machine learning (ML) and operation research (OR) --- the first stage predicts the model parameters and they are fed to the optimization in the second stage. However, the error of the predicted parameters in ML cannot be respected and a series of complex mathematical operations in OR lead to the increased accumulative errors. Essentially, the improved precision on the prediction parameters may not have a positive correlation on the final solution due to the side-effect from the decoupled design. In this paper, we propose a novel approach for solving resource allocation problems to mitigate the side-effects. Our key intuition is that we introduce the decision factor to establish a bridge between ML and OR such that the solution can be directly obtained in OR by only performing the sorting or comparison operations on the decision factor. 
Furthermore, we design a customized loss function that can conduct direct heterogeneous causal learning on the decision factor, an unbiased estimation of which can be guaranteed when the loss convergences. As a case study, we apply our approach to two crucial problems in marketing: the binary treatment assignment problem and the budget allocation problem with multiple treatments. Both large-scale simulations and online A/B Tests demonstrate that our approach achieves significant improvement compared with state-of-the-art.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Hao and Li, Shaoming and Jiang, Guibin and Zheng, Jiaqi and Wang, Dong}, year={2023}, month={Jun.}, pages={5446-5454} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25677/25449", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25677", + "pdf_size": 276067, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11720803148026058710&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 5, + "aff_domain": "meituan.com;meituan.com;meituan.com;nju.edu.cn;meituan.com", + "email": "meituan.com;meituan.com;meituan.com;nju.edu.cn;meituan.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Meituan;Nanjing University", + "aff_unique_dep": ";State Key Laboratory for Novel Software Technology", + "aff_unique_url": "https://www.meituan.com;http://www.nju.edu.cn", + "aff_unique_abbr": "Meituan;Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25913", + "title": "Directed Acyclic Graph Structure Learning from Dynamic Graphs", + "track": "main", + "status": "Technical", + "abstract": "Estimating the structure of directed acyclic graphs (DAGs) of features (variables) plays a vital role in revealing the latent data generation 
process and providing causal insights in various applications. Although there have been many studies on structure learning with various types of data, the structure learning on the dynamic graph has not been explored yet, and thus we study the learning problem of node feature generation mechanism on such ubiquitous dynamic graph data. In a dynamic graph, we propose to simultaneously estimate contemporaneous relationships and time-lagged interaction relationships between the node features. These two kinds of relationships form a DAG, which could effectively characterize the feature generation process in a concise way. To learn such a DAG, we cast the learning problem as a continuous score-based optimization problem, which consists of a differentiable score function to measure the validity of the learned DAGs and a smooth acyclicity constraint to ensure the acyclicity of the learned DAGs. These two components are translated into an unconstraint augmented Lagrangian objective which could be minimized by mature continuous optimization techniques. The resulting algorithm, named GraphNOTEARS, outperforms baselines on simulated data across a wide range of settings that may encounter in real-world applications. 
We also apply the proposed approach on two dynamic graphs constructed from the real-world Yelp dataset, demonstrating our method could learn the connections between node features, which conforms with the domain knowledge.", + "primary_area": "machine learning i", + "author": "Shaohua Fan; Shuyang Zhang; Xiao Wang; Chuan Shi", + "authorids": "", + "aff": "Beijing University of Posts and Telecommunications, China; Beijing University of Posts and Telecommunications, China; Beijing University of Posts and Telecommunications, China + Peng Cheng Laboratory, China; Beijing University of Posts and Telecommunications, China + Peng Cheng Laboratory, China", + "bibtex": "@article{Fan_Zhang_Wang_Shi_2023, title={Directed Acyclic Graph Structure Learning from Dynamic Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25913}, DOI={10.1609/aaai.v37i6.25913}, abstractNote={Estimating the structure of directed acyclic graphs (DAGs) of features (variables) plays a vital role in revealing the latent data generation process and providing causal insights in various applications. Although there have been many studies on structure learning with various types of data, the structure learning on the dynamic graph has not been explored yet, and thus we study the learning problem of node feature generation mechanism on such ubiquitous dynamic graph data. In a dynamic graph, we propose to simultaneously estimate contemporaneous relationships and time-lagged interaction relationships between the node features. These two kinds of relationships form a DAG, which could effectively characterize the feature generation process in a concise way. To learn such a DAG, we cast the learning problem as a continuous score-based optimization problem, which consists of a differentiable score function to measure the validity of the learned DAGs and a smooth acyclicity constraint to ensure the acyclicity of the learned DAGs. 
These two components are translated into an unconstraint augmented Lagrangian objective which could be minimized by mature continuous optimization techniques. The resulting algorithm, named GraphNOTEARS, outperforms baselines on simulated data across a wide range of settings that may encounter in real-world applications. We also apply the proposed approach on two dynamic graphs constructed from the real-world Yelp dataset, demonstrating our method could learn the connections between node features, which conforms with the domain knowledge.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fan, Shaohua and Zhang, Shuyang and Wang, Xiao and Shi, Chuan}, year={2023}, month={Jun.}, pages={7512-7521} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25913/25685", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25913", + "pdf_size": 1528891, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9745443349635455818&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0+1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bupt.edu.cn/;", + "aff_unique_abbr": "BUPT;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26150", + "title": "DisGUIDE: Disagreement-Guided Data-Free Model Extraction", + "track": "main", + "status": "Technical", + "abstract": "Recent model-extraction attacks on Machine Learning as a Service (MLaaS) systems have moved towards data-free approaches, showing the feasibility of stealing models trained with difficult-to-access 
data. However, these attacks are ineffective or limited due to the low accuracy of extracted models and the high number of queries to the models under attack. The high query cost makes such techniques infeasible for online MLaaS systems that charge per query.\nWe create a novel approach to get higher accuracy and query efficiency than prior data-free model extraction techniques. Specifically, we introduce a novel generator training scheme that maximizes the disagreement loss between two clone models that attempt to copy the model under attack. This loss, combined with diversity loss and experience replay, enables the generator to produce better instances to train the clone models. Our evaluation on popular datasets CIFAR-10 and CIFAR-100 shows that our approach improves the final model accuracy by up to 3.42% and 18.48% respectively. The average number of queries required to achieve the accuracy of the prior state of the art is reduced by up to 64.95%. We hope this will promote future work on feasible data-free model extraction and defenses against such attacks.", + "primary_area": "machine learning iii", + "author": "Jonathan Rosenthal; Eric Enouen; Hung Viet Pham; Lin Tan", + "authorids": "", + "aff": "Purdue University; The Ohio State University + Purdue University; York University + University of Waterloo; Purdue University", + "bibtex": "@article{Rosenthal_Enouen_Pham_Tan_2023, title={DisGUIDE: Disagreement-Guided Data-Free Model Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26150}, DOI={10.1609/aaai.v37i8.26150}, abstractNote={Recent model-extraction attacks on Machine Learning as a Service (MLaaS) systems have moved towards data-free approaches, showing the feasibility of stealing models trained with difficult-to-access data. However, these attacks are ineffective or limited due to the low accuracy of extracted models and the high number of queries to the models under attack. 
The high query cost makes such techniques infeasible for online MLaaS systems that charge per query.\nWe create a novel approach to get higher accuracy and query efficiency than prior data-free model extraction techniques. Specifically, we introduce a novel generator training scheme that maximizes the disagreement loss between two clone models that attempt to copy the model under attack. This loss, combined with diversity loss and experience replay, enables the generator to produce better instances to train the clone models. Our evaluation on popular datasets CIFAR-10 and CIFAR-100 shows that our approach improves the final model accuracy by up to 3.42% and 18.48% respectively. The average number of queries required to achieve the accuracy of the prior state of the art is reduced by up to 64.95%. We hope this will promote future work on feasible data-free model extraction and defenses against such attacks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rosenthal, Jonathan and Enouen, Eric and Pham, Hung Viet and Tan, Lin}, year={2023}, month={Jun.}, pages={9614-9622} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26150/25922", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26150", + "pdf_size": 314847, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12058109638298105495&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "purdue.edu;osu.edu;yorku.ca;purdue.edu", + "email": "purdue.edu;osu.edu;yorku.ca;purdue.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+0;2+3;0", + "aff_unique_norm": "Purdue University;The Ohio State University;York University;University of Waterloo", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.purdue.edu;https://www.osu.edu;https://www.yorku.ca;https://uwaterloo.ca", + "aff_unique_abbr": "Purdue;OSU;York U;UW", + "aff_campus_unique_index": ";", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;1+1;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-25946", + "title": "Discriminability and Transferability Estimation: A Bayesian Source Importance Estimation Approach for Multi-Source-Free Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Source free domain adaptation (SFDA) transfers a single-source model to the unlabeled target domain without accessing the source data. With the intelligence development of various fields, a zoo of source models is more commonly available, arising in a new setting called multi-source-free domain adaptation (MSFDA). We find that the critical inborn challenge of MSFDA is how to estimate the importance (contribution) of each source model. In this paper, we shed new Bayesian light on the fact that the posterior probability of source importance connects to discriminability and transferability. We propose Discriminability And Transferability Estimation (DATE), a universal solution for source importance estimation. Specifically, a proxy discriminability perception module equips with habitat uncertainty and density to evaluate each sample's surrounding environment. A source-similarity transferability perception module quantifies the data distribution similarity and encourages the transferability to be reasonably distributed with a domain diversity loss. Extensive experiments show that DATE can precisely and objectively estimate the source importance and outperform prior arts by non-trivial margins. 
Moreover, experiments demonstrate that DATE can take the most popular SFDA networks as backbones and make them become advanced MSFDA solutions.", + "primary_area": "machine learning i", + "author": "Zhongyi Han; Zhiyan Zhang; Fan Wang; Rundong He; Wan Su; Xiaoming Xi; Yilong Yin", + "authorids": "", + "aff": "School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Computer Science and Technology, Shandong Jianzhu University; School of Software, Shandong University", + "bibtex": "@article{Han_Zhang_Wang_He_Su_Xi_Yin_2023, title={Discriminability and Transferability Estimation: A Bayesian Source Importance Estimation Approach for Multi-Source-Free Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25946}, DOI={10.1609/aaai.v37i6.25946}, abstractNote={Source free domain adaptation (SFDA) transfers a single-source model to the unlabeled target domain without accessing the source data. With the intelligence development of various fields, a zoo of source models is more commonly available, arising in a new setting called multi-source-free domain adaptation (MSFDA). We find that the critical inborn challenge of MSFDA is how to estimate the importance (contribution) of each source model. In this paper, we shed new Bayesian light on the fact that the posterior probability of source importance connects to discriminability and transferability. We propose Discriminability And Transferability Estimation (DATE), a universal solution for source importance estimation. Specifically, a proxy discriminability perception module equips with habitat uncertainty and density to evaluate each sample\u2019s surrounding environment. 
A source-similarity transferability perception module quantifies the data distribution similarity and encourages the transferability to be reasonably distributed with a domain diversity loss. Extensive experiments show that DATE can precisely and objectively estimate the source importance and outperform prior arts by non-trivial margins. Moreover, experiments demonstrate that DATE can take the most popular SFDA networks as backbones and make them become advanced MSFDA solutions.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Han, Zhongyi and Zhang, Zhiyan and Wang, Fan and He, Rundong and Su, Wan and Xi, Xiaoming and Yin, Yilong}, year={2023}, month={Jun.}, pages={7811-7820} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25946/25718", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25946", + "pdf_size": 1252052, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=87311266190400031&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com; ; ; ; ; ;sdu.edu.cn", + "email": "gmail.com; ; ; ; ; ;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;0", + "aff_unique_norm": "Shandong University;Shandong Jianzhu University", + "aff_unique_dep": "School of Software;School of Computer Science and Technology", + "aff_unique_url": "http://www.sdu.edu.cn;http://www.sdjzu.edu.cn", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25216", + "title": "Disentangle and Remerge: Interventional Knowledge Distillation for Few-Shot Object Detection from a Conditional Causal Perspective", + "track": "main", + "status": "Technical", + "abstract": "Few-shot learning models learn representations with limited human annotations, and such a learning paradigm demonstrates 
practicability in various tasks, e.g., image classification, object detection, etc. However, few-shot object detection methods suffer from an intrinsic defect that the limited training data makes the model cannot sufficiently explore semantic information. To tackle this, we introduce knowledge distillation to the few-shot object detection learning paradigm. We further run a motivating experiment, which demonstrates that in the process of knowledge distillation, the empirical error of the teacher model degenerates the prediction performance of the few-shot object detection model as the student. To understand the reasons behind this phenomenon, we revisit the learning paradigm of knowledge distillation on the few-shot object detection task from the causal theoretic standpoint, and accordingly, develop a Structural Causal Model. Following the theoretical guidance, we propose a backdoor adjustment-based knowledge distillation method for the few-shot object detection task, namely Disentangle and Remerge (D&R), to perform conditional causal intervention toward the corresponding Structural Causal Model. Empirically, the experiments on benchmarks demonstrate that D&R can yield significant performance boosts in few-shot object detection. 
Code is available at https://github.com/ZYN-1101/DandR.git.", + "primary_area": "computer vision i", + "author": "Jiangmeng Li; Yanan Zhang; Wenwen Qiang; Lingyu Si; Chengbo Jiao; Xiaohui Hu; Changwen Zheng; Fuchun Sun", + "authorids": "", + "aff": "University of Chinese Academy of Sciences+Institute of Software Chinese Academy of Sciences; University of Chinese Academy of Sciences+Institute of Software Chinese Academy of Sciences; University of Chinese Academy of Sciences+Institute of Software Chinese Academy of Sciences; University of Chinese Academy of Sciences+Institute of Software Chinese Academy of Sciences; University of Electronic Science and Technology of China; Institute of Software Chinese Academy of Sciences; Institute of Software Chinese Academy of Sciences; Tsinghua University", + "bibtex": "@article{Li_Zhang_Qiang_Si_Jiao_Hu_Zheng_Sun_2023, title={Disentangle and Remerge: Interventional Knowledge Distillation for Few-Shot Object Detection from a Conditional Causal Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25216}, DOI={10.1609/aaai.v37i1.25216}, abstractNote={Few-shot learning models learn representations with limited human annotations, and such a learning paradigm demonstrates practicability in various tasks, e.g., image classification, object detection, etc. However, few-shot object detection methods suffer from an intrinsic defect that the limited training data makes the model cannot sufficiently explore semantic information. To tackle this, we introduce knowledge distillation to the few-shot object detection learning paradigm. We further run a motivating experiment, which demonstrates that in the process of knowledge distillation, the empirical error of the teacher model degenerates the prediction performance of the few-shot object detection model as the student. 
To understand the reasons behind this phenomenon, we revisit the learning paradigm of knowledge distillation on the few-shot object detection task from the causal theoretic standpoint, and accordingly, develop a Structural Causal Model. Following the theoretical guidance, we propose a backdoor adjustment-based knowledge distillation method for the few-shot object detection task, namely Disentangle and Remerge (D&R), to perform conditional causal intervention toward the corresponding Structural Causal Model. Empirically, the experiments on benchmarks demonstrate that D&R can yield significant performance boosts in few-shot object detection. Code is available at https://github.com/ZYN-1101/DandR.git.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jiangmeng and Zhang, Yanan and Qiang, Wenwen and Si, Lingyu and Jiao, Chengbo and Hu, Xiaohui and Zheng, Changwen and Sun, Fuchun}, year={2023}, month={Jun.}, pages={1323-1333} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25216/24988", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25216", + "pdf_size": 601544, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15547678845945567643&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;hotmail.com;mail.tsinghua.edu.cn", + "email": "iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;hotmail.com;mail.tsinghua.edu.cn", + "github": "https://github.com/ZYN-1101/DandR.git", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;1;1;3", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;University of Electronic Science and Technology of China;Tsinghua University", + "aff_unique_dep": ";Institute of Software;;", + "aff_unique_url": 
"http://www.ucas.ac.cn;http://www.ios.ac.cn;https://www.uestc.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "UCAS;CAS;UESTC;THU", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26604", + "title": "Disentangled CVAEs with Contrastive Learning for Explainable Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Modern recommender systems are increasingly expected to provide informative explanations that enable users to understand the reason for particular recommendations. However, previous methods struggle to interpret the input IDs of user--item pairs in real-world datasets, failing to extract adequate characteristics for controllable generation. To address this issue, we propose disentangled conditional variational autoencoders (CVAEs) for explainable recommendation, which leverage disentangled latent preference factors and guide the explanation generation with the refined condition of CVAEs via a self-regularization contrastive learning loss. 
Extensive experiments demonstrate that our method generates high-quality explanations and achieves new state-of-the-art results in diverse domains.", + "primary_area": "speech natural language processing", + "author": "Linlin Wang; Zefeng Cai; Gerard de Melo; Zhu Cao; Liang He", + "authorids": "", + "aff": "East China Normal University; East China Normal University; Hasso Plattner Institute, University of Potsdam; East China University of Science and Technology; East China Normal University", + "bibtex": "@article{Wang_Cai_de Melo_Cao_He_2023, title={Disentangled CVAEs with Contrastive Learning for Explainable Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26604}, DOI={10.1609/aaai.v37i11.26604}, abstractNote={Modern recommender systems are increasingly expected to provide informative explanations that enable users to understand the reason for particular recommendations. However, previous methods struggle to interpret the input IDs of user--item pairs in real-world datasets, failing to extract adequate characteristics for controllable generation. To address this issue, we propose disentangled conditional variational autoencoders (CVAEs) for explainable recommendation, which leverage disentangled latent preference factors and guide the explanation generation with the refined condition of CVAEs via a self-regularization contrastive learning loss. 
Extensive experiments demonstrate that our method generates high-quality explanations and achieves new state-of-the-art results in diverse domains.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Linlin and Cai, Zefeng and de Melo, Gerard and Cao, Zhu and He, Liang}, year={2023}, month={Jun.}, pages={13691-13699} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26604/26376", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26604", + "pdf_size": 2116249, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12120564013989470421&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "cs.ecnu.edu.cn;cs.ecnu.edu.cn;foxmail.com;demelo.org;gmail.com", + "email": "cs.ecnu.edu.cn;cs.ecnu.edu.cn;foxmail.com;demelo.org;gmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "East China Normal University;Hasso Plattner Institute;East China University of Science and Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.hpi.de;http://www.ecust.edu.cn", + "aff_unique_abbr": "ECNU;HPI;ECUST", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Potsdam", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;Germany" + }, + { + "id": "article-26266", + "title": "Disentangled Representation for Causal Mediation Analysis", + "track": "main", + "status": "Technical", + "abstract": "Estimating direct and indirect causal effects from observational data is crucial to understanding the causal mechanisms and predicting the behaviour under different interventions. Causal mediation analysis is a method that is often used to reveal direct and indirect effects. 
Deep learning shows promise in mediation analysis, but the current methods only assume latent confounders that affect treatment, mediator and outcome simultaneously, and fail to identify different types of latent confounders (e.g., confounders that only affect the mediator or outcome). Furthermore, current methods are based on the sequential ignorability assumption, which is not feasible for dealing with multiple types of latent confounders. This work aims to circumvent the sequential ignorability assumption and applies the piecemeal deconfounding assumption as an alternative. We propose the Disentangled Mediation Analysis Variational AutoEncoder (DMAVAE), which disentangles the representations of latent confounders into three types to accurately estimate the natural direct effect, natural indirect effect and total effect. Experimental results show that the proposed method outperforms existing methods and has strong generalisation ability. We further apply the method to a real-world dataset to show its potential application.", + "primary_area": "machine learning iv", + "author": "Ziqi Xu; Debo Cheng; Jiuyong Li; Jixue Liu; Lin Liu; Ke Wang", + "authorids": "", + "aff": "University of South Australia; University of South Australia+Guangxi Normal University; University of South Australia; University of South Australia; University of South Australia; Simon Fraser University", + "bibtex": "@article{Xu_Cheng_Li_Liu_Liu_Wang_2023, title={Disentangled Representation for Causal Mediation Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26266}, DOI={10.1609/aaai.v37i9.26266}, abstractNote={Estimating direct and indirect causal effects from observational data is crucial to understanding the causal mechanisms and predicting the behaviour under different interventions. Causal mediation analysis is a method that is often used to reveal direct and indirect effects. 
Deep learning shows promise in mediation analysis, but the current methods only assume latent confounders that affect treatment, mediator and outcome simultaneously, and fail to identify different types of latent confounders (e.g., confounders that only affect the mediator or outcome). Furthermore, current methods are based on the sequential ignorability assumption, which is not feasible for dealing with multiple types of latent confounders. This work aims to circumvent the sequential ignorability assumption and applies the piecemeal deconfounding assumption as an alternative. We propose the Disentangled Mediation Analysis Variational AutoEncoder (DMAVAE), which disentangles the representations of latent confounders into three types to accurately estimate the natural direct effect, natural indirect effect and total effect. Experimental results show that the proposed method outperforms existing methods and has strong generalisation ability. We further apply the method to a real-world dataset to show its potential application.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Ziqi and Cheng, Debo and Li, Jiuyong and Liu, Jixue and Liu, Lin and Wang, Ke}, year={2023}, month={Jun.}, pages={10666-10674} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26266/26038", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26266", + "pdf_size": 694113, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10463313848638655418&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mymail.unisa.edu.au;gmail.com;unisa.edu.au;unisa.edu.au;unisa.edu.au;cs.sfu.ca", + "email": "mymail.unisa.edu.au;gmail.com;unisa.edu.au;unisa.edu.au;unisa.edu.au;cs.sfu.ca", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0;2", + "aff_unique_norm": "University of South Australia;Guangxi Normal University;Simon Fraser University", + 
"aff_unique_dep": ";;", + "aff_unique_url": "https://www.unisa.edu.au;http://www.gxnu.edu.cn;https://www.sfu.ca", + "aff_unique_abbr": "UNISA;;SFU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0;0;0;2", + "aff_country_unique": "Australia;China;Canada" + }, + { + "id": "article-25084", + "title": "Disentangling Reafferent Effects by Doing Nothing", + "track": "main", + "status": "Technical", + "abstract": "An agent's ability to distinguish between sensory effects that are self-caused, and those that are not, is instrumental in the achievement of its goals. This ability is thought to be central to a variety of functions in biological organisms, from perceptual stabilisation and accurate motor control, to higher level cognitive functions such as planning, mirroring and the sense of agency. Although many of these functions are well studied in AI, this important distinction is rarely made explicit and the focus tends to be on the associational relationship between action and sensory effect or success. Toward the development of more general agents, we develop a framework that enables agents to disentangle self-caused and externally-caused sensory effects. 
Informed by relevant models and experiments in robotics, and in the biological and cognitive sciences, we demonstrate the general applicability of this framework through an extensive experimental evaluation over three different environments.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Benedict Wilkins; Kostas Stathis", + "authorids": "", + "aff": "Department of Computer Science, Royal Holloway University of London; Department of Computer Science, Royal Holloway University of London", + "bibtex": "@article{Wilkins_Stathis_2023, title={Disentangling Reafferent Effects by Doing Nothing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25084}, DOI={10.1609/aaai.v37i1.25084}, abstractNote={An agent\u2019s ability to distinguish between sensory effects that are self-caused, and those that are not, is instrumental in the achievement of its goals. This ability is thought to be central to a variety of functions in biological organisms, from perceptual stabilisation and accurate motor control, to higher level cognitive functions such as planning, mirroring and the sense of agency. Although many of these functions are well studied in AI, this important distinction is rarely made explicit and the focus tends to be on the associational relationship between action and sensory effect or success. Toward the development of more general agents, we develop a framework that enables agents to disentangle self-caused and externally-caused sensory effects. 
Informed by relevant models and experiments in robotics, and in the biological and cognitive sciences, we demonstrate the general applicability of this framework through an extensive experimental evaluation over three different environments.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wilkins, Benedict and Stathis, Kostas}, year={2023}, month={Jun.}, pages={128-136} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25084/24856", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25084", + "pdf_size": 781212, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12368083530308216417&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;rhul.ac.uk", + "email": "gmail.com;rhul.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Royal Holloway University of London", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.royalholloway.ac.uk", + "aff_unique_abbr": "RHUL", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26959", + "title": "Disentangling the Benefits of Self-Supervised Learning to Deployment-Driven Downstream Tasks of Satellite Images (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this paper, we investigate the benefits of self-supervised learning (SSL) to downstream tasks of satellite images. Unlike common student academic projects, this work focuses on the advantages of the SSL for deployment-driven tasks which have specific scenarios with low or high-spatial resolution images. 
Our preliminary experiments demonstrate the robust benefits of the SSL trained by medium-resolution (10m) images to both low-resolution (100m) scene classification case (4.25%\u2191) and very high-resolution (5cm) aerial image segmentation case (1.96%\u2191), respectively.", + "primary_area": "", + "author": "Zhuo Deng; Yibing Wei; Mingye Zhu; Xueliang Wang; Junchi Zhou; Zhicheng Yang; Hang Zhou; Zhenjie Cao; Lan Ma; Mei Han; Jui-Hsin Lai", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Shenzhen, Guangdong, China+Ping An Technology, Shenzhen, Guangdong, China; University of Wisconsin-Madison, Madison, WI, USA+PAII Inc., Palo Alto, CA, USA; University of Science and Technology of China, Hefei, Anhui, China+Ping An Technology, Shenzhen, Guangdong, China; Tsinghua Shenzhen International Graduate School, Shenzhen, Guangdong, China; Tsinghua Shenzhen International Graduate School, Shenzhen, Guangdong, China; PAII Inc., Palo Alto, CA, USA; PAII Inc., Palo Alto, CA, USA; Ping An Technology, Shenzhen, Guangdong, China+Tsinghua Shenzhen International Graduate School, Shenzhen, Guangdong, China; Tsinghua Shenzhen International Graduate School, Shenzhen, Guangdong, China; PAII Inc., Palo Alto, CA, USA; PAII Inc., Palo Alto, CA, USA", + "bibtex": "@article{Deng_Wei_Zhu_Wang_Zhou_Yang_Zhou_Cao_Ma_Han_Lai_2024, title={Disentangling the Benefits of Self-Supervised Learning to Deployment-Driven Downstream Tasks of Satellite Images (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26959}, DOI={10.1609/aaai.v37i13.26959}, abstractNote={In this paper, we investigate the benefits of self-supervised learning (SSL) to downstream tasks of satellite images. Unlike common student academic projects, this work focuses on the advantages of the SSL for deployment-driven tasks which have specific scenarios with low or high-spatial resolution images. 
Our preliminary experiments demonstrate the robust benefits of the SSL trained by medium-resolution (10m) images to both low-resolution (100m) scene classification case (4.25%\u2191) and very high-resolution (5cm) aerial image segmentation case (1.96%\u2191), respectively.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Zhuo and Wei, Yibing and Zhu, Mingye and Wang, Xueliang and Zhou, Junchi and Yang, Zhicheng and Zhou, Hang and Cao, Zhenjie and Ma, Lan and Han, Mei and Lai, Jui-Hsin}, year={2024}, month={Jul.}, pages={16198-16199} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26959/26731", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26959", + "pdf_size": 62116, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:y2T9L_iyopsJ:scholar.google.com/&scioq=Disentangling+the+Benefits+of+Self-Supervised+Learning+to+Deployment-Driven+Downstream+Tasks+of+Satellite+Images+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;gmail.com; ; ; ; ; ; ; ; ;gmail.com", + "email": "mails.tsinghua.edu.cn;gmail.com; ; ; ; ; ; ; ; ;gmail.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0+1;2+3;4+1;0;0;3;3;1+0;0;3;3", + "aff_unique_norm": "Tsinghua University;Ping An Technology;University of Wisconsin-Madison;PAII Inc.;University of Science and Technology of China", + "aff_unique_dep": "International Graduate School;;;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.pingan.com;https://www.wisc.edu;;http://www.ustc.edu.cn", + "aff_unique_abbr": "THU;Ping An;UW-Madison;;USTC", + "aff_campus_unique_index": "0+0;1+2;3+0;0;0;2;2;0+0;0;2;2", + "aff_campus_unique": "Shenzhen;Madison;Palo Alto;Hefei", + "aff_country_unique_index": "0+0;1+1;0+0;0;0;1;1;0+0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25914", + "title": "Dish-TS: A General 
Paradigm for Alleviating Distribution Shift in Time Series Forecasting", + "track": "main", + "status": "Technical", + "abstract": "The distribution shift in Time Series Forecasting (TSF), indicating series distribution changes over time, largely hinders the performance of TSF models. Existing works towards distribution shift in time series are mostly limited in the quantification of distribution and, more importantly, overlook the potential shift between lookback and horizon windows. To address above challenges, we systematically summarize the distribution shift in TSF into two categories. Regarding lookback windows as input-space and horizon windows as output-space, there exist (i) intra-space shift, that the distribution within the input-space keeps shifted over time, and (ii) inter-space shift, that the distribution is shifted between input-space and output-space. Then we introduce, Dish-TS, a general neural paradigm for alleviating distribution shift in TSF. Specifically, for better distribution estimation, we propose the coefficient net (Conet), which can be any neural architectures, to map input sequences into learnable distribution coefficients. To relieve intra-space and inter-space shift, we organize Dish-TS as a Dual-Conet framework to separately learn the distribution of input- and output-space, which naturally captures the distribution difference of two spaces. In addition, we introduce a more effective training strategy for intractable Conet learning. Finally, we conduct extensive experiments on several datasets coupled with different state-of-the-art forecasting models. Experimental results show Dish-TS consistently boosts them with a more than 20% average improvement. 
Code is available at https://github.com/weifantt/Dish-TS.", + "primary_area": "machine learning i", + "author": "Wei Fan; Pengyang Wang; Dongkun Wang; Dongjie Wang; Yuanchun Zhou; Yanjie Fu", + "authorids": "", + "aff": "Department of Computer Science, University of Central Florida; State Key Laboratory of Internet of Things for Smart City, University of Macau; State Key Laboratory of Internet of Things for Smart City, University of Macau; Department of Computer Science, University of Central Florida; Computer Network Information Center, Chinese Academy of Sciences; Department of Computer Science, University of Central Florida", + "bibtex": "@article{Fan_Wang_Wang_Wang_Zhou_Fu_2023, title={Dish-TS: A General Paradigm for Alleviating Distribution Shift in Time Series Forecasting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25914}, DOI={10.1609/aaai.v37i6.25914}, abstractNote={The distribution shift in Time Series Forecasting (TSF), indicating series distribution changes over time, largely hinders the performance of TSF models. Existing works towards distribution shift in time series are mostly limited in the quantification of distribution and, more importantly, overlook the potential shift between lookback and horizon windows. To address above challenges, we systematically summarize the distribution shift in TSF into two categories. Regarding lookback windows as input-space and horizon windows as output-space, there exist (i) intra-space shift, that the distribution within the input-space keeps shifted over time, and (ii) inter-space shift, that the distribution is shifted between input-space and output-space. Then we introduce, Dish-TS, a general neural paradigm for alleviating distribution shift in TSF. Specifically, for better distribution estimation, we propose the coefficient net (Conet), which can be any neural architectures, to map input sequences into learnable distribution coefficients. 
To relieve intra-space and inter-space shift, we organize Dish-TS as a Dual-Conet framework to separately learn the distribution of input- and output-space, which naturally captures the distribution difference of two spaces. In addition, we introduce a more effective training strategy for intractable Conet learning. Finally, we conduct extensive experiments on several datasets coupled with different state-of-the-art forecasting models. Experimental results show Dish-TS consistently boosts them with a more than 20% average improvement. Code is available at https://github.com/weifantt/Dish-TS.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fan, Wei and Wang, Pengyang and Wang, Dongkun and Wang, Dongjie and Zhou, Yuanchun and Fu, Yanjie}, year={2023}, month={Jun.}, pages={7522-7529} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25914/25686", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25914", + "pdf_size": 707323, + "gs_citation": 94, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15200120863915401155&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "knights.ucf.edu;knights.ucf.edu;um.edu.mo;um.edu.mo;cnic.cn;ucf.edu", + "email": "knights.ucf.edu;knights.ucf.edu;um.edu.mo;um.edu.mo;cnic.cn;ucf.edu", + "github": "https://github.com/weifantt/Dish-TS", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;0;2;0", + "aff_unique_norm": "University of Central Florida;University of Macau;Chinese Academy of Sciences", + "aff_unique_dep": "Department of Computer Science;State Key Laboratory of Internet of Things for Smart City;Computer Network Information Center", + "aff_unique_url": "https://www.ucf.edu;https://www.um.edu.mo;http://www.cas.cn", + "aff_unique_abbr": "UCF;UM;CAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0;2;0", + "aff_country_unique": "United States;Macau;China" + }, + { + 
"id": "article-26583", + "title": "Distantly-Supervised Named Entity Recognition with Adaptive Teacher Learning and Fine-Grained Student Ensemble", + "track": "main", + "status": "Technical", + "abstract": "Distantly-Supervised Named Entity Recognition (DS-NER) effectively alleviates the data scarcity problem in NER by automatically generating training samples. Unfortunately, the distant supervision may induce noisy labels, thus undermining the robustness of the learned models and restricting the practical application. \nTo relieve this problem, recent works adopt self-training teacher-student frameworks to gradually refine the training labels and improve the generalization ability of NER models. However, we argue that the performance of the current self-training frameworks for DS-NER is severely underestimated by their plain designs, including both inadequate student learning and coarse-grained teacher updating. Therefore, in this paper, we make the first attempt to alleviate these issues by proposing: \n(1) adaptive teacher learning comprised of joint training of two teacher-student networks and considering both consistent and inconsistent predictions between two teachers, thus promoting comprehensive student learning. \n(2) fine-grained student ensemble that updates each fragment of the teacher model with a temporal moving average of the corresponding fragment of the student, which enhances consistent predictions on each model fragment against noise. \nTo verify the effectiveness of our proposed method, we conduct experiments on four DS-NER datasets. The experimental results demonstrate that our method significantly surpasses previous SOTA methods. 
The code is available at https://github.com/zenhjunpro/ATSEN.", + "primary_area": "speech natural language processing", + "author": "Xiaoye Qu; Jun Zeng; Daizong Liu; Zhefeng Wang; Baoxing Huai; Pan Zhou", + "authorids": "", + "aff": "Huawei Cloud; School of Software Engineering, Huazhong University of Science and Technology; Peking University; Huawei Cloud + Hubei Key Laboratory of Distributed System Security, Hubei Engineering Research Center on Big Data Security, School of Cyber Science and Engineering, Huazhong University of Science and Technology; Huawei Cloud; Hubei Key Laboratory of Distributed System Security, Hubei Engineering Research Center on Big Data Security, School of Cyber Science and Engineering, Huazhong University of Science and Technology", + "bibtex": "@article{Qu_Zeng_Liu_Wang_Huai_Zhou_2023, title={Distantly-Supervised Named Entity Recognition with Adaptive Teacher Learning and Fine-Grained Student Ensemble}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26583}, DOI={10.1609/aaai.v37i11.26583}, abstractNote={Distantly-Supervised Named Entity Recognition (DS-NER) effectively alleviates the data scarcity problem in NER by automatically generating training samples. Unfortunately, the distant supervision may induce noisy labels, thus undermining the robustness of the learned models and restricting the practical application. To relieve this problem, recent works adopt self-training teacher-student frameworks to gradually refine the training labels and improve the generalization ability of NER models. However, we argue that the performance of the current self-training frameworks for DS-NER is severely underestimated by their plain designs, including both inadequate student learning and coarse-grained teacher updating. 
Therefore, in this paper, we make the first attempt to alleviate these issues by proposing: (1) adaptive teacher learning comprised of joint training of two teacher-student networks and considering both consistent and inconsistent predictions between two teachers, thus promoting comprehensive student learning. (2) fine-grained student ensemble that updates each fragment of the teacher model with a temporal moving average of the corresponding fragment of the student, which enhances consistent predictions on each model fragment against noise. To verify the effectiveness of our proposed method, we conduct experiments on four DS-NER datasets. The experimental results demonstrate that our method significantly surpasses previous SOTA methods. The code is available at https://github.com/zenhjunpro/ATSEN.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qu, Xiaoye and Zeng, Jun and Liu, Daizong and Wang, Zhefeng and Huai, Baoxing and Zhou, Pan}, year={2023}, month={Jun.}, pages={13501-13509} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26583/26355", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26583", + "pdf_size": 310202, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11064537848233491596&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "huawei.com;hust.edu.cn;stu.pku.edu.cn;huawei.com;huawei.com;hust.edu.cn", + "email": "huawei.com;hust.edu.cn;stu.pku.edu.cn;huawei.com;huawei.com;hust.edu.cn", + "github": "https://github.com/zenhjunpro/ATSEN", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0+1;0;1", + "aff_unique_norm": "Huawei;Huazhong University of Science and Technology;Peking University", + "aff_unique_dep": "Huawei Cloud;School of Software Engineering;", + "aff_unique_url": "https://www.huaweicloud.com;http://www.hust.edu.cn;http://www.pku.edu.cn", + "aff_unique_abbr": "Huawei Cloud;HUST;Peking U", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26218", + "title": "Distributed Projection-Free Online Learning for Smooth and Convex Losses", + "track": "main", + "status": "Technical", + "abstract": "We investigate the problem of distributed online convex optimization with complicated constraints, in which the projection operation could be the computational bottleneck. To avoid projections, distributed online projection-free methods have been proposed and attain an O(T^{3/4}) regret bound for general convex losses. However, they cannot utilize the smoothness condition, which has been exploited in the centralized setting to improve the regret. In this paper, we propose a new distributed online projection-free method with a tighter regret bound of O(T^{2/3}) for smooth and convex losses. Specifically, we first provide a distributed extension of Follow-the-Perturbed-Leader so that the smoothness can be utilized in the distributed setting. Then, we reduce the computational cost via sampling and blocking techniques. In this way, our method only needs to solve one linear optimization per round on average. 
Finally, we conduct experiments on benchmark datasets to verify the effectiveness of our proposed method.", + "primary_area": "machine learning iii", + "author": "Yibo Wang; Yuanyu Wan; Shimao Zhang; Lijun Zhang", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China + Peng Cheng Laboratory, Shenzhen 518055, China; School of Software Technology, Zhejiang University, Ningbo 315048, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China + Peng Cheng Laboratory, Shenzhen 518055, China", + "bibtex": "@article{Wang_Wan_Zhang_Zhang_2023, title={Distributed Projection-Free Online Learning for Smooth and Convex Losses}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26218}, DOI={10.1609/aaai.v37i8.26218}, abstractNote={We investigate the problem of distributed online convex optimization with complicated constraints, in which the projection operation could be the computational bottleneck. To avoid projections, distributed online projection-free methods have been proposed and attain an O(T^{3/4}) regret bound for general convex losses. However, they cannot utilize the smoothness condition, which has been exploited in the centralized setting to improve the regret. In this paper, we propose a new distributed online projection-free method with a tighter regret bound of O(T^{2/3}) for smooth and convex losses. Specifically, we first provide a distributed extension of Follow-the-Perturbed-Leader so that the smoothness can be utilized in the distributed setting. Then, we reduce the computational cost via sampling and blocking techniques. In this way, our method only needs to solve one linear optimization per round on average. 
Finally, we conduct experiments on benchmark datasets to verify the effectiveness of our proposed method.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yibo and Wan, Yuanyu and Zhang, Shimao and Zhang, Lijun}, year={2023}, month={Jun.}, pages={10226-10234} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26218/25990", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26218", + "pdf_size": 538414, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16397938836138830651&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "lamda.nju.edu.cn;zju.edu.cn;smail.nju.edu.cn;lamda.nju.edu.cn", + "email": "lamda.nju.edu.cn;zju.edu.cn;smail.nju.edu.cn;lamda.nju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0;0+1", + "aff_unique_norm": "Nanjing University;Peng Cheng Laboratory;Zhejiang University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;;School of Software Technology", + "aff_unique_url": "http://www.nju.edu.cn;;http://www.zju.edu.cn", + "aff_unique_abbr": "Nanjing U;;ZJU", + "aff_campus_unique_index": "0+1;2;0;0+1", + "aff_campus_unique": "Nanjing;Shenzhen;Ningbo", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25798", + "title": "Distributed Spectrum-Based Fault Localization", + "track": "main", + "status": "Technical", + "abstract": "Spectrum-Based Fault Localization (SFL) is a popular approach for diagnosing faulty systems. SFL algorithms are inherently centralized, where observations are collected and analyzed by a single diagnoser. Applying SFL to diagnose distributed systems is challenging, especially when communication is costly and there are privacy concerns. 
We propose two SFL-based algorithms that are designed for distributed systems: one for diagnosing a single faulty component and one for diagnosing multiple faults. We analyze these algorithms theoretically and empirically. Our analysis shows that the distributed SFL algorithms we developed output identical diagnoses to centralized SFL while preserving privacy.", + "primary_area": "knowledge representation and reasoning", + "author": "Avraham Natan; Roni Stern; Meir Kalech", + "authorids": "", + "aff": "Ben-Gurion University of the Negev, Israel; Ben-Gurion University of the Negev, Israel; Ben-Gurion University of the Negev, Israel", + "bibtex": "@article{Natan_Stern_Kalech_2023, title={Distributed Spectrum-Based Fault Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25798}, DOI={10.1609/aaai.v37i5.25798}, abstractNote={Spectrum-Based Fault Localization (SFL) is a popular approach for diagnosing faulty systems. SFL algorithms are inherently centralized, where observations are collected and analyzed by a single diagnoser. Applying SFL to diagnose distributed systems is challenging, especially when communication is costly and there are privacy concerns. We propose two SFL-based algorithms that are designed for distributed systems: one for diagnosing a single faulty component and one for diagnosing multiple faults. We analyze these algorithms theoretically and empirically. 
Our analysis shows that the distributed SFL algorithms we developed output identical diagnoses to centralized SFL while preserving privacy.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Natan, Avraham and Stern, Roni and Kalech, Meir}, year={2023}, month={Jun.}, pages={6491-6498} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25798/25570", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25798", + "pdf_size": 154491, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=285690691661075340&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;gmail.com;bgu.ac.il", + "email": "gmail.com;gmail.com;bgu.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Ben-Gurion University of the Negev", + "aff_unique_dep": "", + "aff_unique_url": "https://www.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-26804", + "title": "Distributed Stochastic Nested Optimization for Emerging Machine Learning Models: Algorithm and Theory", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Traditional machine learning models can be formulated as the expected risk minimization (ERM) problem:\nmin_{w \u2208 R^d} E_\u03be [l(w; \u03be)], where w \u2208 R^d denotes the model parameter, \u03be represents training samples, l(\u00b7) is the loss function. Numerous optimization algorithms, such as stochastic gradient descent (SGD), have been developed to solve the ERM problem. However, a wide range of emerging machine learning models are beyond this class of optimization problems, such as model-agnostic meta-learning (Finn, Abbeel, and Levine 2017). 
Of particular interest of my research is the stochastic nested optimization (SNO) problem, whose objective function has a nested structure. Specifically, I have been focusing on two instances of this kind of problem: stochastic compositional optimization (SCO) problems, which cover meta-learning, area-under-the-precision recall-curve optimization, contrastive self-supervised learning, etc., and stochastic bilevel optimization (SBO) problems, which can be applied to meta-learning, hyperparameter optimization, neural network architecture search, etc.\nWith the emergence of large-scale distributed data, such as the user data generated on mobile devices or intelligent hardware, it is imperative to develop distributed optimization algorithms for SNO (Distributed SNO). A significant challenge for optimizing distributed SNO problems lies in that the stochastic (hyper-)gradient is a biased estimation of the full gradient. Thus, existing distributed optimization algorithms when applied to them suffer from slow convergence rates. 
In this talk, I will discuss my recent works about distributed SCO (Gao and Huang 2021; Gao, Li, and Huang 2022) and distributed SBO (Gao, Gu, and Thai 2022; Gao 2022) under both centralized and decentralized settings, including algorithmic details about reducing the bias of stochastic gradient, theoretical convergence rate, and practical machine learning applications, and then highlight challenges for future research.", + "primary_area": "", + "author": "Hongchang Gao", + "authorids": "", + "aff": "Department of Computer and Information Sciences, Temple University, PA, USA", + "bibtex": "@article{Gao_2024, title={Distributed Stochastic Nested Optimization for Emerging Machine Learning Models: Algorithm and Theory}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26804}, DOI={10.1609/aaai.v37i13.26804}, abstractNote={Traditional machine learning models can be formulated as the expected risk minimization (ERM) problem:\nmin_{w \u2208 R^d} E_\u03be [l(w; \u03be)], where w \u2208 R^d denotes the model parameter, \u03be represents training samples, l(\u00b7) is the loss function. Numerous optimization algorithms, such as stochastic gradient descent (SGD), have been developed to solve the ERM problem. However, a wide range of emerging machine learning models are beyond this class of optimization problems, such as model-agnostic meta-learning (Finn, Abbeel, and Levine 2017). Of particular interest of my research is the stochastic nested optimization (SNO) problem, whose objective function has a nested structure. 
Specifically, I have been focusing on two instances of this kind of problem: stochastic compositional optimization (SCO) problems, which cover meta-learning, area-under-the-precision recall-curve optimization, contrastive self-supervised learning, etc., and stochastic bilevel optimization (SBO) problems, which can be applied to meta-learning, hyperparameter optimization, neural network architecture search, etc.\nWith the emergence of large-scale distributed data, such as the user data generated on mobile devices or intelligent hardware, it is imperative to develop distributed optimization algorithms for SNO (Distributed SNO). A significant challenge for optimizing distributed SNO problems lies in that the stochastic (hyper-)gradient is a biased estimation of the full gradient. Thus, existing distributed optimization algorithms when applied to them suffer from slow convergence rates. In this talk, I will discuss my recent works about distributed SCO (Gao and Huang 2021; Gao, Li, and Huang 2022) and distributed SBO (Gao, Gu, and Thai 2022; Gao 2022) under both centralized and decentralized settings, including algorithmic details about reducing the bias of stochastic gradient, theoretical convergence rate, and practical machine learning applications, and then highlight challenges for future research.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Hongchang}, year={2024}, month={Jul.}, pages={15437-15437} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26804/26576", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26804", + "pdf_size": 396293, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7183361682371046357&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff_domain": "temple.edu", + "email": "temple.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Temple University", + 
"aff_unique_dep": "Department of Computer and Information Sciences", + "aff_unique_url": "https://www.temple.edu", + "aff_unique_abbr": "Temple", + "aff_campus_unique_index": "0", + "aff_campus_unique": "PA", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26394", + "title": "Distributionally Robust Optimization with Probabilistic Group", + "track": "main", + "status": "Technical", + "abstract": "Modern machine learning models may be susceptible to learning spurious correlations that hold on average but not for the atypical group of samples. To address the problem, previous approaches minimize the empirical worst-group risk. Despite the promise, they often assume that each sample belongs to one and only one group, which does not allow expressing the uncertainty in group labeling. In this paper, we propose a novel framework PG-DRO, which explores the idea of probabilistic group membership for distributionally robust optimization. Key to our framework, we consider soft group membership instead of hard group annotations. The group probabilities can be flexibly generated using either supervised learning or zero-shot approaches. Our framework accommodates samples with group membership ambiguity, offering stronger flexibility and generality than the prior art. 
We comprehensively evaluate PG-DRO on both image classification and natural language processing benchmarks, establishing superior performance.", + "primary_area": "philosophy and ethics of ai", + "author": "Soumya Suvra Ghosal; Yixuan Li", + "authorids": "", + "aff": "Department of Computer Sciences, University of Wisconsin \u2013 Madison; Department of Computer Sciences, University of Wisconsin \u2013 Madison", + "bibtex": "@article{Ghosal_Li_2023, title={Distributionally Robust Optimization with Probabilistic Group}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26394}, DOI={10.1609/aaai.v37i10.26394}, abstractNote={Modern machine learning models may be susceptible to learning spurious correlations that hold on average but not for the atypical group of samples. To address the problem, previous approaches minimize the empirical worst-group risk. Despite the promise, they often assume that each sample belongs to one and only one group, which does not allow expressing the uncertainty in group labeling. In this paper, we propose a novel framework PG-DRO, which explores the idea of probabilistic group membership for distributionally robust optimization. Key to our framework, we consider soft group membership instead of hard group annotations. The group probabilities can be flexibly generated using either supervised learning or zero-shot approaches. Our framework accommodates samples with group membership ambiguity, offering stronger flexibility and generality than the prior art. 
We comprehensively evaluate PG-DRO on both image classification and natural language processing benchmarks, establishing superior performance.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ghosal, Soumya Suvra and Li, Yixuan}, year={2023}, month={Jun.}, pages={11809-11817} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26394/26166", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26394", + "pdf_size": 5503093, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4560529994864647605&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.wisc.edu;cs.wisc.edu", + "email": "cs.wisc.edu;cs.wisc.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Wisconsin\u2013Madison", + "aff_unique_dep": "Department of Computer Sciences", + "aff_unique_url": "https://www.wisc.edu", + "aff_unique_abbr": "UW\u2013Madison", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Madison", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25323", + "title": "Diversified and Realistic 3D Augmentation via Iterative Construction, Random Placement, and HPR Occlusion", + "track": "main", + "status": "Technical", + "abstract": "In autonomous driving, data augmentation is commonly used for improving 3D object detection. The most basic methods include insertion of copied objects and rotation and scaling of the entire training frame. Numerous variants have been developed as well. The existing methods, however, are considerably limited when compared to the variety of the real world possibilities. In this work, we develop a diversified and realistic augmentation method that can flexibly construct a whole-body object, freely locate and rotate the object, and apply self-occlusion and external-occlusion accordingly. 
To improve the diversity of the whole-body object construction, we develop an iterative method that stochastically combines multiple objects observed from the real world into a single object. Unlike the existing augmentation methods, the constructed objects can be randomly located and rotated in the training frame because proper occlusions can be reflected to the whole-body objects in the final step. Finally, proper self-occlusion at each local object level and external-occlusion at the global frame level are applied using the Hidden Point Removal (HPR) algorithm that is computationally efficient. HPR is also used for adaptively controlling the point density of each object according to the object's distance from the LiDAR. Experiment results show that the proposed DR.CPO algorithm is data-efficient and model-agnostic without incurring any computational overhead. Also, DR.CPO can improve mAP performance by 2.08% when compared to the best 3D detection result known for KITTI dataset.", + "primary_area": "computer vision ii", + "author": "Jungwook Shin; Jaeill Kim; Kyungeun Lee; Hyunghun Cho; Wonjong Rhee", + "authorids": "", + "aff": "Department of Intelligence and Information, Seoul National University, Seoul, 08826, South Korea + SK Telecom Co., Ltd, Seoul, 04539, South Korea; Department of Intelligence and Information, Seoul National University, Seoul, 08826, South Korea; Department of Intelligence and Information, Seoul National University, Seoul, 08826, South Korea; Department of Intelligence and Information, Seoul National University, Seoul, 08826, South Korea; Department of Intelligence and Information, Seoul National University, Seoul, 08826, South Korea + Interdisciplinary Program in Artificial Intelligence (IPAI), Seoul National University, Seoul, 08826, South Korea + Artificial Intelligence Institute, Seoul National University, Seoul, 08826, South Korea", + "bibtex": "@article{Shin_Kim_Lee_Cho_Rhee_2023, title={Diversified and Realistic 3D Augmentation via 
Iterative Construction, Random Placement, and HPR Occlusion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25323}, DOI={10.1609/aaai.v37i2.25323}, abstractNote={In autonomous driving, data augmentation is commonly used for improving 3D object detection. The most basic methods include insertion of copied objects and rotation and scaling of the entire training frame. Numerous variants have been developed as well. The existing methods, however, are considerably limited when compared to the variety of the real world possibilities. In this work, we develop a diversified and realistic augmentation method that can flexibly construct a whole-body object, freely locate and rotate the object, and apply self-occlusion and external-occlusion accordingly. To improve the diversity of the whole-body object construction, we develop an iterative method that stochastically combines multiple objects observed from the real world into a single object. Unlike the existing augmentation methods, the constructed objects can be randomly located and rotated in the training frame because proper occlusions can be reflected to the whole-body objects in the final step. Finally, proper self-occlusion at each local object level and external-occlusion at the global frame level are applied using the Hidden Point Removal (HPR) algorithm that is computationally efficient. HPR is also used for adaptively controlling the point density of each object according to the object\u2019s distance from the LiDAR. Experiment results show that the proposed DR.CPO algorithm is data-efficient and model-agnostic without incurring any computational overhead. 
Also, DR.CPO can improve mAP performance by 2.08% when compared to the best 3D detection result known for KITTI dataset.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shin, Jungwook and Kim, Jaeill and Lee, Kyungeun and Cho, Hyunghun and Rhee, Wonjong}, year={2023}, month={Jun.}, pages={2282-2291} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25323/25095", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25323", + "pdf_size": 1136988, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8593648423453704332&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;0;0+0+0", + "aff_unique_norm": "Seoul National University;SK Telecom", + "aff_unique_dep": "Department of Intelligence and Information;", + "aff_unique_url": "https://www.snu.ac.kr;https://www.sktelecom.com", + "aff_unique_abbr": "SNU;SKT", + "aff_campus_unique_index": "0+0;0;0;0;0+0+0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0+0;0;0;0;0+0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26454", + "title": "Diversity Maximization in the Presence of Outliers", + "track": "main", + "status": "Technical", + "abstract": "Given a set X of n points in a metric space, the problem of diversity maximization is to extract a set S of k points from X so that the diversity of S is maximized. This problem is essential in AI-related fields, such as web search, databases, recommender systems, and data mining. Although there have been extensive studies of this problem, these studies assume that X is clean. This usually does not hold, because real-world datasets usually contain outliers. 
The state-of-the-art algorithm for the diversity maximization problem is based on furthest point retrieval, which is too sensitive to outliers. We therefore address the problem of diversity maximization with outliers and propose two algorithms with performance guarantee. The first algorithm runs in O((k+z)n) time, guarantees 1/2-approximation, and returns no outliers, where z is the number of outliers. The second algorithm runs in O(kz) time (which is independent of n), guarantees 1/6(1+epsilon)-approximation, and returns no outliers with constant probability. We conduct experiments on real datasets to demonstrate the effectiveness and efficiency of our algorithms.", + "primary_area": "search and optimization", + "author": "Daichi Amagata", + "authorids": "", + "aff": "Osaka University", + "bibtex": "@article{Amagata_2023, title={Diversity Maximization in the Presence of Outliers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26454}, DOI={10.1609/aaai.v37i10.26454}, abstractNote={Given a set X of n points in a metric space, the problem of diversity maximization is to extract a set S of k points from X so that the diversity of S is maximized. This problem is essential in AI-related fields, such as web search, databases, recommender systems, and data mining. Although there have been extensive studies of this problem, these studies assume that X is clean. This usually does not hold, because real-world datasets usually contain outliers. The state-of-the-art algorithm for the diversity maximization problem is based on furthest point retrieval, which is too sensitive to outliers. We therefore address the problem of diversity maximization with outliers and propose two algorithms with performance guarantee. The first algorithm runs in O((k+z)n) time, guarantees 1/2-approximation, and returns no outliers, where z is the number of outliers. 
The second algorithm runs in O(kz) time (which is independent of n), guarantees 1/6(1+epsilon)-approximation, and returns no outliers with constant probability. We conduct experiments on real datasets to demonstrate the effectiveness and efficiency of our algorithms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Amagata, Daichi}, year={2023}, month={Jun.}, pages={12338-12345} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26454/26226", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26454", + "pdf_size": 549193, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4066198061390306395&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ist.osaka-u.ac.jp", + "email": "ist.osaka-u.ac.jp", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Osaka University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.osaka-u.ac.jp", + "aff_unique_abbr": "Osaka U", + "aff_country_unique_index": "0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26112", + "title": "Do Invariances in Deep Neural Networks Align with Human Perception?", + "track": "main", + "status": "Technical", + "abstract": "An evaluation criterion for safe and trustworthy deep learning is how well the invariances captured by representations of deep neural networks (DNNs) are shared with humans. We identify challenges in measuring these invariances. Prior works used gradient-based methods to generate identically represented inputs (IRIs), ie, inputs which have identical representations (on a given layer) of a neural network, and thus capture invariances of a given network. One necessary criterion for a network's invariances to align with human perception is for its IRIs look 'similar' to humans. 
Prior works, however, have mixed takeaways; some argue that later layers of DNNs do not learn human-like invariances yet others seem to indicate otherwise. We argue that the loss function used to generate IRIs can heavily affect takeaways about invariances of the network and is the primary reason for these conflicting findings. We propose an adversarial regularizer on the IRI generation loss that finds IRIs that make any model appear to have very little shared invariance with humans. Based on this evidence, we argue that there is scope for improving models to have human-like invariances, and further, to have meaningful comparisons between models one should use IRIs generated using the regularizer-free loss. We then conduct an in-depth investigation of how different components (eg architectures, training losses, data augmentations) of the deep learning pipeline contribute to learning models that have good alignment with humans. We find that architectures with residual connections trained using a (self-supervised) contrastive loss with l_p ball adversarial data augmentation tend to learn invariances that are most aligned with humans. Code: github.com/nvedant07/Human-NN-Alignment", + "primary_area": "machine learning iii", + "author": "Vedant Nanda; Ayan Majumdar; Camila Kolling; John P. Dickerson; Krishna P. Gummadi; Bradley C. 
Love; Adrian Weller", + "authorids": "", + "aff": "University of Maryland, College Park, USA+Max Planck Institute for Software Systems (MPI-SWS), Germany; Max Planck Institute for Software Systems (MPI-SWS), Germany; Max Planck Institute for Software Systems (MPI-SWS), Germany; University of Maryland, College Park, USA; Max Planck Institute for Software Systems (MPI-SWS), Germany; The Alan Turing Institute, London, England+University College London, London, England; The Alan Turing Institute, London, England+University of Cambridge, Cambridge, England", + "bibtex": "@article{Nanda_Majumdar_Kolling_Dickerson_Gummadi_Love_Weller_2023, title={Do Invariances in Deep Neural Networks Align with Human Perception?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26112}, DOI={10.1609/aaai.v37i8.26112}, abstractNote={An evaluation criterion for safe and trustworthy deep learning is how well the invariances captured by representations of deep neural networks (DNNs) are shared with humans. We identify challenges in measuring these invariances. Prior works used gradient-based methods to generate identically represented inputs (IRIs), ie, inputs which have identical representations (on a given layer) of a neural network, and thus capture invariances of a given network. One necessary criterion for a network\u2019s invariances to align with human perception is for its IRIs look \u2019similar\u2019 to humans. Prior works, however, have mixed takeaways; some argue that later layers of DNNs do not learn human-like invariances yet others seem to indicate otherwise. We argue that the loss function used to generate IRIs can heavily affect takeaways about invariances of the network and is the primary reason for these conflicting findings. We propose an adversarial regularizer on the IRI generation loss that finds IRIs that make any model appear to have very little shared invariance with humans. 
Based on this evidence, we argue that there is scope for improving models to have human-like invariances, and further, to have meaningful comparisons between models one should use IRIs generated using the regularizer-free loss. We then conduct an in-depth investigation of how different components (eg architectures, training losses, data augmentations) of the deep learning pipeline contribute to learning models that have good alignment with humans. We find that architectures with residual connections trained using a (self-supervised) contrastive loss with l_p ball adversarial data augmentation tend to learn invariances that are most aligned with humans. Code: github.com/nvedant07/Human-NN-Alignment}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nanda, Vedant and Majumdar, Ayan and Kolling, Camila and Dickerson, John P. and Gummadi, Krishna P. and Love, Bradley C. and Weller, Adrian}, year={2023}, month={Jun.}, pages={9277-9285} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26112/25884", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26112", + "pdf_size": 2179752, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7265764544516763509&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mpi-sws.org;mpi-sws.org;mpi-sws.org;cs.umd.edu;mpi-sws.org;ucl.ac.uk;eng.cam.ac.uk", + "email": "mpi-sws.org;mpi-sws.org;mpi-sws.org;cs.umd.edu;mpi-sws.org;ucl.ac.uk;eng.cam.ac.uk", + "github": "github.com/nvedant07/Human-NN-Alignment", + "project": "https://arxiv.org/abs/2111.14726", + "author_num": 7, + "aff_unique_index": "0+1;1;1;0;1;2+3;2+4", + "aff_unique_norm": "University of Maryland;Max Planck Institute for Software Systems;The Alan Turing Institute;University College London;University of Cambridge", + "aff_unique_dep": ";;;;", + "aff_unique_url": 
"https://www/umd.edu;https://www.mpi-sws.org;https://www.turing.ac.uk;https://www.ucl.ac.uk;https://www.cam.ac.uk", + "aff_unique_abbr": "UMD;MPI-SWS;ATI;UCL;Cambridge", + "aff_campus_unique_index": "0;0;2+2;2+3", + "aff_campus_unique": "College Park;;London;Cambridge", + "aff_country_unique_index": "0+1;1;1;0;1;2+2;2+2", + "aff_country_unique": "United States;Germany;United Kingdom" + }, + { + "id": "article-25282", + "title": "DocEdit: Language-Guided Document Editing", + "track": "main", + "status": "Technical", + "abstract": "Professional document editing tools require a certain level of expertise to perform complex edit operations. To make editing tools accessible to increasingly novice users, we investigate intelligent document assistant systems that can make or suggest edits based on a user's natural language request. Such a system should be able to understand the user's ambiguous requests and contextualize them to the visual cues and textual content found in a document image to edit localized unstructured text and structured layouts. To this end, we propose a new task of language-guided localized document editing, where the user provides a document and an open vocabulary editing request, and the intelligent system produces a command that can be used to automate edits in real-world document editing software. In support of this task, we curate the DocEdit dataset, a collection of approximately 28K instances of user edit requests over PDF and design templates along with their corresponding ground truth software executable commands. To our knowledge, this is the first dataset that provides a diverse mix of edit operations with direct and indirect references to the embedded text and visual objects such as paragraphs, lists, tables, etc. We also propose DocEditor, a Transformer-based localization-aware multimodal (textual, spatial, and visual) model that performs the new task. 
The model attends to both document objects and related text contents which may be referred to in a user edit request, generating a multimodal embedding that is used to predict an edit command and associated bounding box localizing it. Our proposed model empirically outperforms other baseline deep learning approaches by 15-18%, providing a strong starting point for future work.", + "primary_area": "computer vision ii", + "author": "Puneet Mathur; Rajiv Jain; Jiuxiang Gu; Franck Dernoncourt; Dinesh Manocha; Vlad I. Morariu", + "authorids": "", + "aff": "University of Maryland, College Park; Adobe Research; Adobe Research; Adobe Research; University of Maryland, College Park; Adobe Research", + "bibtex": "@article{Mathur_Jain_Gu_Dernoncourt_Manocha_Morariu_2023, title={DocEdit: Language-Guided Document Editing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25282}, DOI={10.1609/aaai.v37i2.25282}, abstractNote={Professional document editing tools require a certain level of expertise to perform complex edit operations. To make editing tools accessible to increasingly novice users, we investigate intelligent document assistant systems that can make or suggest edits based on a user\u2019s natural language request. Such a system should be able to understand the user\u2019s ambiguous requests and contextualize them to the visual cues and textual content found in a document image to edit localized unstructured text and structured layouts. To this end, we propose a new task of language-guided localized document editing, where the user provides a document and an open vocabulary editing request, and the intelligent system produces a command that can be used to automate edits in real-world document editing software. In support of this task, we curate the DocEdit dataset, a collection of approximately 28K instances of user edit requests over PDF and design templates along with their corresponding ground truth software executable commands. 
To our knowledge, this is the first dataset that provides a diverse mix of edit operations with direct and indirect references to the embedded text and visual objects such as paragraphs, lists, tables, etc. We also propose DocEditor, a Transformer-based localization-aware multimodal (textual, spatial, and visual) model that performs the new task. The model attends to both document objects and related text contents which may be referred to in a user edit request, generating a multimodal embedding that is used to predict an edit command and associated bounding box localizing it. Our proposed model empirically outperforms other baseline deep learning approaches by 15-18%, providing a strong starting point for future work.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mathur, Puneet and Jain, Rajiv and Gu, Jiuxiang and Dernoncourt, Franck and Manocha, Dinesh and Morariu, Vlad I.}, year={2023}, month={Jun.}, pages={1914-1922} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25282/25054", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25282", + "pdf_size": 1141986, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6151177295668302808&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "umd.edu;adobe.com;adobe.com;adobe.com;umd.edu;adobe.com", + "email": "umd.edu;adobe.com;adobe.com;adobe.com;umd.edu;adobe.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;1", + "aff_unique_norm": "University of Maryland;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www/umd.edu;https://research.adobe.com", + "aff_unique_abbr": "UMD;Adobe", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Park;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26349", + "title": "Does It Pay to Optimize AUC?", + "track": "main", 
+ "status": "Technical", + "abstract": "The Area Under the ROC Curve (AUC) is an important model metric for evaluating binary classifiers, and many algorithms have been proposed to optimize AUC approximately. It raises the question of whether the generally insignificant gains observed by previous studies are due to inherent limitations of the metric or the inadequate quality of optimization.\n\nTo better understand the value of optimizing for AUC, we present an efficient algorithm, namely AUC-opt, to find the provably optimal AUC linear classifier in R2, which runs in O(n+n- log n+n-) where n+ and n- are the number of positive and negative samples respectively. Furthermore, it can be naturally extended to Rd in O(n+n-d-1 log (n+n-)) by recursively calling AUC-opt in lower-dimensional spaces. We prove the problem is NP-complete when d is not fixed, reducing from the open hemisphere problem.\n\nCompared with other methods, experiments show that AUC-opt achieves statistically significant improvements between 17 to 40 in R2 and 4 to 42 in R3 of 50 t-SNE training datasets. However, generally, the gain proves insignificant on most testing datasets compared to the best standard classifiers. Similar observations are found for nonlinear AUC methods under real-world datasets.", + "primary_area": "machine learning iv", + "author": "Baojian Zhou; Steven Skiena", + "authorids": "", + "aff": "Fudan University, Shanghai, China; Stony Brook University, New York, USA", + "bibtex": "@article{Zhou_Skiena_2023, title={Does It Pay to Optimize AUC?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26349}, DOI={10.1609/aaai.v37i9.26349}, abstractNote={The Area Under the ROC Curve (AUC) is an important model metric for evaluating binary classifiers, and many algorithms have been proposed to optimize AUC approximately. 
It raises the question of whether the generally insignificant gains observed by previous studies are due to inherent limitations of the metric or the inadequate quality of optimization. To better understand the value of optimizing for AUC, we present an efficient algorithm, namely AUC-opt, to find the provably optimal AUC linear classifier in R2, which runs in O(n+n- log n+n-) where n+ and n- are the number of positive and negative samples respectively. Furthermore, it can be naturally extended to Rd in O(n+n-d-1 log (n+n-)) by recursively calling AUC-opt in lower-dimensional spaces. We prove the problem is NP-complete when d is not fixed, reducing from the open hemisphere problem. Compared with other methods, experiments show that AUC-opt achieves statistically significant improvements between 17 to 40 in R2 and 4 to 42 in R3 of 50 t-SNE training datasets. However, generally, the gain proves insignificant on most testing datasets compared to the best standard classifiers. Similar observations are found for nonlinear AUC methods under real-world datasets.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Baojian and Skiena, Steven}, year={2023}, month={Jun.}, pages={11408-11416} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26349/26121", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26349", + "pdf_size": 228530, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:zEjC213vA7EJ:scholar.google.com/&scioq=Does+It+Pay+to+Optimize+AUC%3F&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "fudan.edu.cn;cs.stonybrook.edu", + "email": "fudan.edu.cn;cs.stonybrook.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Fudan University;Stony Brook University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.stonybrook.edu", + "aff_unique_abbr": "Fudan;SBU", + 
"aff_campus_unique_index": "0;1", + "aff_campus_unique": "Shanghai;Stony Brook", + "aff_country_unique_index": "0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26887", + "title": "Does Knowing When Help Is Needed Improve Subgoal Hint Performance in an Intelligent Data-Driven Logic Tutor?", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "The assistance dilemma is a well-recognized challenge to determine\nwhen and how to provide help during problem solving\nin intelligent tutoring systems. This dilemma is particularly\nchallenging to address in domains such as logic proofs,\nwhere problems can be solved in a variety of ways. In this\nstudy, we investigate two data-driven techniques to address\nthe when and how of the assistance dilemma, combining a\nmodel that predicts when students need help learning efficient\nstrategies, and hints that suggest what subgoal to achieve.\nWe conduct a study assessing the impact of the new pedagogical\npolicy against a control policy without these adaptive\ncomponents. We found empirical evidence which suggests\nthat showing subgoals in training problems upon predictions\nof the model helped the students who needed it most\nand improved test performance when compared to their control\npeers. 
Our key findings include significantly fewer steps\nin posttest problem solutions for students with low prior proficiency\nand significantly reduced help avoidance for all students\nin training.", + "primary_area": "", + "author": "Nazia Alam; Mehak Maniktala; Behrooz Mostafavi; Min Chi; Tiffany Barnes", + "authorids": "", + "aff": "North Carolina State University; North Carolina State University; North Carolina State University; North Carolina State University; North Carolina State University", + "bibtex": "@article{Alam_Maniktala_Mostafavi_Chi_Barnes_2024, title={Does Knowing When Help Is Needed Improve Subgoal Hint Performance in an Intelligent Data-Driven Logic Tutor?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26887}, DOI={10.1609/aaai.v37i13.26887}, abstractNote={The assistance dilemma is a well-recognized challenge to determine\nwhen and how to provide help during problem solving\nin intelligent tutoring systems. This dilemma is particularly\nchallenging to address in domains such as logic proofs,\nwhere problems can be solved in a variety of ways. In this\nstudy, we investigate two data-driven techniques to address\nthe when and how of the assistance dilemma, combining a\nmodel that predicts when students need help learning efficient\nstrategies, and hints that suggest what subgoal to achieve.\nWe conduct a study assessing the impact of the new pedagogical\npolicy against a control policy without these adaptive\ncomponents. We found empirical evidence which suggests\nthat showing subgoals in training problems upon predictions\nof the model helped the students who needed it most\nand improved test performance when compared to their control\npeers. 
Our key findings include significantly fewer steps\nin posttest problem solutions for students with low prior proficiency\nand significantly reduced help avoidance for all students\nin training.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Alam, Nazia and Maniktala, Mehak and Mostafavi, Behrooz and Chi, Min and Barnes, Tiffany}, year={2024}, month={Jul.}, pages={15895-15902} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26887/26659", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26887", + "pdf_size": 298099, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:8uybsKAq4oYJ:scholar.google.com/&scioq=Does+Knowing+When+Help+Is+Needed+Improve+Subgoal+Hint+Performance+in+an+Intelligent+Data-Driven+Logic+Tutor%3F&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "ncsu.edu;ncsu.edu;ncsu.edu;ncsu.edu;ncsu.edu", + "email": "ncsu.edu;ncsu.edu;ncsu.edu;ncsu.edu;ncsu.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "North Carolina State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ncsu.edu", + "aff_unique_abbr": "NCSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26185", + "title": "Domain Adaptation with Adversarial Training on Penultimate Activations", + "track": "main", + "status": "Technical", + "abstract": "Enhancing model prediction confidence on target data is an important objective in Unsupervised Domain Adaptation (UDA). In this paper, we explore adversarial training on penultimate activations, i.e., input features of the final linear classification layer. 
We show that this strategy is more efficient and better correlated with the objective of boosting prediction confidence than adversarial training on input images or intermediate features, as used in previous works. Furthermore, with activation normalization commonly used in domain adaptation to reduce domain gap, we derive two variants and systematically analyze the effects of normalization on our adversarial training. This is illustrated both in theory and through empirical analysis on real adaptation tasks. Extensive experiments are conducted on popular UDA benchmarks under both standard setting and source-data free setting. The results validate that our method achieves the best scores against previous arts. Code is available at https://github.com/tsun/APA.", + "primary_area": "machine learning iii", + "author": "Tao Sun; Cheng Lu; Haibin Ling", + "authorids": "", + "aff": "Stony Brook University, USA; XPeng Motors, USA; Stony Brook University, USA", + "bibtex": "@article{Sun_Lu_Ling_2023, title={Domain Adaptation with Adversarial Training on Penultimate Activations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26185}, DOI={10.1609/aaai.v37i8.26185}, abstractNote={Enhancing model prediction confidence on target data is an important objective in Unsupervised Domain Adaptation (UDA). In this paper, we explore adversarial training on penultimate activations, i.e., input features of the final linear classification layer. We show that this strategy is more efficient and better correlated with the objective of boosting prediction confidence than adversarial training on input images or intermediate features, as used in previous works. Furthermore, with activation normalization commonly used in domain adaptation to reduce domain gap, we derive two variants and systematically analyze the effects of normalization on our adversarial training. This is illustrated both in theory and through empirical analysis on real adaptation tasks. 
Extensive experiments are conducted on popular UDA benchmarks under both standard setting and source-data free setting. The results validate that our method achieves the best scores against previous arts. Code is available at https://github.com/tsun/APA.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Tao and Lu, Cheng and Ling, Haibin}, year={2023}, month={Jun.}, pages={9935-9943} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26185/25957", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26185", + "pdf_size": 683983, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4805766390242202817&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.stonybrook.edu;xiaopeng.com;cs.stonybrook.edu", + "email": "cs.stonybrook.edu;xiaopeng.com;cs.stonybrook.edu", + "github": "https://github.com/tsun/APA", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Stony Brook University;XPeng Motors", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.stonybrook.edu;https://www.xpengmotor.com", + "aff_unique_abbr": "SBU;XPeng", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25294", + "title": "Domain Decorrelation with Potential Energy Ranking", + "track": "main", + "status": "Technical", + "abstract": "Machine learning systems, especially the methods based on deep learning, enjoy great success in modern computer vision tasks under ideal experimental settings. Generally, these classic deep learning methods are built on the i.i.d. assumption, supposing the training and test data are drawn from the same distribution independently and identically. However, the aforementioned i.i.d. 
assumption is, in general, unavailable in the real-world scenarios, and as a result, leads to sharp performance decay of deep learning algorithms. Behind this, domain shift is one of the primary factors to be blamed. In order to tackle this problem, we propose using Potential Energy Ranking (PoER) to decouple the object feature and the domain feature in given images, promoting the learning of label-discriminative representations while filtering out the irrelevant correlations between the objects and the background. PoER employs the ranking loss in shallow layers to make features with identical category and domain labels close to each other and vice versa. This makes the neural networks aware of both objects and background characteristics, which is vital for generating domain-invariant features. Subsequently, with the stacked convolutional blocks, PoER further uses the contrastive loss to make features within the same categories distribute densely no matter domains, filtering out the domain information progressively for feature alignment. PoER reports superior performance on domain generalization benchmarks, improving the average top-1 accuracy by at least 1.20% compared to the existing methods. Moreover, we use PoER in the ECCV 2022 NICO Challenge, achieving top place with only a vanilla ResNet-18 and winning the jury award. 
The code has been made publicly available at: https://github.com/ForeverPs/PoER.", + "primary_area": "computer vision ii", + "author": "Sen Pei; Jiaxi Sun; Richard Yi Da Xu; Shiming Xiang; Gaofeng Meng", + "authorids": "", + "aff": "NLPR, Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Sciences + CAIR, HK Institute of Science and Innovation, Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Sciences + CAIR, HK Institute of Science and Innovation, Chinese Academy of Sciences; Hong Kong Baptist University; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Sciences + CAIR, HK Institute of Science and Innovation, Chinese Academy of Sciences", + "bibtex": "@article{Pei_Sun_Xu_Xiang_Meng_2023, title={Domain Decorrelation with Potential Energy Ranking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25294}, DOI={10.1609/aaai.v37i2.25294}, abstractNote={Machine learning systems, especially the methods based on deep learning, enjoy great success in modern computer vision tasks under ideal experimental settings. Generally, these classic deep learning methods are built on the i.i.d. assumption, supposing the training and test data are drawn from the same distribution independently and identically. However, the aforementioned i.i.d. assumption is, in general, unavailable in the real-world scenarios, and as a result, leads to sharp performance decay of deep learning algorithms. Behind this, domain shift is one of the primary factors to be blamed. 
In order to tackle this problem, we propose using Potential Energy Ranking (PoER) to decouple the object feature and the domain feature in given images, promoting the learning of label-discriminative representations while filtering out the irrelevant correlations between the objects and the background. PoER employs the ranking loss in shallow layers to make features with identical category and domain labels close to each other and vice versa. This makes the neural networks aware of both objects and background characteristics, which is vital for generating domain-invariant features. Subsequently, with the stacked convolutional blocks, PoER further uses the contrastive loss to make features within the same categories distribute densely no matter domains, filtering out the domain information progressively for feature alignment. PoER reports superior performance on domain generalization benchmarks, improving the average top-1 accuracy by at least 1.20% compared to the existing methods. Moreover, we use PoER in the ECCV 2022 NICO Challenge, achieving top place with only a vanilla ResNet-18 and winning the jury award. 
The code has been made publicly available at: https://github.com/ForeverPs/PoER.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pei, Sen and Sun, Jiaxi and Xu, Richard Yi Da and Xiang, Shiming and Meng, Gaofeng}, year={2023}, month={Jun.}, pages={2020-2028} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25294/25066", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25294", + "pdf_size": 8622645, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=568679051070572114&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ia.ac.cn;ia.ac.cn;hkbu.edu.hk;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;hkbu.edu.hk;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "https://github.com/ForeverPs/PoER", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+0;0+1+0;2;0+1;0+1+0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Hong Kong Baptist University", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn;https://www.hkbu.edu.hk", + "aff_unique_abbr": "CAS;UCAS;HKBU", + "aff_campus_unique_index": "1;1;;1", + "aff_campus_unique": ";Hong Kong", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25312", + "title": "Domain Generalised Faster R-CNN", + "track": "main", + "status": "Technical", + "abstract": "Domain generalisation (i.e. out-of-distribution generalisation)\nis an open problem in machine learning, where the goal is\nto train a model via one or more source domains, that will\ngeneralise well to unknown target domains. While the topic\nis attracting increasing interest, it has not been studied in\ndetail in the context of object detection. 
The established approaches\nall operate under the covariate shift assumption,\nwhere the conditional distributions are assumed to be approximately\nequal across source domains. This is the first\npaper to address domain generalisation in the context of object\ndetection, with a rigorous mathematical analysis of domain\nshift, without the covariate shift assumption. We focus on\nimproving the generalisation ability of object detection by\nproposing new regularisation terms to address the domain\nshift that arises due to both classification and bounding box\nregression. Also, we include an additional consistency regularisation\nterm to align the local and global level predictions.\nThe proposed approach is implemented as a Domain\nGeneralised Faster R-CNN and evaluated using four object\ndetection datasets which provide domain metadata (GWHD,\nCityscapes, BDD100K, Sim10K) where it exhibits a consistent\nperformance improvement over the baselines. All the\ncodes for replicating the results in this paper can be found at\nhttps://github.com/karthikiitm87/domain-generalisation.git", + "primary_area": "computer vision ii", + "author": "Karthik Seemakurthy; Charles Fox; Erchan Aptoula; Petra Bosilj", + "authorids": "", + "aff": "Lincoln Institute of Agri-Food Technology, University of Lincoln, United Kingdom + School of Computer Science, University of Lincoln, United Kingdom; School of Computer Science, University of Lincoln, United Kingdom; Faculty of Engineering and Natural Sciences (VPALab), Sabanci University, T\u00fcrkiye; Lincoln Institute of Agri-Food Technology, University of Lincoln, United Kingdom + School of Computer Science, University of Lincoln, United Kingdom", + "bibtex": "@article{Seemakurthy_Fox_Aptoula_Bosilj_2023, title={Domain Generalised Faster R-CNN}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25312}, DOI={10.1609/aaai.v37i2.25312}, abstractNote={Domain generalisation (i.e. 
out-of-distribution generalisation)\nis an open problem in machine learning, where the goal is\nto train a model via one or more source domains, that will\ngeneralise well to unknown target domains. While the topic\nis attracting increasing interest, it has not been studied in\ndetail in the context of object detection. The established approaches\nall operate under the covariate shift assumption,\nwhere the conditional distributions are assumed to be approximately\nequal across source domains. This is the first\npaper to address domain generalisation in the context of object\ndetection, with a rigorous mathematical analysis of domain\nshift, without the covariate shift assumption. We focus on\nimproving the generalisation ability of object detection by\nproposing new regularisation terms to address the domain\nshift that arises due to both classification and bounding box\nregression. Also, we include an additional consistency regularisation\nterm to align the local and global level predictions.\nThe proposed approach is implemented as a Domain\nGeneralised Faster R-CNN and evaluated using four object\ndetection datasets which provide domain metadata (GWHD,\nCityscapes, BDD100K, Sim10K) where it exhibits a consistent\nperformance improvement over the baselines. 
All the\ncodes for replicating the results in this paper can be found at\nhttps://github.com/karthikiitm87/domain-generalisation.git}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Seemakurthy, Karthik and Fox, Charles and Aptoula, Erchan and Bosilj, Petra}, year={2023}, month={Jun.}, pages={2180-2190} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25312/25084", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25312", + "pdf_size": 3944339, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6739723904585473395&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "lincoln.ac.uk;lincoln.ac.uk;sabanciuniv.edu;lincoln.ac.uk", + "email": "lincoln.ac.uk;lincoln.ac.uk;sabanciuniv.edu;lincoln.ac.uk", + "github": "https://github.com/karthikiitm87/domain-generalisation.git", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0;1;0+0", + "aff_unique_norm": "University of Lincoln;Sabanci University", + "aff_unique_dep": "Lincoln Institute of Agri-Food Technology;Faculty of Engineering and Natural Sciences", + "aff_unique_url": "https://www.lincoln.ac.uk;https://www.sabanciuniv.edu/", + "aff_unique_abbr": ";Sabanci", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;1;0+0", + "aff_country_unique": "United Kingdom;Turkey" + }, + { + "id": "article-26498", + "title": "Domain-Adapted Dependency Parsing for Cross-Domain Named Entity Recognition", + "track": "main", + "status": "Technical", + "abstract": "In recent years, many researchers have leveraged structural information from dependency trees to improve Named Entity Recognition (NER). Most of their methods take dependency-tree labels as input features for NER model training. However, such dependency information is not inherently provided in most NER corpora, making the methods with low usability in practice. 
To effectively exploit the potential of word-dependency knowledge, motivated by the success of Multi-Task Learning on cross-domain NER, we investigate a novel NER learning method incorporating cross-domain Dependency Parsing (DP) as its auxiliary learning task. Then, considering the high consistency of word-dependency relations across domains, we present an unsupervised domain-adapted method to transfer word-dependency knowledge from high-resource domains to low-resource ones. With the help of cross-domain DP to bridge different domains, both useful cross-domain and cross-task knowledge can be learned by our model to considerably benefit cross-domain NER. To make better use of the cross-task knowledge between NER and DP, we unify both tasks in a shared network architecture for joint learning, using Maximum Mean Discrepancy(MMD). Finally, through extensive experiments, we show our proposed method can not only effectively take advantage of word-dependency knowledge, but also significantly outperform other Multi-Task Learning methods on cross-domain NER. Our code is open-source and available at https://github.com/xianghuisun/DADP.", + "primary_area": "speech natural language processing", + "author": "Chenxiao Dou; Xianghui Sun; Yaoshu Wang; Yunjie Ji; Baochang Ma; Xiangang Li", + "authorids": "", + "aff": "Nanhu Academy of Electronics and Information Technology; Beike; Shenzhen Institute of Computing Sciences, Shenzhen University; Beike; Beike; Beike", + "bibtex": "@article{Dou_Sun_Wang_Ji_Ma_Li_2023, title={Domain-Adapted Dependency Parsing for Cross-Domain Named Entity Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26498}, DOI={10.1609/aaai.v37i11.26498}, abstractNote={In recent years, many researchers have leveraged structural information from dependency trees to improve Named Entity Recognition (NER). Most of their methods take dependency-tree labels as input features for NER model training. 
However, such dependency information is not inherently provided in most NER corpora, making the methods with low usability in practice. To effectively exploit the potential of word-dependency knowledge, motivated by the success of Multi-Task Learning on cross-domain NER, we investigate a novel NER learning method incorporating cross-domain Dependency Parsing (DP) as its auxiliary learning task. Then, considering the high consistency of word-dependency relations across domains, we present an unsupervised domain-adapted method to transfer word-dependency knowledge from high-resource domains to low-resource ones. With the help of cross-domain DP to bridge different domains, both useful cross-domain and cross-task knowledge can be learned by our model to considerably benefit cross-domain NER. To make better use of the cross-task knowledge between NER and DP, we unify both tasks in a shared network architecture for joint learning, using Maximum Mean Discrepancy(MMD). Finally, through extensive experiments, we show our proposed method can not only effectively take advantage of word-dependency knowledge, but also significantly outperform other Multi-Task Learning methods on cross-domain NER. 
Our code is open-source and available at https://github.com/xianghuisun/DADP.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dou, Chenxiao and Sun, Xianghui and Wang, Yaoshu and Ji, Yunjie and Ma, Baochang and Li, Xiangang}, year={2023}, month={Jun.}, pages={12737-12744} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26498/26270", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26498", + "pdf_size": 738889, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1411661785488267783&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "cnaeit.com;ke.com;sics.ac.cn;ke.com;ke.com;ke.com", + "email": "cnaeit.com;ke.com;sics.ac.cn;ke.com;ke.com;ke.com", + "github": "https://github.com/xianghuisun/DADP", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;1;1;1", + "aff_unique_norm": "Nanhu Academy of Electronics and Information Technology;Beike;Shenzhen University", + "aff_unique_dep": "Electronics and Information Technology;;Institute of Computing Sciences", + "aff_unique_url": ";https://www.beike.com;https://www.szu.edu.cn", + "aff_unique_abbr": ";Beike;SZU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25131", + "title": "Domain-General Crowd Counting in Unseen Scenarios", + "track": "main", + "status": "Technical", + "abstract": "Domain shift across crowd data severely hinders crowd counting models to generalize to unseen scenarios. Although domain adaptive crowd counting approaches close this gap to a certain extent, they are still dependent on the target domain data to adapt (e.g. finetune) their models to the specific domain. In this paper, we instead target to train a model based on a single source domain which can generalize well on any unseen domain. 
This falls into the realm of domain generalization that remains unexplored in crowd counting. We first introduce a dynamic sub-domain division scheme which divides the source domain into multiple sub-domains such that we can initiate a meta-learning framework for domain generalization. The sub-domain division is dynamically refined during the meta-learning. Next, in order to disentangle domain-invariant information from domain-specific information in image features, we design the domain-invariant and -specific crowd memory modules to re-encode image features. Two types of losses, i.e. feature reconstruction and orthogonal losses, are devised to enable this disentanglement. Extensive experiments on several standard crowd counting benchmarks i.e. SHA, SHB, QNRF, and NWPU, show the strong generalizability of our method. Our code is available at: https://github.com/ZPDu/Domain-general-Crowd-Counting-in-Unseen-Scenarios", + "primary_area": "computer vision i", + "author": "Zhipeng Du; Jiankang Deng; Miaojing Shi", + "authorids": "", + "aff": "1College of Electronic and Information Engineering, Tongji University, China + 2King\u2019s College London, UK; 3Huawei London Research Center, UK; 1College of Electronic and Information Engineering, Tongji University, China + 2King\u2019s College London, UK", + "bibtex": "@article{Du_Deng_Shi_2023, title={Domain-General Crowd Counting in Unseen Scenarios}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25131}, DOI={10.1609/aaai.v37i1.25131}, abstractNote={Domain shift across crowd data severely hinders crowd counting models to generalize to unseen scenarios. Although domain adaptive crowd counting approaches close this gap to a certain extent, they are still dependent on the target domain data to adapt (e.g. finetune) their models to the specific domain. In this paper, we instead target to train a model based on a single source domain which can generalize well on any unseen domain. 
This falls into the realm of domain generalization that remains unexplored in crowd counting. We first introduce a dynamic sub-domain division scheme which divides the source domain into multiple sub-domains such that we can initiate a meta-learning framework for domain generalization. The sub-domain division is dynamically refined during the meta-learning. Next, in order to disentangle domain-invariant information from domain-specific information in image features, we design the domain-invariant and -specific crowd memory modules to re-encode image features. Two types of losses, i.e. feature reconstruction and orthogonal losses, are devised to enable this disentanglement. Extensive experiments on several standard crowd counting benchmarks i.e. SHA, SHB, QNRF, and NWPU, show the strong generalizability of our method. Our code is available at: https://github.com/ZPDu/Domain-general-Crowd-Counting-in-Unseen-Scenarios}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Zhipeng and Deng, Jiankang and Shi, Miaojing}, year={2023}, month={Jun.}, pages={561-570} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25131/24903", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25131", + "pdf_size": 1619219, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3240225817893051866&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 9, + "aff_domain": "kcl.ac.uk;imperial.ac.uk;tongji.edu.cn", + "email": "kcl.ac.uk;imperial.ac.uk;tongji.edu.cn", + "github": "https://github.com/ZPDu/Domain-general-Crowd-Counting-in-Unseen-Scenarios", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0+1", + "aff_unique_norm": "Tongji University;King's College London;Huawei", + "aff_unique_dep": "College of Electronic and Information Engineering;;Huawei London Research Center", + "aff_unique_url": "https://www.tongji.edu.cn;https://www.kcl.ac.uk;https://www.huawei.com/uk", + 
"aff_unique_abbr": ";KCL;Huawei", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";London", + "aff_country_unique_index": "0+1;1;0+1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26614", + "title": "Don\u2019t Be So Sure! Boosting ASR Decoding via Confidence Relaxation", + "track": "main", + "status": "Technical", + "abstract": "Automatic Speech Recognition (ASR) systems frequently use a search-based decoding strategy aiming to find the best attainable transcript by considering multiple candidates. One prominent speech recognition decoding heuristic is beam search, which seeks the transcript with the greatest likelihood computed using the predicted distribution. While showing substantial performance gains in various tasks, beam search loses some of its effectiveness when the predicted probabilities are highly confident, i.e., the predicted distribution is massed for a single or very few classes. We show that recently proposed Self-Supervised Learning (SSL)-based ASR models tend to yield exceptionally confident predictions that may hamper beam search from truly considering a diverse set of candidates. We perform a layer analysis to reveal and visualize how predictions evolve, and propose a decoding procedure that improves the performance of fine-tuned ASR models. Our proposed approach does not require further training beyond the original fine-tuning, nor additional model parameters. In fact, we find that our proposed method requires significantly less inference computation than current approaches. We propose aggregating the top M layers, potentially leveraging useful information encoded in intermediate layers, and relaxing model confidence. 
We demonstrate the effectiveness of our approach by conducting an empirical study on varying amounts of labeled resources and different model sizes, showing consistent improvements in particular when applied to low-resource scenarios.", + "primary_area": "speech natural language processing", + "author": "Tomer Wullach; Shlomo E. Chazan", + "authorids": "", + "aff": "OriginAI, Ramat-Gan, Israel; OriginAI, Ramat-Gan, Israel", + "bibtex": "@article{Wullach_Chazan_2023, title={Don\u2019t Be So Sure! Boosting ASR Decoding via Confidence Relaxation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26614}, DOI={10.1609/aaai.v37i11.26614}, abstractNote={Automatic Speech Recognition (ASR) systems frequently use a search-based decoding strategy aiming to find the best attainable transcript by considering multiple candidates. One prominent speech recognition decoding heuristic is beam search, which seeks the transcript with the greatest likelihood computed using the predicted distribution. While showing substantial performance gains in various tasks, beam search loses some of its effectiveness when the predicted probabilities are highly confident, i.e., the predicted distribution is massed for a single or very few classes. We show that recently proposed Self-Supervised Learning (SSL)-based ASR models tend to yield exceptionally confident predictions that may hamper beam search from truly considering a diverse set of candidates. We perform a layer analysis to reveal and visualize how predictions evolve, and propose a decoding procedure that improves the performance of fine-tuned ASR models. Our proposed approach does not require further training beyond the original fine-tuning, nor additional model parameters. In fact, we find that our proposed method requires significantly less inference computation than current approaches. 
We propose aggregating the top M layers, potentially leveraging useful information encoded in intermediate layers, and relaxing model confidence. We demonstrate the effectiveness of our approach by conducting an empirical study on varying amounts of labeled resources and different model sizes, showing consistent improvements in particular when applied to low-resource scenarios.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wullach, Tomer and Chazan, Shlomo E.}, year={2023}, month={Jun.}, pages={13780-13788} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26614/26386", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26614", + "pdf_size": 505292, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15699025279097998874&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "originai.co;originai.co", + "email": "originai.co;originai.co", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "OriginAI", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25661", + "title": "Don\u2019t Predict Counterfactual Values, Predict Expected Values Instead", + "track": "main", + "status": "Technical", + "abstract": "Counterfactual Regret Minimization algorithms are the most popular way of estimating the Nash Equilibrium in imperfect-information zero-sum games.\nIn particular, DeepStack -- the state-of-the-art Poker bot -- employs the so-called Deep Counterfactual Value Network (DCVN) to learn the Counterfactual Values (CFVs) associated with various states in the game.\nEach CFV is a multiplication of two factors: (1) the probability that the opponent would reach a given state in a game, which can be explicitly calculated from the input 
data, and (2) the expected value (EV) of a payoff in that state, which is a complex function of the input data, hard to calculate.\nIn this paper, we propose a simple yet powerful modification to the CFVs estimation process, which consists in utilizing a deep neural network to estimate only the EV factor of CFV. This new target setting significantly simplifies the learning problem and leads to much more accurate CFVs estimation. \nA direct comparison, in terms of CFVs prediction losses, shows a significant prediction accuracy improvement of the proposed approach (DEVN) over the original DCVN formulation (relatively by 9.18-15.70% when using card abstraction, and by 3.37-8.39% without card abstraction, depending on a particular setting). \nFurthermore, the application of DEVN improves the theoretical lower bound of the error by 29.05-31.83% compared to the DCVN pipeline when card abstraction is applied.\nAdditionally, DEVN is able to achieve the goal using significantly smaller, and faster to infer, networks.\nWhile the proposed modification may seem to be of a rather technical nature, it, in fact, presents a fundamentally different approach to the overall process of learning and estimating CFVs, since the distributions of the training signals differ significantly between DCVN and DEVN. The former estimates CFVs, which are biased by the probability of reaching a given game state, while training the latter relies on a direct EV estimation, regardless of the state probability. 
In effect, the learning signal of DEVN presents a better estimation of the true value of a given state, thus allowing more accurate CFVs estimation.", + "primary_area": "domain s of application", + "author": "Jeremiasz Wo\u0142osiuk; Maciej \u015awiechowski; Jacek Ma\u0144dziuk", + "authorids": "", + "aff": "Deepsolver; QED Software+Warsaw University of Technology; Warsaw University of Technology", + "bibtex": "@article{Wo\u0142osiuk_\u015awiechowski_Ma\u0144dziuk_2023, title={Don\u2019t Predict Counterfactual Values, Predict Expected Values Instead}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25661}, DOI={10.1609/aaai.v37i4.25661}, abstractNote={Counterfactual Regret Minimization algorithms are the most popular way of estimating the Nash Equilibrium in imperfect-information zero-sum games.\nIn particular, DeepStack -- the state-of-the-art Poker bot -- employs the so-called Deep Counterfactual Value Network (DCVN) to learn the Counterfactual Values (CFVs) associated with various states in the game.\nEach CFV is a multiplication of two factors: (1) the probability that the opponent would reach a given state in a game, which can be explicitly calculated from the input data, and (2) the expected value (EV) of a payoff in that state, which is a complex function of the input data, hard to calculate.\nIn this paper, we propose a simple yet powerful modification to the CFVs estimation process, which consists in utilizing a deep neural network to estimate only the EV factor of CFV. This new target setting significantly simplifies the learning problem and leads to much more accurate CFVs estimation. A direct comparison, in terms of CFVs prediction losses, shows a significant prediction accuracy improvement of the proposed approach (DEVN) over the original DCVN formulation (relatively by 9.18-15.70% when using card abstraction, and by 3.37-8.39% without card abstraction, depending on a particular setting). 
Furthermore, the application of DEVN improves the theoretical lower bound of the error by 29.05-31.83% compared to the DCVN pipeline when card abstraction is applied.\nAdditionally, DEVN is able to achieve the goal using significantly smaller, and faster to infer, networks.\nWhile the proposed modification may seem to be of a rather technical nature, it, in fact, presents a fundamentally different approach to the overall process of learning and estimating CFVs, since the distributions of the training signals differ significantly between DCVN and DEVN. The former estimates CFVs, which are biased by the probability of reaching a given game state, while training the latter relies on a direct EV estimation, regardless of the state probability. In effect, the learning signal of DEVN presents a better estimation of the true value of a given state, thus allowing more accurate CFVs estimation.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wo\u0142osiuk, Jeremiasz and \u015awiechowski, Maciej and Ma\u0144dziuk, Jacek}, year={2023}, month={Jun.}, pages={5303-5311} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25661/25433", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25661", + "pdf_size": 871059, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6470586912217282136&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "deepsolver.com;qed.pl;pw.edu.pl", + "email": "deepsolver.com;qed.pl;pw.edu.pl", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;2", + "aff_unique_norm": "Deepsolver;QED Software;Warsaw University of Technology", + "aff_unique_dep": ";;", + "aff_unique_url": ";;https://www.pw.edu.pl", + "aff_unique_abbr": ";;WUT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1", + "aff_country_unique": ";Poland" + }, + { + "id": "article-25344", + "title": "Doodle to 
Object: Practical Zero-Shot Sketch-Based 3D Shape Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Zero-shot (ZS) sketch-based three-dimensional (3D) shape retrieval (SBSR) is challenging due to the abstraction of sketches, cross-domain discrepancies between two-dimensional sketches and 3D shapes, and ZS-driven semantic knowledge transference from seen to unseen categories. Extant SBSR datasets suffer from lack of data, and no current SBSR methods consider ZS scenarios. In this paper, we contribute a new Doodle2Object (D2O) dataset consisting of 8,992 3D shapes and over 7M sketches spanning 50 categories. Then, we propose a novel prototype contrastive learning (PCL) method that effectively extracts features from different domains and adapts them to unseen categories. Specifically, our PCL method combines the ideas of contrastive and cluster-based prototype learning, and several randomly selected prototypes of different classes are assigned to each sample. By comparing these prototypes, a given sample can be moved closer to the same semantic class of samples while moving away from negative ones. Extensive experiments on two common SBSR benchmarks and our D2O dataset demonstrate the efficacy of the proposed PCL method for ZS-SBSR. 
Resource is available at https://github.com/yigohw/doodle2object.", + "primary_area": "computer vision ii", + "author": "Bingrui Wang; Yuan Zhou", + "authorids": "", + "aff": "School of Electrical and Information Engineering, Tianjin University, Tianjin, China; School of Electrical and Information Engineering, Tianjin University, Tianjin, China", + "bibtex": "@article{Wang_Zhou_2023, title={Doodle to Object: Practical Zero-Shot Sketch-Based 3D Shape Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25344}, DOI={10.1609/aaai.v37i2.25344}, abstractNote={Zero-shot (ZS) sketch-based three-dimensional (3D) shape retrieval (SBSR) is challenging due to the abstraction of sketches, cross-domain discrepancies between two-dimensional sketches and 3D shapes, and ZS-driven semantic knowledge transference from seen to unseen categories. Extant SBSR datasets suffer from lack of data, and no current SBSR methods consider ZS scenarios. In this paper, we contribute a new Doodle2Object (D2O) dataset consisting of 8,992 3D shapes and over 7M sketches spanning 50 categories. Then, we propose a novel prototype contrastive learning (PCL) method that effectively extracts features from different domains and adapts them to unseen categories. Specifically, our PCL method combines the ideas of contrastive and cluster-based prototype learning, and several randomly selected prototypes of different classes are assigned to each sample. By comparing these prototypes, a given sample can be moved closer to the same semantic class of samples while moving away from negative ones. Extensive experiments on two common SBSR benchmarks and our D2O dataset demonstrate the efficacy of the proposed PCL method for ZS-SBSR. 
Resource is available at https://github.com/yigohw/doodle2object.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Bingrui and Zhou, Yuan}, year={2023}, month={Jun.}, pages={2474-2482} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25344/25116", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25344", + "pdf_size": 1995928, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6702359373226437515&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn", + "github": "https://github.com/yigohw/doodle2object", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Tianjin University", + "aff_unique_dep": "School of Electrical and Information Engineering", + "aff_unique_url": "http://www.tju.edu.cn", + "aff_unique_abbr": "Tianjin University", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Tianjin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26001", + "title": "Double Doubly Robust Thompson Sampling for Generalized Linear Contextual Bandits", + "track": "main", + "status": "Technical", + "abstract": "We propose a novel algorithm for generalized linear contextual bandits (GLBs) with a regret bound sublinear to the time horizon, the minimum eigenvalue of the covariance of contexts and a lower bound of the variance of rewards.\nIn several identified cases, our result is the first regret bound for generalized linear bandits (GLBs) achieving the regret bound sublinear to the dimension of contexts without discarding the observed rewards.\nPrevious approaches achieve the regret bound sublinear to the dimension of contexts by discarding the observed rewards, whereas our algorithm achieves the bound incorporating contexts from all arms in our double doubly robust (DDR) estimator.\nThe DDR estimator is a 
subclass of doubly robust estimator but with a tighter error bound.\nWe also provide a logarithmic cumulative regret bound under a probabilistic margin condition.\nThis is the first regret bound under the margin condition for linear models or GLMs when contexts are different for all arms but coefficients are common.\nWe conduct empirical studies using synthetic data and real examples, demonstrating the effectiveness of our algorithm.", + "primary_area": "machine learning ii", + "author": "Wonyoung Kim; Kyungbok Lee; Myunghee Cho Paik", + "authorids": "", + "aff": "Department of Industrial Engineering and Operations Research, Columbia University; Department of Statistics, Seoul National University + Shepherd23 Inc.; Department of Statistics, Seoul National University + Shepherd23 Inc.", + "bibtex": "@article{Kim_Lee_Paik_2023, title={Double Doubly Robust Thompson Sampling for Generalized Linear Contextual Bandits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26001}, DOI={10.1609/aaai.v37i7.26001}, abstractNote={We propose a novel algorithm for generalized linear contextual bandits (GLBs) with a regret bound sublinear to the time horizon, the minimum eigenvalue of the covariance of contexts and a lower bound of the variance of rewards.\nIn several identified cases, our result is the first regret bound for generalized linear bandits (GLBs) achieving the regret bound sublinear to the dimension of contexts without discarding the observed rewards.\nPrevious approaches achieve the regret bound sublinear to the dimension of contexts by discarding the observed rewards, whereas our algorithm achieves the bound incorporating contexts from all arms in our double doubly robust (DDR) estimator.\nThe DDR estimator is a subclass of doubly robust estimator but with a tighter error bound.\nWe also provide a logarithmic cumulative regret bound under a probabilistic margin condition.\nThis is the first regret bound under the margin condition for linear models or 
GLMs when contexts are different for all arms but coefficients are common.\nWe conduct empirical studies using synthetic data and real examples, demonstrating the effectiveness of our algorithm.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Wonyoung and Lee, Kyungbok and Paik, Myunghee Cho}, year={2023}, month={Jun.}, pages={8300-8307} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26001/25773", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26001", + "pdf_size": 257877, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1361943442463237236&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "columbia.edu;snu.ac.kr;snu.ac.kr", + "email": "columbia.edu;snu.ac.kr;snu.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;1+2", + "aff_unique_norm": "Columbia University;Seoul National University;Shepherd23 Inc.", + "aff_unique_dep": "Department of Industrial Engineering and Operations Research;Department of Statistics;", + "aff_unique_url": "https://www.columbia.edu;https://www.snu.ac.kr;", + "aff_unique_abbr": "Columbia;SNU;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Seoul", + "aff_country_unique_index": "0;1+0;1+0", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "article-26988", + "title": "Double Policy Network for Aspect Sentiment Triplet Extraction (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Aspect Sentiment Triplet Extraction (ASTE) is the task to extract aspects, opinions and associated sentiments from sentences. Previous studies do not adequately consider the complicated interactions between aspect and opinion terms in both extraction logic and strategy. 
We present a novel Double Policy Network with Multi-Tag based Reward model (DPN-MTR), which adopts two networks ATE, TSOTE and a Trigger Mechanism to execute ASTE task following a more logical framework. A Multi-Tag based reward is also proposed to solve the limitations of existing studies for identifying aspect/opinion terms with multiple tokens (one term may consist of two or more tokens) to a certain extent. Extensive experiments are conducted on four widely-used benchmark datasets, and demonstrate the effectiveness of our model in generally improving the performance on ASTE significantly.", + "primary_area": "", + "author": "Xuting Li; Daifeng Li; Ruo Du; Dingquan Chen; Andrew Madden", + "authorids": "", + "aff": "School of Information Management, Sun Yat-sen University; School of Information Management, Sun Yat-sen University; Galanz Research Center; School of Information Management, Sun Yat-sen University; University of Sheffield", + "bibtex": "@article{Li_Li_Du_Chen_Madden_2024, title={Double Policy Network for Aspect Sentiment Triplet Extraction (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26988}, DOI={10.1609/aaai.v37i13.26988}, abstractNote={Aspect Sentiment Triplet Extraction (ASTE) is the task to extract aspects, opinions and associated sentiments from sentences. Previous studies do not adequately consider the complicated interactions between aspect and opinion terms in both extraction logic and strategy. We present a novel Double Policy Network with Multi-Tag based Reward model (DPN-MTR), which adopts two networks ATE, TSOTE and a Trigger Mechanism to execute ASTE task following a more logical framework. A Multi-Tag based reward is also proposed to solve the limitations of existing studies for identifying aspect/opinion terms with multiple tokens (one term may consist of two or more tokens) to a certain extent. 
Extensive experiments are conducted on four widely-used benchmark datasets, and demonstrate the effectiveness of our model in generally improving the performance on ASTE significantly.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xuting and Li, Daifeng and Du, Ruo and Chen, Dingquan and Madden, Andrew}, year={2024}, month={Jul.}, pages={16256-16257} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26988/26760", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26988", + "pdf_size": 142783, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9789218165009665413&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn;hotmail.com;mail.sysu.edu.cn;hotmail.com", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn;hotmail.com;mail.sysu.edu.cn;hotmail.com", + "github": "https://github.com/lixt47/DPN-SR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Sun Yat-sen University;Galanz Research Center;University of Sheffield", + "aff_unique_dep": "School of Information Management;;", + "aff_unique_url": "http://www.sysu.edu.cn;http://www.galanz.com.cn;https://www.sheffield.ac.uk", + "aff_unique_abbr": "SYSU;;Sheffield", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25945", + "title": "Dream to Generalize: Zero-Shot Model-Based Reinforcement Learning for Unseen Visual Distractions", + "track": "main", + "status": "Technical", + "abstract": "Model-based reinforcement learning (MBRL) has been used to efficiently solve vision-based control tasks in high-dimensional image observations. Although recent MBRL algorithms perform well in trained observations, they fail when faced with visual distractions in observations. 
These task-irrelevant distractions (e.g., clouds, shadows, and light) may be constantly present in real-world scenarios. In this study, we propose a novel self-supervised method, Dream to Generalize (Dr. G), for zero-shot MBRL. Dr. G trains its encoder and world model with dual contrastive learning which efficiently captures task-relevant features among multi-view data augmentations. We also introduce a recurrent state inverse dynamics model that helps the world model to better understand the temporal structure. The proposed methods can enhance the robustness of the world model against visual distractions. To evaluate the generalization performance, we first train Dr. G on simple backgrounds and then test it on complex natural video backgrounds in the DeepMind Control suite, and the randomizing environments in Robosuite. Dr. G yields a performance improvement of 117% and 14% over prior works, respectively. Our code is open-sourced and available at https://github.com/JeongsooHa/DrG.git", + "primary_area": "machine learning i", + "author": "Jeongsoo Ha; Kyungsoo Kim; Yusung Kim", + "authorids": "", + "aff": "Mechatronics Research, Samsung Electronics; Intelligent Agent Lab, NCSOFT; Department of Computer Science and Engineering, Sungkyunkwan University", + "bibtex": "@article{Ha_Kim_Kim_2023, title={Dream to Generalize: Zero-Shot Model-Based Reinforcement Learning for Unseen Visual Distractions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25945}, DOI={10.1609/aaai.v37i6.25945}, abstractNote={Model-based reinforcement learning (MBRL) has been used to efficiently solve vision-based control tasks in high-dimensional image observations. Although recent MBRL algorithms perform well in trained observations, they fail when faced with visual distractions in observations. These task-irrelevant distractions (e.g., clouds, shadows, and light) may be constantly present in real-world scenarios. 
In this study, we propose a novel self-supervised method, Dream to Generalize (Dr. G), for zero-shot MBRL. Dr. G trains its encoder and world model with dual contrastive learning which efficiently captures task-relevant features among multi-view data augmentations. We also introduce a recurrent state inverse dynamics model that helps the world model to better understand the temporal structure. The proposed methods can enhance the robustness of the world model against visual distractions. To evaluate the generalization performance, we first train Dr. G on simple backgrounds and then test it on complex natural video backgrounds in the DeepMind Control suite, and the randomizing environments in Robosuite. Dr. G yields a performance improvement of 117% and 14% over prior works, respectively. Our code is open-sourced and available at https://github.com/JeongsooHa/DrG.git}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ha, Jeongsoo and Kim, Kyungsoo and Kim, Yusung}, year={2023}, month={Jun.}, pages={7802-7810} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25945/25717", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25945", + "pdf_size": 9319416, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=388696943782536476&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "samsung.com;ncsoft.com;skku.edu", + "email": "samsung.com;ncsoft.com;skku.edu", + "github": "https://github.com/JeongsooHa/DrG.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Samsung Electronics;NCSOFT;Sungkyunkwan University", + "aff_unique_dep": "Mechatronics Research;Intelligent Agent Lab;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.samsung.com;https://www.ncsoft.com;https://www.sungkyunkwan.ac.kr", + "aff_unique_abbr": "Samsung;NCSOFT;SKKU", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26588", + "title": "Drop Clause: Enhancing Performance, Robustness and Pattern Recognition Capabilities of the Tsetlin Machine", + "track": "main", + "status": "Technical", + "abstract": "Logic-based machine learning has the crucial advantage of transparency. However, despite significant recent progress, further research is needed to close the accuracy gap between logic-based architectures and deep neural network ones. This paper introduces a novel variant of the Tsetlin machine (TM) that randomly drops clauses, the logical learning element of TMs. In effect, TM with Drop Clause ignores a random selection of the clauses in each epoch, selected according to a predefined probability. In this way, the TM learning phase becomes more diverse. To explore the effects that Drop Clause has on accuracy, training time and robustness, we conduct extensive experiments on nine benchmark datasets in natural language processing (IMDb, R8, R52, MR, and TREC) and image classification (MNIST, Fashion MNIST, CIFAR-10, and CIFAR-100). Our proposed model outperforms baseline machine learning algorithms by a wide margin and achieves competitive performance compared with recent deep learning models, such as BERT-Large and AlexNet-DFA. In brief, we observe up to +10% increase in accuracy and 2x to 4x faster learning than for the standard TM. We visualize the patterns learnt by Drop Clause TM in the form of heatmaps and show evidence of the ability of drop clause to learn more unique and discriminative patterns. 
We finally evaluate how Drop Clause affects learning robustness by introducing corruptions and alterations in the image/language test data, which exposes increased learning robustness.", + "primary_area": "speech natural language processing", + "author": "Jivitesh Sharma; Rohan Yadav; Ole-Christoffer Granmo; Lei Jiao", + "authorids": "", + "aff": "Center for Artificial Intelligence Research (CAIR), University of Agder, Norway; Center for Artificial Intelligence Research (CAIR), University of Agder, Norway; Center for Artificial Intelligence Research (CAIR), University of Agder, Norway; Center for Artificial Intelligence Research (CAIR), University of Agder, Norway", + "bibtex": "@article{Sharma_Yadav_Granmo_Jiao_2023, title={Drop Clause: Enhancing Performance, Robustness and Pattern Recognition Capabilities of the Tsetlin Machine}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26588}, DOI={10.1609/aaai.v37i11.26588}, abstractNote={Logic-based machine learning has the crucial advantage of transparency. However, despite significant recent progress, further research is needed to close the accuracy gap between logic-based architectures and deep neural network ones. This paper introduces a novel variant of the Tsetlin machine (TM) that randomly drops clauses, the logical learning element of TMs. In effect, TM with Drop Clause ignores a random selection of the clauses in each epoch, selected according to a predefined probability. In this way, the TM learning phase becomes more diverse. To explore the effects that Drop Clause has on accuracy, training time and robustness, we conduct extensive experiments on nine benchmark datasets in natural language processing (IMDb, R8, R52, MR, and TREC) and image classification (MNIST, Fashion MNIST, CIFAR-10, and CIFAR-100). 
Our proposed model outperforms baseline machine learning algorithms by a wide margin and achieves competitive performance compared with recent deep learning models, such as BERT-Large and AlexNet-DFA. In brief, we observe up to +10% increase in accuracy and 2x to 4x faster learning than for the standard TM. We visualize the patterns learnt by Drop Clause TM in the form of heatmaps and show evidence of the ability of drop clause to learn more unique and discriminative patterns. We finally evaluate how Drop Clause affects learning robustness by introducing corruptions and alterations in the image/language test data, which exposes increased learning robustness.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sharma, Jivitesh and Yadav, Rohan and Granmo, Ole-Christoffer and Jiao, Lei}, year={2023}, month={Jun.}, pages={13547-13555} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26588/26360", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26588", + "pdf_size": 528315, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14286766310381333696&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "uia.no;uia.no;uia.no;uia.no", + "email": "uia.no;uia.no;uia.no;uia.no", + "github": "https://github.com/Anonymous-2491/Drop-Clause-Interpretable-TM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Agder", + "aff_unique_dep": "Center for Artificial Intelligence Research (CAIR)", + "aff_unique_url": "https://www.uia.no", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Norway" + }, + { + "id": "article-25545", + "title": "DropMessage: Unifying Random Dropping for Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Graph Neural Networks (GNNs) are powerful tools for 
graph representation learning. Despite their rapid development, GNNs also face some challenges, such as over-fitting, over-smoothing, and non-robustness. Previous works indicate that these problems can be alleviated by random dropping methods, which integrate augmented data into models by randomly masking parts of the input. However, some open problems of random dropping on GNNs remain to be solved. First, it is challenging to find a universal method that are suitable for all cases considering the divergence of different datasets and models. Second, augmented data introduced to GNNs causes the incomplete coverage of parameters and unstable training process. Third, there is no theoretical analysis on the effectiveness of random dropping methods on GNNs. In this paper, we propose a novel random dropping method called DropMessage, which performs dropping operations directly on the propagated messages during the message-passing process. More importantly, we find that DropMessage provides a unified framework for most existing random dropping methods, based on which we give theoretical analysis of their effectiveness. Furthermore, we elaborate the superiority of DropMessage: it stabilizes the training process by reducing sample variance; it keeps information diversity from the perspective of information theory, enabling it become a theoretical upper bound of other methods. To evaluate our proposed method, we conduct experiments that aims for multiple tasks on five public datasets and two industrial datasets with various backbone models. The experimental results show that DropMessage has the advantages of both effectiveness and generalization, and can significantly alleviate the problems mentioned above. 
A detailed version with full appendix can be found on arXiv: https://arxiv.org/abs/2204.10037.", + "primary_area": "data mining and knowledge management", + "author": "Taoran Fang; Zhiqing Xiao; Chunping Wang; Jiarong Xu; Xuan Yang; Yang Yang", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; FinVolution Group; Fudan University; Zhejiang University; Zhejiang University", + "bibtex": "@article{Fang_Xiao_Wang_Xu_Yang_Yang_2023, title={DropMessage: Unifying Random Dropping for Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25545}, DOI={10.1609/aaai.v37i4.25545}, abstractNote={Graph Neural Networks (GNNs) are powerful tools for graph representation learning. Despite their rapid development, GNNs also face some challenges, such as over-fitting, over-smoothing, and non-robustness. Previous works indicate that these problems can be alleviated by random dropping methods, which integrate augmented data into models by randomly masking parts of the input. However, some open problems of random dropping on GNNs remain to be solved. First, it is challenging to find a universal method that are suitable for all cases considering the divergence of different datasets and models. Second, augmented data introduced to GNNs causes the incomplete coverage of parameters and unstable training process. Third, there is no theoretical analysis on the effectiveness of random dropping methods on GNNs. In this paper, we propose a novel random dropping method called DropMessage, which performs dropping operations directly on the propagated messages during the message-passing process. More importantly, we find that DropMessage provides a unified framework for most existing random dropping methods, based on which we give theoretical analysis of their effectiveness. 
Furthermore, we elaborate the superiority of DropMessage: it stabilizes the training process by reducing sample variance; it keeps information diversity from the perspective of information theory, enabling it become a theoretical upper bound of other methods. To evaluate our proposed method, we conduct experiments that aims for multiple tasks on five public datasets and two industrial datasets with various backbone models. The experimental results show that DropMessage has the advantages of both effectiveness and generalization, and can significantly alleviate the problems mentioned above. A detailed version with full appendix can be found on arXiv: https://arxiv.org/abs/2204.10037.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Taoran and Xiao, Zhiqing and Wang, Chunping and Xu, Jiarong and Yang, Xuan and Yang, Yang}, year={2023}, month={Jun.}, pages={4267-4275} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25545/25317", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25545", + "pdf_size": 263542, + "gs_citation": 67, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18183978898372866469&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "zju.edu.cn;zju.edu.cn;xinye.com;fudan.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;xinye.com;fudan.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "https://arxiv.org/abs/2204.10037", + "author_num": 6, + "aff_unique_index": "0;0;1;2;0;0", + "aff_unique_norm": "Zhejiang University;FinVolution Group;Fudan University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.finvolution.com/;https://www.fudan.edu.cn", + "aff_unique_abbr": "ZJU;;Fudan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26163", + "title": "Dropout Is NOT All You Need 
to Prevent Gradient Leakage", + "track": "main", + "status": "Technical", + "abstract": "Gradient inversion attacks on federated learning systems reconstruct client training data from exchanged gradient information. To defend against such attacks, a variety of defense mechanisms were proposed. However, they usually lead to an unacceptable trade-off between privacy and model utility. Recent observations suggest that dropout could mitigate gradient leakage and improve model utility if added to neural networks. Unfortunately, this phenomenon has not been systematically researched yet. In this work, we thoroughly analyze the effect of dropout on iterative gradient inversion attacks. We find that state of the art attacks are not able to reconstruct the client data due to the stochasticity induced by dropout during model training. Nonetheless, we argue that dropout does not offer reliable protection if the dropout induced stochasticity is adequately modeled during attack optimization. Consequently, we propose a novel Dropout Inversion Attack (DIA) that jointly optimizes for client data and dropout masks to approximate the stochastic client model. We conduct an extensive systematic evaluation of our attack on four seminal model architectures and three image classification datasets of increasing complexity. We find that our proposed attack bypasses the protection seemingly induced by dropout and reconstructs client data with high fidelity. 
Our work demonstrates that privacy inducing changes to model architectures alone cannot be assumed to reliably protect from gradient leakage and therefore should be combined with complementary defense mechanisms.", + "primary_area": "machine learning iii", + "author": "Daniel Scheliga; Patrick Maeder; Marco Seeland", + "authorids": "", + "aff": "Technische Universit \u00a8at Ilmenau, Germany + Friedrich Schiller Universit \u00a8at Jena, Germany; Technische Universit \u00a8at Ilmenau, Germany + Friedrich Schiller Universit \u00a8at Jena, Germany; Technische Universit \u00a8at Ilmenau, Germany", + "bibtex": "@article{Scheliga_Maeder_Seeland_2023, title={Dropout Is NOT All You Need to Prevent Gradient Leakage}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26163}, DOI={10.1609/aaai.v37i8.26163}, abstractNote={Gradient inversion attacks on federated learning systems reconstruct client training data from exchanged gradient information. To defend against such attacks, a variety of defense mechanisms were proposed. However, they usually lead to an unacceptable trade-off between privacy and model utility. Recent observations suggest that dropout could mitigate gradient leakage and improve model utility if added to neural networks. Unfortunately, this phenomenon has not been systematically researched yet. In this work, we thoroughly analyze the effect of dropout on iterative gradient inversion attacks. We find that state of the art attacks are not able to reconstruct the client data due to the stochasticity induced by dropout during model training. Nonetheless, we argue that dropout does not offer reliable protection if the dropout induced stochasticity is adequately modeled during attack optimization. Consequently, we propose a novel Dropout Inversion Attack (DIA) that jointly optimizes for client data and dropout masks to approximate the stochastic client model. 
We conduct an extensive systematic evaluation of our attack on four seminal model architectures and three image classification datasets of increasing complexity. We find that our proposed attack bypasses the protection seemingly induced by dropout and reconstructs client data with high fidelity. Our work demonstrates that privacy inducing changes to model architectures alone cannot be assumed to reliably protect from gradient leakage and therefore should be combined with complementary defense mechanisms.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Scheliga, Daniel and Maeder, Patrick and Seeland, Marco}, year={2023}, month={Jun.}, pages={9733-9741} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26163/25935", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26163", + "pdf_size": 2152281, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5860389148131911224&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "tu-ilmenau.de;tu-ilmenau.de;tu-ilmenau.de", + "email": "tu-ilmenau.de;tu-ilmenau.de;tu-ilmenau.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0", + "aff_unique_norm": "Technische Universit\u00e4t Ilmenau;Friedrich Schiller University Jena", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tu-ilmenau.de/;https://www.uni-jena.de/", + "aff_unique_abbr": "TU Ilmenau;FSU Jena", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25970", + "title": "DrugOOD: Out-of-Distribution Dataset Curator and Benchmark for AI-Aided Drug Discovery \u2013 a Focus on Affinity Prediction Problems with Noise Annotations", + "track": "main", + "status": "Technical", + "abstract": "AI-aided drug discovery (AIDD) is gaining popularity due to its potential to make the search for new pharmaceuticals 
faster, less expensive, and more effective. Despite its extensive use in numerous fields (e.g., ADMET prediction, virtual screening), little research has been conducted on the out-of-distribution (OOD) learning problem with noise. We present DrugOOD, a systematic OOD dataset curator and benchmark for AIDD. Particularly, we focus on the drug-target binding affinity prediction problem, which involves both macromolecule (protein target) and small-molecule (drug compound). DrugOOD offers an automated dataset curator with user-friendly customization scripts, rich domain annotations aligned with biochemistry knowledge, realistic noise level annotations, and rigorous benchmarking of SOTA OOD algorithms, as opposed to only providing fixed datasets. Since the molecular data is often modeled as irregular graphs using graph neural network (GNN) backbones, DrugOOD also serves as a valuable testbed for graph OOD learning problems. Extensive empirical studies have revealed a significant performance gap between in-distribution and out-of-distribution experiments, emphasizing the need for the development of more effective schemes that permit OOD generalization under noise for AIDD.", + "primary_area": "machine learning ii", + "author": "Yuanfeng Ji; Lu Zhang; Jiaxiang Wu; Bingzhe Wu; Lanqing Li; Long-Kai Huang; Tingyang Xu; Yu Rong; Jie Ren; Ding Xue; Houtim Lai; Wei Liu; Junzhou Huang; Shuigeng Zhou; Ping Luo; Peilin Zhao; Yatao Bian", + "authorids": "", + "aff": "Tencent AI Lab; Tencent AI Lab + Fudan University; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Fudan University; The University of Hong Kong; Tencent AI Lab; Tencent AI Lab", + "bibtex": "@article{Ji_Zhang_Wu_Wu_Li_Huang_Xu_Rong_Ren_Xue_Lai_Liu_Huang_Zhou_Luo_Zhao_Bian_2023, title={DrugOOD: Out-of-Distribution Dataset Curator and Benchmark for AI-Aided Drug Discovery \u2013 a Focus on 
Affinity Prediction Problems with Noise Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25970}, DOI={10.1609/aaai.v37i7.25970}, abstractNote={AI-aided drug discovery (AIDD) is gaining popularity due to its potential to make the search for new pharmaceuticals faster, less expensive, and more effective. Despite its extensive use in numerous fields (e.g., ADMET prediction, virtual screening), little research has been conducted on the out-of-distribution (OOD) learning problem with noise. We present DrugOOD, a systematic OOD dataset curator and benchmark for AIDD. Particularly, we focus on the drug-target binding affinity prediction problem, which involves both macromolecule (protein target) and small-molecule (drug compound). DrugOOD offers an automated dataset curator with user-friendly customization scripts, rich domain annotations aligned with biochemistry knowledge, realistic noise level annotations, and rigorous benchmarking of SOTA OOD algorithms, as opposed to only providing fixed datasets. Since the molecular data is often modeled as irregular graphs using graph neural network (GNN) backbones, DrugOOD also serves as a valuable testbed for graph OOD learning problems. 
Extensive empirical studies have revealed a significant performance gap between in-distribution and out-of-distribution experiments, emphasizing the need for the development of more effective schemes that permit OOD generalization under noise for AIDD.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ji, Yuanfeng and Zhang, Lu and Wu, Jiaxiang and Wu, Bingzhe and Li, Lanqing and Huang, Long-Kai and Xu, Tingyang and Rong, Yu and Ren, Jie and Xue, Ding and Lai, Houtim and Liu, Wei and Huang, Junzhou and Zhou, Shuigeng and Luo, Ping and Zhao, Peilin and Bian, Yatao}, year={2023}, month={Jun.}, pages={8023-8031} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25970/25742", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25970", + "pdf_size": 631144, + "gs_citation": 122, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5259372886774805348&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "connect.hku.hk; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ", + "email": "connect.hku.hk; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 17, + "aff_unique_index": "0;0+1;0;0;0;0;0;0;0;0;0;0;0;1;2;0;0", + "aff_unique_norm": "Tencent;Fudan University;The University of Hong Kong", + "aff_unique_dep": "Tencent AI Lab;;", + "aff_unique_url": "https://ai.tencent.com;https://www.fudan.edu.cn;https://www.hku.hk", + "aff_unique_abbr": "Tencent AI Lab;Fudan;HKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26057", + "title": "Dual Label-Guided Graph Refinement for Multi-View Graph Clustering", + "track": "main", + "status": "Technical", + "abstract": "With the increase of multi-view graph data, multi-view graph clustering (MVGC) that can discover the hidden clusters without label supervision has attracted growing attention 
from researchers. Existing MVGC methods are often sensitive to the given graphs, especially influenced by the low quality graphs, i.e., they tend to be limited by the homophily assumption. However, the widespread real-world data hardly satisfy the homophily assumption. This gap limits the performance of existing MVGC methods on low homophilous graphs. To mitigate this limitation, our motivation is to extract high-level view-common information which is used to refine each view's graph, and reduce the influence of non-homophilous edges. To this end, we propose dual label-guided graph refinement for multi-view graph clustering (DuaLGR), to alleviate the vulnerability in facing low homophilous graphs. Specifically, DuaLGR consists of two modules named dual label-guided graph refinement module and graph encoder module. The first module is designed to extract the soft label from node features and graphs, and then learn a refinement matrix. In cooperation with the pseudo label from the second module, these graphs are refined and aggregated adaptively with different orders. Subsequently, a consensus graph can be generated in the guidance of the pseudo label. Finally, the graph encoder module encodes the consensus graph along with node features to produce the high-level pseudo label for iteratively clustering. The experimental results show the superior performance on coping with low homophilous graph data. 
The source code for DuaLGR is available at https://github.com/YwL-zhufeng/DuaLGR.", + "primary_area": "machine learning ii", + "author": "Yawen Ling; Jianpeng Chen; Yazhou Ren; Xiaorong Pu; Jie Xu; Xiaofeng Zhu; Lifang He", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China+Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China+Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China; Department of Computer Science and Engineering, Lehigh University, Bethlehem, PA, USA", + "bibtex": "@article{Ling_Chen_Ren_Pu_Xu_Zhu_He_2023, title={Dual Label-Guided Graph Refinement for Multi-View Graph Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26057}, DOI={10.1609/aaai.v37i7.26057}, abstractNote={With the increase of multi-view graph data, multi-view graph clustering (MVGC) that can discover the hidden clusters without label supervision has attracted growing attention from researchers. Existing MVGC methods are often sensitive to the given graphs, especially influenced by the low quality graphs, i.e., they tend to be limited by the homophily assumption. However, the widespread real-world data hardly satisfy the homophily assumption. 
This gap limits the performance of existing MVGC methods on low homophilous graphs. To mitigate this limitation, our motivation is to extract high-level view-common information which is used to refine each view\u2019s graph, and reduce the influence of non-homophilous edges. To this end, we propose dual label-guided graph refinement for multi-view graph clustering (DuaLGR), to alleviate the vulnerability in facing low homophilous graphs. Specifically, DuaLGR consists of two modules named dual label-guided graph refinement module and graph encoder module. The first module is designed to extract the soft label from node features and graphs, and then learn a refinement matrix. In cooperation with the pseudo label from the second module, these graphs are refined and aggregated adaptively with different orders. Subsequently, a consensus graph can be generated in the guidance of the pseudo label. Finally, the graph encoder module encodes the consensus graph along with node features to produce the high-level pseudo label for iteratively clustering. The experimental results show the superior performance on coping with low homophilous graph data. 
The source code for DuaLGR is available at https://github.com/YwL-zhufeng/DuaLGR.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ling, Yawen and Chen, Jianpeng and Ren, Yazhou and Pu, Xiaorong and Xu, Jie and Zhu, Xiaofeng and He, Lifang}, year={2023}, month={Jun.}, pages={8791-8798} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26057/25829", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26057", + "pdf_size": 253266, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13248900741030977489&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "outlook.com;outlook.com;uestc.edu.cn;uestc.edu.cn;outlook.com;gmail.com;lehigh.edu", + "email": "outlook.com;outlook.com;uestc.edu.cn;uestc.edu.cn;outlook.com;gmail.com;lehigh.edu", + "github": "https://github.com/YwL-zhufeng/DuaLGR", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0+0;0;0+0;1", + "aff_unique_norm": "University of Electronic Science and Technology of China;Lehigh University", + "aff_unique_dep": "School of Computer Science and Engineering;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.uestc.edu.cn;https://www.lehigh.edu", + "aff_unique_abbr": "UESTC;Lehigh", + "aff_campus_unique_index": "0;0;1;0+1;0;0+1;2", + "aff_campus_unique": "Chengdu;Shenzhen;Bethlehem", + "aff_country_unique_index": "0;0;0;0+0;0;0+0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25536", + "title": "Dual Low-Rank Graph Autoencoder for Semantic and Topological Networks", + "track": "main", + "status": "Technical", + "abstract": "Due to the powerful capability to gather the information of neighborhood nodes, Graph Convolutional Network (GCN) has become a widely explored hotspot in recent years. 
As a well-established extension, Graph AutoEncoder (GAE) succeeds in mining underlying node representations via evaluating the quality of adjacency matrix reconstruction from learned features. However, limited works on GAE were devoted to leveraging both semantic and topological graphs, and they only indirectly extracted the relationships between graphs via weights shared by features. To better capture the connections between nodes from these two types of graphs, this paper proposes a graph neural network dubbed Dual Low-Rank Graph AutoEncoder (DLR-GAE), which takes both semantic and topological homophily into consideration. Differing from prior works that share common weights between GCNs, the presented DLR-GAE conducts sustained exploration of low-rank information between two distinct graphs, and reconstructs adjacency matrices from learned latent factors and embeddings. In order to obtain valid adjacency matrices that meet certain conditions, we design some surrogates and projections to restrict the learned factor matrix. 
We compare the proposed model with state-of-the-art methods on several datasets, which demonstrates the superior accuracy of DLR-GAE in semi-supervised classification.", + "primary_area": "data mining and knowledge management", + "author": "Zhaoliang Chen; Zhihao Wu; Shiping Wang; Wenzhong Guo", + "authorids": "", + "aff": "1College of Computer and Data Science, Fuzhou University, Fuzhou, China + 2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, Fuzhou, China; 1College of Computer and Data Science, Fuzhou University, Fuzhou, China + 2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, Fuzhou, China; 1College of Computer and Data Science, Fuzhou University, Fuzhou, China + 2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, Fuzhou, China; 1College of Computer and Data Science, Fuzhou University, Fuzhou, China + 2Fujian Provincial Key Laboratory of Network Computing and Intelligent Information Processing, Fuzhou University, Fuzhou, China", + "bibtex": "@article{Chen_Wu_Wang_Guo_2023, title={Dual Low-Rank Graph Autoencoder for Semantic and Topological Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25536}, DOI={10.1609/aaai.v37i4.25536}, abstractNote={Due to the powerful capability to gather the information of neighborhood nodes, Graph Convolutional Network (GCN) has become a widely explored hotspot in recent years. As a well-established extension, Graph AutoEncoder (GAE) succeeds in mining underlying node representations via evaluating the quality of adjacency matrix reconstruction from learned features. However, limited works on GAE were devoted to leveraging both semantic and topological graphs, and they only indirectly extracted the relationships between graphs via weights shared by features. 
To better capture the connections between nodes from these two types of graphs, this paper proposes a graph neural network dubbed Dual Low-Rank Graph AutoEncoder (DLR-GAE), which takes both semantic and topological homophily into consideration. Differing from prior works that share common weights between GCNs, the presented DLR-GAE conducts sustained exploration of low-rank information between two distinct graphs, and reconstructs adjacency matrices from learned latent factors and embeddings. In order to obtain valid adjacency matrices that meet certain conditions, we design some surrogates and projections to restrict the learned factor matrix. We compare the proposed model with state-of-the-art methods on several datasets, which demonstrates the superior accuracy of DLR-GAE in semi-supervised classification.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Zhaoliang and Wu, Zhihao and Wang, Shiping and Guo, Wenzhong}, year={2023}, month={Jun.}, pages={4191-4198} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25536/25308", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25536", + "pdf_size": 1641095, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11704899405811756479&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "outlook.com;gmail.com;163.com;fzu.edu.cn", + "email": "outlook.com;gmail.com;163.com;fzu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "Fuzhou University", + "aff_unique_dep": "College of Computer and Data Science", + "aff_unique_url": "https://www.fzu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Fuzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25346", + "title": "Dual Memory Aggregation Network for Event-Based 
Object Detection with Learnable Representation", + "track": "main", + "status": "Technical", + "abstract": "Event-based cameras are bio-inspired sensors that capture brightness change of every pixel in an asynchronous manner. Compared with frame-based sensors, event cameras have microsecond-level latency and high dynamic range, hence showing great potential for object detection under high-speed motion and poor illumination conditions. Due to sparsity and asynchronism nature with event streams, most of existing approaches resort to hand-crafted methods to convert event data into 2D grid representation. However, they are sub-optimal in aggregating information from event stream for object detection. In this work, we propose to learn an event representation optimized for event-based object detection. Specifically, event streams are divided into grids in the x-y-t coordinates for both positive and negative polarity, producing a set of pillars as 3D tensor representation. To fully exploit information with event streams to detect objects, a dual-memory aggregation network (DMANet) is proposed to leverage both long and short memory along event streams to aggregate effective information for object detection. Long memory is encoded in the hidden state of adaptive convLSTMs while short memory is modeled by computing spatial-temporal correlation between event pillars at neighboring time intervals. 
Extensive experiments on the recently released event-based automotive detection dataset demonstrate the effectiveness of the proposed method.", + "primary_area": "computer vision ii", + "author": "Dongsheng Wang; Xu Jia; Yang Zhang; Xinyu Zhang; Yaoyuan Wang; Ziyang Zhang; Dong Wang; Huchuan Lu", + "authorids": "", + "aff": "School of Artificial Intelligence, Dalian University of Technology, China; School of Artificial Intelligence, Dalian University of Technology, China; School of Artificial Intelligence, Dalian University of Technology, China; School of Artificial Intelligence, Dalian University of Technology, China; Advanced Computing and Storage Lab, Huawei Technologies Co. Ltd, China; Advanced Computing and Storage Lab, Huawei Technologies Co. Ltd, China; School of Artificial Intelligence, Dalian University of Technology, China; School of Artificial Intelligence, Dalian University of Technology, China", + "bibtex": "@article{Wang_Jia_Zhang_Zhang_Wang_Zhang_Wang_Lu_2023, title={Dual Memory Aggregation Network for Event-Based Object Detection with Learnable Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25346}, DOI={10.1609/aaai.v37i2.25346}, abstractNote={Event-based cameras are bio-inspired sensors that capture brightness change of every pixel in an asynchronous manner. Compared with frame-based sensors, event cameras have microsecond-level latency and high dynamic range, hence showing great potential for object detection under high-speed motion and poor illumination conditions. Due to sparsity and asynchronism nature with event streams, most of existing approaches resort to hand-crafted methods to convert event data into 2D grid representation. However, they are sub-optimal in aggregating information from event stream for object detection. In this work, we propose to learn an event representation optimized for event-based object detection. 
Specifically, event streams are divided into grids in the x-y-t coordinates for both positive and negative polarity, producing a set of pillars as 3D tensor representation. To fully exploit information with event streams to detect objects, a dual-memory aggregation network (DMANet) is proposed to leverage both long and short memory along event streams to aggregate effective information for object detection. Long memory is encoded in the hidden state of adaptive convLSTMs while short memory is modeled by computing spatial-temporal correlation between event pillars at neighboring time intervals. Extensive experiments on the recently released event-based automotive detection dataset demonstrate the effectiveness of the proposed method.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Dongsheng and Jia, Xu and Zhang, Yang and Zhang, Xinyu and Wang, Yaoyuan and Zhang, Ziyang and Wang, Dong and Lu, Huchuan}, year={2023}, month={Jun.}, pages={2492-2500} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25346/25118", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25346", + "pdf_size": 5280613, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6598400232334350905&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.dlut.edu.cn;mail.dlut.edu.cn;mail.dlut.edu.cn;dlut.edu.cn;huawei.com;huawei.com;dlut.edu.cn;dlut.edu.cn", + "email": "mail.dlut.edu.cn;mail.dlut.edu.cn;mail.dlut.edu.cn;dlut.edu.cn;huawei.com;huawei.com;dlut.edu.cn;dlut.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;1;1;0;0", + "aff_unique_norm": "Dalian University of Technology;Huawei Technologies Co. 
Ltd", + "aff_unique_dep": "School of Artificial Intelligence;Advanced Computing and Storage Lab", + "aff_unique_url": "http://en.dlut.edu.cn/;https://www.huawei.com", + "aff_unique_abbr": "DUT;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25489", + "title": "Dual Memory Units with Uncertainty Regulation for Weakly Supervised Video Anomaly Detection", + "track": "main", + "status": "Technical", + "abstract": "Learning discriminative features for effectively separating abnormal events from normality is crucial for weakly supervised video anomaly detection (WS-VAD) tasks. Existing approaches, both video and segment level label oriented, mainly focus on extracting representations for anomaly data while neglecting the implication of normal data. We observe that such a scheme is sub-optimal, i.e., for better distinguishing anomaly one needs to understand what is a normal state, and may yield a higher false alarm rate. To address this issue, we propose an Uncertainty Regulated Dual Memory Units (UR-DMU) model to learn both the representations of normal data and discriminative features of abnormal data. To be specific, inspired by the traditional global and local structure on graph convolutional networks, we introduce a Global and Local Multi-Head Self Attention (GL-MHSA) module for the Transformer network to obtain more expressive embeddings for capturing associations in videos. Then, we use two memory banks, one additional abnormal memory for tackling hard samples, to store and separate abnormal and normal prototypes and maximize the margins between the two representations. Finally, we propose an uncertainty learning scheme to learn the normal data latent space, that is robust to noise from camera switching, object changing, scene transforming, etc. 
Extensive experiments on XD-Violence and UCF-Crime datasets demonstrate that our method outperforms the state-of-the-art methods by a sizable margin.", + "primary_area": "computer vision iii", + "author": "Hang Zhou; Junqing Yu; Wei Yang", + "authorids": "", + "aff": "Huazhong University of Science and Technology; Huazhong University of Science and Technology; Huazhong University of Science and Technology", + "bibtex": "@article{Zhou_Yu_Yang_2023, title={Dual Memory Units with Uncertainty Regulation for Weakly Supervised Video Anomaly Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25489}, DOI={10.1609/aaai.v37i3.25489}, abstractNote={Learning discriminative features for effectively separating abnormal events from normality is crucial for weakly supervised video anomaly detection (WS-VAD) tasks. Existing approaches, both video and segment level label oriented, mainly focus on extracting representations for anomaly data while neglecting the implication of normal data. We observe that such a scheme is sub-optimal, i.e., for better distinguishing anomaly one needs to understand what is a normal state, and may yield a higher false alarm rate. To address this issue, we propose an Uncertainty Regulated Dual Memory Units (UR-DMU) model to learn both the representations of normal data and discriminative features of abnormal data. To be specific, inspired by the traditional global and local structure on graph convolutional networks, we introduce a Global and Local Multi-Head Self Attention (GL-MHSA) module for the Transformer network to obtain more expressive embeddings for capturing associations in videos. Then, we use two memory banks, one additional abnormal memory for tackling hard samples, to store and separate abnormal and normal prototypes and maximize the margins between the two representations. 
Finally, we propose an uncertainty learning scheme to learn the normal data latent space, that is robust to noise from camera switching, object changing, scene transforming, etc. Extensive experiments on XD-Violence and UCF-Crime datasets demonstrate that our method outperforms the state-of-the-art methods by a sizable margin.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Hang and Yu, Junqing and Yang, Wei}, year={2023}, month={Jun.}, pages={3769-3777} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25489/25261", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25489", + "pdf_size": 1220977, + "gs_citation": 99, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10248391801522195619&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26032", + "title": "Dual Mutual Information Constraints for Discriminative Clustering", + "track": "main", + "status": "Technical", + "abstract": "Deep clustering is a fundamental task in machine learning and data mining that aims at learning clustering-oriented feature representations. In previous studies, most of deep clustering methods follow the idea of self-supervised representation learning by maximizing the consistency of all similar instance pairs while ignoring the effect of feature redundancy on clustering performance. 
In this paper, to address the above issue, we design a dual mutual information constrained clustering method named DMICC which is based on deep contrastive clustering architecture, in which the dual mutual information constraints are particularly employed with solid theoretical guarantees and experimental validations. Specifically, at the feature level, we reduce the redundancy among features by minimizing the mutual information across all the dimensionalities to encourage the neural network to extract more discriminative features. At the instance level, we maximize the mutual information of the similar instance pairs to obtain more unbiased and robust representations. The dual mutual information constraints happen simultaneously and thus complement each other to jointly optimize better features that are suitable for the clustering task. We also prove that our adopted mutual information constraints are superior in feature extraction, and the proposed dual mutual information constraints are clearly bounded and thus solvable. Extensive experiments on five benchmark datasets show that our proposed approach outperforms most other clustering algorithms. The code is available at https://github.com/Li-Hyn/DMICC.", + "primary_area": "machine learning ii", + "author": "Hongyu Li; Lefei Zhang; Kehua Su", + "authorids": "", + "aff": "School of Computer Science, Wuhan University, Wuhan, 430072, P. R. China+Hubei Luojia Laboratory, Wuhan 430072, P. R. China; School of Computer Science, Wuhan University, Wuhan, 430072, P. R. China; School of Computer Science, Wuhan University, Wuhan, 430072, P. R. China", + "bibtex": "@article{Li_Zhang_Su_2023, title={Dual Mutual Information Constraints for Discriminative Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26032}, DOI={10.1609/aaai.v37i7.26032}, abstractNote={Deep clustering is a fundamental task in machine learning and data mining that aims at learning clustering-oriented feature representations. 
In previous studies, most of deep clustering methods follow the idea of self-supervised representation learning by maximizing the consistency of all similar instance pairs while ignoring the effect of feature redundancy on clustering performance. In this paper, to address the above issue, we design a dual mutual information constrained clustering method named DMICC which is based on deep contrastive clustering architecture, in which the dual mutual information constraints are particularly employed with solid theoretical guarantees and experimental validations. Specifically, at the feature level, we reduce the redundancy among features by minimizing the mutual information across all the dimensionalities to encourage the neural network to extract more discriminative features. At the instance level, we maximize the mutual information of the similar instance pairs to obtain more unbiased and robust representations. The dual mutual information constraints happen simultaneously and thus complement each other to jointly optimize better features that are suitable for the clustering task. We also prove that our adopted mutual information constraints are superior in feature extraction, and the proposed dual mutual information constraints are clearly bounded and thus solvable. Extensive experiments on five benchmark datasets show that our proposed approach outperforms most other clustering algorithms. 
The code is available at https://github.com/Li-Hyn/DMICC.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Hongyu and Zhang, Lefei and Su, Kehua}, year={2023}, month={Jun.}, pages={8571-8579} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26032/25804", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26032", + "pdf_size": 3409926, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8435175937908630289&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "whu.edu.cn;whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn;whu.edu.cn", + "github": "https://github.com/Li-Hyn/DMICC", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "Wuhan University;Hubei Luojia Laboratory", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "http://www.whu.edu.cn;", + "aff_unique_abbr": "WHU;", + "aff_campus_unique_index": "0+0;0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25122", + "title": "Dual-Domain Attention for Image Deblurring", + "track": "main", + "status": "Technical", + "abstract": "As a long-standing and challenging task, image deblurring aims to reconstruct the latent sharp image from its degraded counterpart. In this study, to bridge the gaps between degraded/sharp image pairs in the spatial and frequency domains simultaneously, we develop the dual-domain attention mechanism for image deblurring. Self-attention is widely used in vision tasks, however, due to the quadratic complexity, it is not applicable to image deblurring with high-resolution images. 
To alleviate this issue, we propose a novel spatial attention module by implementing self-attention in the style of dynamic group convolution for integrating information from the local region, enhancing the representation learning capability and reducing computational burden. Regarding frequency domain learning, many frequency-based deblurring approaches either treat the spectrum as a whole or decompose frequency components in a complicated manner. In this work, we devise a frequency attention module to compactly decouple the spectrum into distinct frequency parts and accentuate the informative part with extremely lightweight learnable parameters. Finally, we incorporate attention modules into a U-shaped network. Extensive comparisons with prior arts on the common benchmarks show that our model, named Dual-domain Attention Network (DDANet), obtains comparable results with a significantly improved inference speed.", + "primary_area": "computer vision i", + "author": "Yuning Cui; Yi Tao; Wenqi Ren; Alois Knoll", + "authorids": "", + "aff": "Technical University of Munich; MIT Universal Village Program; Shenzhen Campus of Sun Yat-sen University; Technical University of Munich", + "bibtex": "@article{Cui_Tao_Ren_Knoll_2023, title={Dual-Domain Attention for Image Deblurring}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25122}, DOI={10.1609/aaai.v37i1.25122}, abstractNote={As a long-standing and challenging task, image deblurring aims to reconstruct the latent sharp image from its degraded counterpart. In this study, to bridge the gaps between degraded/sharp image pairs in the spatial and frequency domains simultaneously, we develop the dual-domain attention mechanism for image deblurring. Self-attention is widely used in vision tasks, however, due to the quadratic complexity, it is not applicable to image deblurring with high-resolution images. 
To alleviate this issue, we propose a novel spatial attention module by implementing self-attention in the style of dynamic group convolution for integrating information from the local region, enhancing the representation learning capability and reducing computational burden. Regarding frequency domain learning, many frequency-based deblurring approaches either treat the spectrum as a whole or decompose frequency components in a complicated manner. In this work, we devise a frequency attention module to compactly decouple the spectrum into distinct frequency parts and accentuate the informative part with extremely lightweight learnable parameters. Finally, we incorporate attention modules into a U-shaped network. Extensive comparisons with prior arts on the common benchmarks show that our model, named Dual-domain Attention Network (DDANet), obtains comparable results with a significantly improved inference speed.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Yuning and Tao, Yi and Ren, Wenqi and Knoll, Alois}, year={2023}, month={Jun.}, pages={479-487} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25122/24894", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25122", + "pdf_size": 1495482, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14160811618017343674&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "in.tum.de;in.tum.de;universal-village.org;mail.sysu.edu.cn", + "email": "in.tum.de;in.tum.de;universal-village.org;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Technical University of Munich;Massachusetts Institute of Technology;Sun Yat-sen University", + "aff_unique_dep": ";Universal Village Program;", + "aff_unique_url": "https://www.tum.de;https://www.mit.edu;http://www.sysu.edu.cn/", + "aff_unique_abbr": "TUM;MIT;SYSU", + 
"aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;1;2;0", + "aff_country_unique": "Germany;United States;China" + }, + { + "id": "article-27051", + "title": "DyCVAE: Learning Dynamic Causal Factors for Non-stationary Series Domain Generalization (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Learning domain-invariant representations is a major task of out-of-distribution generalization. To address this issue, recent efforts have taken into accounting causality, aiming at learning the causal factors with regard to tasks. However, extending existing generalization methods for adapting non-stationary time series may be ineffective, because they fail to model the underlying causal factors due to temporal-domain shifts except for source-domain shifts, as pointed out by recent studies. To this end, we propose a novel model DyCVAE to learn dynamic causal factors. The results on synthetic and real datasets demonstrate the effectiveness of our proposed model for the task of generalization in time series domain.", + "primary_area": "", + "author": "Weifeng Zhang; Zhiyuan Wang; Kunpeng Zhang; Ting Zhong; Fan Zhou", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Maryland; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China + Kashi Institute of Electronics and Information Industry", + "bibtex": "@article{Zhang_Wang_Zhang_Zhong_Zhou_2024, title={DyCVAE: Learning Dynamic Causal Factors for Non-stationary Series Domain Generalization (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27051}, DOI={10.1609/aaai.v37i13.27051}, abstractNote={Learning domain-invariant representations is a major task of out-of-distribution generalization. 
To address this issue, recent efforts have taken into accounting causality, aiming at learning the causal factors with regard to tasks. However, extending existing generalization methods for adapting non-stationary time series may be ineffective, because they fail to model the underlying causal factors due to temporal-domain shifts except for source-domain shifts, as pointed out by recent studies. To this end, we propose a novel model DyCVAE to learn dynamic causal factors. The results on synthetic and real datasets demonstrate the effectiveness of our proposed model for the task of generalization in time series domain.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Weifeng and Wang, Zhiyuan and Zhang, Kunpeng and Zhong, Ting and Zhou, Fan}, year={2024}, month={Jul.}, pages={16382-16383} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27051/26823", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27051", + "pdf_size": 95975, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:6Y1uNWYOEI8J:scholar.google.com/&scioq=DyCVAE:+Learning+Dynamic+Causal+Factors+for+Non-stationary+Series+Domain+Generalization+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "outlook.com;gmail.com;umd.edu;uestc.edu.cn;uestc.edu.cn", + "email": "outlook.com;gmail.com;umd.edu;uestc.edu.cn;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0+2", + "aff_unique_norm": "University of Electronic Science and Technology of China;University of Maryland;Kashi Institute of Electronics and Information Industry", + "aff_unique_dep": ";;Electronics and Information Industry", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.umd.edu;", + "aff_unique_abbr": "UESTC;UMD;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0+0", + "aff_country_unique": 
"China;United States" + }, + { + "id": "article-26543", + "title": "DyRRen: A Dynamic Retriever-Reranker-Generator Model for Numerical Reasoning over Tabular and Textual Data", + "track": "main", + "status": "Technical", + "abstract": "Numerical reasoning over hybrid data containing tables and long texts has recently received research attention from the AI community. To generate an executable reasoning program consisting of math and table operations to answer a question, state-of-the-art methods use a retriever-generator pipeline. However, their retrieval results are static, while different generation steps may rely on different sentences. To attend to the retrieved information that is relevant to each generation step, in this paper, we propose DyRRen, an extended retriever-reranker-generator framework where each generation step is enhanced by a dynamic reranking of retrieved sentences. It outperforms existing baselines on the FinQA dataset.", + "primary_area": "speech natural language processing", + "author": "Xiao Li; Yin Zhu; Sichen Liu; Jiangzhou Ju; Yuzhong Qu; Gong Cheng", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China", + "bibtex": "@article{Li_Zhu_Liu_Ju_Qu_Cheng_2023, title={DyRRen: A Dynamic Retriever-Reranker-Generator Model for Numerical Reasoning over Tabular and Textual Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26543}, DOI={10.1609/aaai.v37i11.26543}, abstractNote={Numerical reasoning over hybrid data containing tables 
and long texts has recently received research attention from the AI community. To generate an executable reasoning program consisting of math and table operations to answer a question, state-of-the-art methods use a retriever-generator pipeline. However, their retrieval results are static, while different generation steps may rely on different sentences. To attend to the retrieved information that is relevant to each generation step, in this paper, we propose DyRRen, an extended retriever-reranker-generator framework where each generation step is enhanced by a dynamic reranking of retrieved sentences. It outperforms existing baselines on the FinQA dataset.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xiao and Zhu, Yin and Liu, Sichen and Ju, Jiangzhou and Qu, Yuzhong and Cheng, Gong}, year={2023}, month={Jun.}, pages={13139-13147} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26543/26315", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26543", + "pdf_size": 619773, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12118082996975543267&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "NU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26339", + "title": "Dynamic Ensemble of Low-Fidelity Experts: Mitigating NAS 
\u201cCold-Start\u201d", + "track": "main", + "status": "Technical", + "abstract": "Predictor-based Neural Architecture Search (NAS) employs an architecture performance predictor to improve the sample efficiency. However, predictor-based NAS suffers from the severe ``cold-start'' problem, since a large amount of architecture-performance data is required to get a working predictor. In this paper, we focus on exploiting information in cheaper-to-obtain performance estimations (i.e., low-fidelity information) to mitigate the large data requirements of predictor training. Despite the intuitiveness of this idea, we observe that using inappropriate low-fidelity information even damages the prediction ability and different search spaces have different preferences for low-fidelity information types. To solve the problem and better fuse beneficial information provided by different types of low-fidelity information, we propose a novel dynamic ensemble predictor framework that comprises two steps. In the first step, we train different sub-predictors on different types of available low-fidelity information to extract beneficial knowledge as low-fidelity experts. In the second step, we learn a gating network to dynamically output a set of weighting coefficients conditioned on each input neural architecture, which will be used to combine the predictions of different low-fidelity experts in a weighted sum. The overall predictor is optimized on a small set of actual architecture-performance data to fuse the knowledge from different low-fidelity experts to make the final prediction. We conduct extensive experiments across five search spaces with different architecture encoders under various experimental settings. For example, our methods can improve the Kendall's Tau correlation coefficient between actual performance and predicted scores from 0.2549 to 0.7064 with only 25 actual architecture-performance data on NDS-ResNet. 
Our method can easily be incorporated into existing predictor-based NAS frameworks to discover better architectures. Our method will be implemented in Mindspore (Huawei 2020), and the example code is published at https://github.com/A-LinCui/DELE.", + "primary_area": "machine learning iv", + "author": "Junbo Zhao; Xuefei Ning; Enshu Liu; Binxin Ru; Zixuan Zhou; Tianchen Zhao; Chen Chen; Jiajin Zhang; Qingmin Liao; Yu Wang", + "authorids": "", + "aff": "Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; SailYond Technology & Research Institute of Tsinghua University in Shenzhen; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Huawei Technologies Co., Ltd; Huawei Technologies Co., Ltd; Tsinghua Shenzhen International Graduate School; Department of Electronic Engineering, Tsinghua University", + "bibtex": "@article{Zhao_Ning_Liu_Ru_Zhou_Zhao_Chen_Zhang_Liao_Wang_2023, title={Dynamic Ensemble of Low-Fidelity Experts: Mitigating NAS \u201cCold-Start\u201d}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26339}, DOI={10.1609/aaai.v37i9.26339}, abstractNote={Predictor-based Neural Architecture Search (NAS) employs an architecture performance predictor to improve the sample efficiency. However, predictor-based NAS suffers from the severe ``cold-start\u2019\u2019 problem, since a large amount of architecture-performance data is required to get a working predictor. In this paper, we focus on exploiting information in cheaper-to-obtain performance estimations (i.e., low-fidelity information) to mitigate the large data requirements of predictor training. 
Despite the intuitiveness of this idea, we observe that using inappropriate low-fidelity information even damages the prediction ability and different search spaces have different preferences for low-fidelity information types. To solve the problem and better fuse beneficial information provided by different types of low-fidelity information, we propose a novel dynamic ensemble predictor framework that comprises two steps. In the first step, we train different sub-predictors on different types of available low-fidelity information to extract beneficial knowledge as low-fidelity experts. In the second step, we learn a gating network to dynamically output a set of weighting coefficients conditioned on each input neural architecture, which will be used to combine the predictions of different low-fidelity experts in a weighted sum. The overall predictor is optimized on a small set of actual architecture-performance data to fuse the knowledge from different low-fidelity experts to make the final prediction. We conduct extensive experiments across five search spaces with different architecture encoders under various experimental settings. For example, our methods can improve the Kendall\u2019s Tau correlation coefficient between actual performance and predicted scores from 0.2549 to 0.7064 with only 25 actual architecture-performance data on NDS-ResNet. Our method can easily be incorporated into existing predictor-based NAS frameworks to discover better architectures. 
Our method will be implemented in Mindspore (Huawei 2020), and the example code is published at https://github.com/A-LinCui/DELE.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Junbo and Ning, Xuefei and Liu, Enshu and Ru, Binxin and Zhou, Zixuan and Zhao, Tianchen and Chen, Chen and Zhang, Jiajin and Liao, Qingmin and Wang, Yu}, year={2023}, month={Jun.}, pages={11316-11326} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26339/26111", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26339", + "pdf_size": 436296, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6716859380663159374&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;tsinghua.edu.cn; ; ; ; ; ; ; ;", + "email": "gmail.com;tsinghua.edu.cn; ; ; ; ; ; ; ;", + "github": "https://github.com/A-LinCui/DELE", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;1;1;0;0", + "aff_unique_norm": "Tsinghua University;Huawei Technologies", + "aff_unique_dep": "Department of Electronic Engineering;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "THU;Huawei", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26338", + "title": "Dynamic Heterogeneous Graph Attention Neural Architecture Search", + "track": "main", + "status": "Technical", + "abstract": "Dynamic heterogeneous graph neural networks (DHGNNs) have been shown to be effective in handling the ubiquitous dynamic heterogeneous graphs. However, the existing DHGNNs are hand-designed, requiring extensive human efforts and failing to adapt to diverse dynamic heterogeneous graph scenarios. 
In this paper, we propose to automate the design of DHGNN, which faces two major challenges: 1) how to design the search space to jointly consider the spatial-temporal dependencies and heterogeneous interactions in graphs; 2) how to design an efficient search algorithm in the potentially large and complex search space. To tackle these challenges, we propose a novel Dynamic Heterogeneous Graph Attention Search (DHGAS) method. Our proposed method can automatically discover the optimal DHGNN architecture and adapt to various dynamic heterogeneous graph scenarios without human guidance. In particular, we first propose a unified dynamic heterogeneous graph attention (DHGA) framework, which enables each node to jointly attend its heterogeneous and dynamic neighbors. Based on the framework, we design a localization space to determine where the attention should be applied and a parameterization space to determine how the attention should be parameterized. Lastly, we design a multi-stage differentiable search algorithm to efficiently explore the search space. Extensive experiments on real-world dynamic heterogeneous graph datasets demonstrate that our proposed method significantly outperforms state-of-the-art baselines for tasks including link prediction, node classification and node regression. 
To the best of our knowledge, DHGAS is the first dynamic heterogeneous graph neural architecture search method.", + "primary_area": "machine learning iv", + "author": "Zeyang Zhang; Ziwei Zhang; Xin Wang; Yijian Qin; Zhou Qin; Wenwu Zhu", + "authorids": "", + "aff": "Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Alibaba Group; Tsinghua University", + "bibtex": "@article{Zhang_Zhang_Wang_Qin_Qin_Zhu_2023, title={Dynamic Heterogeneous Graph Attention Neural Architecture Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26338}, DOI={10.1609/aaai.v37i9.26338}, abstractNote={Dynamic heterogeneous graph neural networks (DHGNNs) have been shown to be effective in handling the ubiquitous dynamic heterogeneous graphs. However, the existing DHGNNs are hand-designed, requiring extensive human efforts and failing to adapt to diverse dynamic heterogeneous graph scenarios. In this paper, we propose to automate the design of DHGNN, which faces two major challenges: 1) how to design the search space to jointly consider the spatial-temporal dependencies and heterogeneous interactions in graphs; 2) how to design an efficient search algorithm in the potentially large and complex search space. To tackle these challenges, we propose a novel Dynamic Heterogeneous Graph Attention Search (DHGAS) method. Our proposed method can automatically discover the optimal DHGNN architecture and adapt to various dynamic heterogeneous graph scenarios without human guidance. In particular, we first propose a unified dynamic heterogeneous graph attention (DHGA) framework, which enables each node to jointly attend its heterogeneous and dynamic neighbors. Based on the framework, we design a localization space to determine where the attention should be applied and a parameterization space to determine how the attention should be parameterized. 
Lastly, we design a multi-stage differentiable search algorithm to efficiently explore the search space. Extensive experiments on real-world dynamic heterogeneous graph datasets demonstrate that our proposed method significantly outperforms state-of-the-art baselines for tasks including link prediction, node classification and node regression. To the best of our knowledge, DHGAS is the first dynamic heterogeneous graph neural architecture search method.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zeyang and Zhang, Ziwei and Wang, Xin and Qin, Yijian and Qin, Zhou and Zhu, Wenwu}, year={2023}, month={Jun.}, pages={11307-11315} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26338/26110", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26338", + "pdf_size": 361967, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17528911122598195578&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;mails.tsinghua.edu.cn;alibaba-inc.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn;mails.tsinghua.edu.cn;alibaba-inc.com;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Tsinghua University;Alibaba Group", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "THU;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25537", + "title": "Dynamic Multi-Behavior Sequence Modeling for Next Item Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Sequential Recommender Systems (SRSs) aim to predict the next item that users will consume, by modeling the user interests within 
their item sequences. While most existing SRSs focus on a single type of user behavior, only a few pay attention to multi-behavior sequences, although they are very common in real-world scenarios. It is challenging to effectively capture the user interests within multi-behavior sequences, because the information about user interests is entangled throughout the sequences in complex relationships. To this end, we first address the characteristics of multi-behavior sequences that should be considered in SRSs, and then propose novel methods for Dynamic Multi-behavior Sequence modeling named DyMuS, which is a light version, and DyMuS+, which is an improved version, considering the characteristics. DyMuS first encodes each behavior sequence independently, and then combines the encoded sequences using dynamic routing, which dynamically integrates information required in the final result from among many candidates, based on correlations between the sequences. DyMuS+, furthermore, applies the dynamic routing even to encoding each behavior sequence to further capture the correlations at item-level. Moreover, we release a new, large and up-to-date dataset for multi-behavior recommendation. Our experiments on DyMuS and DyMuS+ show their superiority and the significance of capturing the characteristics of multi-behavior sequences.", + "primary_area": "data mining and knowledge management", + "author": "Junsu Cho; Dongmin Hyun; Dong won Lim; Hyeon jae Cheon; Hyoung-iel Park; Hwanjo Yu", + "authorids": "", + "aff": "Dept. of Computer Science and Engineering, POSTECH, Republic of Korea; Institute of Artificial Intelligence, POSTECH, Republic of Korea; GS Retail, Republic of Korea; GS Retail, Republic of Korea; GS Retail, Republic of Korea; Dept. 
of Computer Science and Engineering, POSTECH, Republic of Korea", + "bibtex": "@article{Cho_Hyun_Lim_Cheon_Park_Yu_2023, title={Dynamic Multi-Behavior Sequence Modeling for Next Item Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25537}, DOI={10.1609/aaai.v37i4.25537}, abstractNote={Sequential Recommender Systems (SRSs) aim to predict the next item that users will consume, by modeling the user interests within their item sequences. While most existing SRSs focus on a single type of user behavior, only a few pay attention to multi-behavior sequences, although they are very common in real-world scenarios. It is challenging to effectively capture the user interests within multi-behavior sequences, because the information about user interests is entangled throughout the sequences in complex relationships. To this end, we first address the characteristics of multi-behavior sequences that should be considered in SRSs, and then propose novel methods for Dynamic Multi-behavior Sequence modeling named DyMuS, which is a light version, and DyMuS+, which is an improved version, considering the characteristics. DyMuS first encodes each behavior sequence independently, and then combines the encoded sequences using dynamic routing, which dynamically integrates information required in the final result from among many candidates, based on correlations between the sequences. DyMuS+, furthermore, applies the dynamic routing even to encoding each behavior sequence to further capture the correlations at item-level. Moreover, we release a new, large and up-to-date dataset for multi-behavior recommendation. 
Our experiments on DyMuS and DyMuS+ show their superiority and the significance of capturing the characteristics of multi-behavior sequences.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cho, Junsu and Hyun, Dongmin and Lim, Dong won and Cheon, Hyeon jae and Park, Hyoung-iel and Yu, Hwanjo}, year={2023}, month={Jun.}, pages={4199-4207} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25537/25309", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25537", + "pdf_size": 346297, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16496285277026413288&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "postech.ac.kr;postech.ac.kr;gmail.com;gmail.com;gmail.com;postech.ac.kr", + "email": "postech.ac.kr;postech.ac.kr;gmail.com;gmail.com;gmail.com;postech.ac.kr", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;1;0", + "aff_unique_norm": "POSTECH;GS Retail", + "aff_unique_dep": "Dept. of Computer Science and Engineering;", + "aff_unique_url": "https://www.postech.ac.kr;https://www.gsretail.com", + "aff_unique_abbr": "POSTECH;GS Retail", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26845", + "title": "Dynamic Pricing with Volume Discounts in Online Settings", + "track": "iaai technical track", + "status": "Technical", + "abstract": "According to the main international reports, more pervasive industrial and business-process automation, thanks to machine learning and advanced analytic tools, will unlock more than 14 trillion USD worldwide annually by 2030. In the specific case of pricing problems, which constitute the class of problems we investigate in this paper, the estimated unlocked value will be about 0.5 trillion USD per year. 
In particular, this paper focuses on pricing in e-commerce when the objective function is profit maximization and only transaction data are available. This setting is one of the most common in real-world applications. Our work aims to find a pricing strategy that allows defining optimal prices at different volume thresholds to serve different classes of users. Furthermore, we face the major challenge, common in real-world settings, of dealing with limited data available. We design a two-phase online learning algorithm, namely PVD-B, capable of exploiting the data incrementally in an online fashion. The algorithm first estimates the demand curve and retrieves the optimal average price, and subsequently it offers discounts to differentiate the prices for each volume threshold. We ran a real-world 4-month-long A/B testing experiment in collaboration with an Italian e-commerce company, in which our algorithm PVD-B - corresponding to A configuration - has been compared with human pricing specialists - corresponding to B configuration. At the end of the experiment, our algorithm produced a total turnover of about 300 KEuros, outperforming the B configuration performance by about 55%. 
The Italian company we collaborated with decided to adopt our algorithm for more than 1,200 products since January 2022.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Marco Mussi; Gianmarco Genalti; Alessandro Nuara; Francesco Trov\u00f3; Marcello Restelli; Nicola Gatti", + "authorids": "", + "aff": "Politecnico di Milano; Politecnico di Milano; ML cube + Politecnico di Milano; Politecnico di Milano; Politecnico di Milano; Politecnico di Milano", + "bibtex": "@article{Mussi_Genalti_Nuara_Trov\u00f3_Restelli_Gatti_2024, title={Dynamic Pricing with Volume Discounts in Online Settings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26845}, DOI={10.1609/aaai.v37i13.26845}, abstractNote={According to the main international reports, more pervasive industrial and business-process automation, thanks to machine learning and advanced analytic tools, will unlock more than 14 trillion USD worldwide annually by 2030. In the specific case of pricing problems, which constitute the class of problems we investigate in this paper, the estimated unlocked value will be about 0.5 trillion USD per year. In particular, this paper focuses on pricing in e-commerce when the objective function is profit maximization and only transaction data are available. This setting is one of the most common in real-world applications. Our work aims to find a pricing strategy that allows defining optimal prices at different volume thresholds to serve different classes of users. Furthermore, we face the major challenge, common in real-world settings, of dealing with limited data available. We design a two-phase online learning algorithm, namely PVD-B, capable of exploiting the data incrementally in an online fashion. The algorithm first estimates the demand curve and retrieves the optimal average price, and subsequently it offers discounts to differentiate the prices for each volume threshold. 
We ran a real-world 4-month-long A/B testing experiment in collaboration with an Italian e-commerce company, in which our algorithm PVD-B - corresponding to A configuration - has been compared with human pricing specialists - corresponding to B configuration. At the end of the experiment, our algorithm produced a total turnover of about 300 KEuros, outperforming the B configuration performance by about 55%. The Italian company we collaborated with decided to adopt our algorithm for more than 1,200 products since January 2022.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mussi, Marco and Genalti, Gianmarco and Nuara, Alessandro and Trov\u00f3, Francesco and Restelli, Marcello and Gatti, Nicola}, year={2024}, month={Jul.}, pages={15560-15568} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26845/26617", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26845", + "pdf_size": 284303, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12559453278965707272&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": "polimi.it;polimi.it;mlcube.com;polimi.it;polimi.it;polimi.it", + "email": "polimi.it;polimi.it;mlcube.com;polimi.it;polimi.it;polimi.it", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1+0;0;0;0", + "aff_unique_norm": "Politecnico di Milano;ML cube", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.polimi.it;", + "aff_unique_abbr": "Polimi;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Italy;" + }, + { + "id": "article-25939", + "title": "Dynamic Representation Learning with Temporal Point Processes for Higher-Order Interaction Forecasting", + "track": "main", + "status": "Technical", + "abstract": "The explosion of digital information and the growing involvement of people in social networks led to enormous 
research activity to develop methods that can extract meaningful information from interaction data. Commonly, interactions are represented by edges in a network or a graph, which implicitly assumes that the interactions are pairwise and static. However, real-world interactions deviate from these assumptions: (i) interactions can be multi-way, involving more than two nodes or individuals (e.g., family relationships, protein interactions), and (ii) interactions can change over a period of time (e.g., change of opinions and friendship status). While pairwise interactions have been studied in a dynamic network setting and multi-way interactions have been studied using hypergraphs in static networks, there exists no method, at present, that can predict multi-way interactions or hyperedges in dynamic settings. Existing related methods cannot answer temporal queries like what type of interaction will occur next and when it will occur. This paper proposes a temporal point process model for hyperedge prediction to address these problems. Our proposed model uses dynamic representation learning techniques for nodes in a neural point process framework to forecast hyperedges. We present several experimental results and set benchmark results. 
As far as our knowledge, this is the first work that uses the temporal point process to forecast hyperedges in dynamic networks.", + "primary_area": "machine learning i", + "author": "Tony Gracious; Ambedkar Dukkipati", + "authorids": "", + "aff": "Indian Institute of Science, Bangalore - 560012, INDIA; Indian Institute of Science, Bangalore - 560012, INDIA", + "bibtex": "@article{Gracious_Dukkipati_2023, title={Dynamic Representation Learning with Temporal Point Processes for Higher-Order Interaction Forecasting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25939}, DOI={10.1609/aaai.v37i6.25939}, abstractNote={The explosion of digital information and the growing involvement of people in social networks led to enormous research activity to develop methods that can extract meaningful information from interaction data. Commonly, interactions are represented by edges in a network or a graph, which implicitly assumes that the interactions are pairwise and static. However, real-world interactions deviate from these assumptions: (i) interactions can be multi-way, involving more than two nodes or individuals (e.g., family relationships, protein interactions), and (ii) interactions can change over a period of time (e.g., change of opinions and friendship status). While pairwise interactions have been studied in a dynamic network setting and multi-way interactions have been studied using hypergraphs in static networks, there exists no method, at present, that can predict multi-way interactions or hyperedges in dynamic settings. Existing related methods cannot answer temporal queries like what type of interaction will occur next and when it will occur. This paper proposes a temporal point process model for hyperedge prediction to address these problems. Our proposed model uses dynamic representation learning techniques for nodes in a neural point process framework to forecast hyperedges. 
We present several experimental results and set benchmark results. As far as our knowledge, this is the first work that uses the temporal point process to forecast hyperedges in dynamic networks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gracious, Tony and Dukkipati, Ambedkar}, year={2023}, month={Jun.}, pages={7748-7756} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25939/25711", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25939", + "pdf_size": 674140, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15097867323104735050&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "iisc.ac.in;iisc.ac.in", + "email": "iisc.ac.in;iisc.ac.in", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Indian Institute of Science", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iisc.ac.in", + "aff_unique_abbr": "IISc", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Bangalore", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26127", + "title": "Dynamic Structure Pruning for Compressing CNNs", + "track": "main", + "status": "Technical", + "abstract": "Structure pruning is an effective method to compress and accelerate neural networks. While filter and channel pruning are preferable to other structure pruning methods in terms of realistic acceleration and hardware compatibility, pruning methods with a finer granularity, such as intra-channel pruning, are expected to be capable of yielding more compact and computationally efficient networks. Typical intra-channel pruning methods utilize a static and hand-crafted pruning granularity due to a large search space, which leaves room for improvement in their pruning performance. 
In this work, we introduce a novel structure pruning method, termed as dynamic structure pruning, to identify optimal pruning granularities for intra-channel pruning. In contrast to existing intra-channel pruning methods, the proposed method automatically optimizes dynamic pruning granularities in each layer while training deep neural networks. To achieve this, we propose a differentiable group learning method designed to efficiently learn a pruning granularity based on gradient-based learning of filter groups. The experimental results show that dynamic structure pruning achieves state-of-the-art pruning performance and better realistic acceleration on a GPU compared with channel pruning. In particular, it reduces the FLOPs of ResNet50 by 71.85% without accuracy degradation on the ImageNet dataset. Our code is available at https://github.com/irishev/DSP.", + "primary_area": "machine learning iii", + "author": "Jun-Hyung Park; Yeachan Kim; Junho Kim; Joon-Young Choi; SangKeun Lee", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Korea University, Seoul, Republic of Korea + Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Computer Science and Engineering, Korea University, Seoul, Republic of Korea + Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea", + "bibtex": "@article{Park_Kim_Kim_Choi_Lee_2023, title={Dynamic Structure Pruning for Compressing CNNs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26127}, DOI={10.1609/aaai.v37i8.26127}, abstractNote={Structure pruning is an effective method to compress and accelerate neural networks. 
While filter and channel pruning are preferable to other structure pruning methods in terms of realistic acceleration and hardware compatibility, pruning methods with a finer granularity, such as intra-channel pruning, are expected to be capable of yielding more compact and computationally efficient networks. Typical intra-channel pruning methods utilize a static and hand-crafted pruning granularity due to a large search space, which leaves room for improvement in their pruning performance. In this work, we introduce a novel structure pruning method, termed as dynamic structure pruning, to identify optimal pruning granularities for intra-channel pruning. In contrast to existing intra-channel pruning methods, the proposed method automatically optimizes dynamic pruning granularities in each layer while training deep neural networks. To achieve this, we propose a differentiable group learning method designed to efficiently learn a pruning granularity based on gradient-based learning of filter groups. The experimental results show that dynamic structure pruning achieves state-of-the-art pruning performance and better realistic acceleration on a GPU compared with channel pruning. In particular, it reduces the FLOPs of ResNet50 by 71.85% without accuracy degradation on the ImageNet dataset. 
Our code is available at https://github.com/irishev/DSP.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Park, Jun-Hyung and Kim, Yeachan and Kim, Junho and Choi, Joon-Young and Lee, SangKeun}, year={2023}, month={Jun.}, pages={9408-9416} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26127/25899", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26127", + "pdf_size": 1171527, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2570076772112795347&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "https://github.com/irishev/DSP", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0;0;0;0+0", + "aff_unique_norm": "Korea University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "http://www.korea.ac.kr", + "aff_unique_abbr": "KU", + "aff_campus_unique_index": "0+0;0;0;0;0+0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-26820", + "title": "Dynamics of Cooperation and Conflict in Multiagent Systems", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Meeting today\u2019s major scientific and societal challenges requires understanding the dynamics of cooperation, coordination, and conflict in complex adaptive systems (CAS). 
Artificial Intelligence (AI) is intimately connected with these challenges, both as an application domain and as a source of new computational techniques: On the one hand, AI suggests new algorithmic recommendations and interaction paradigms, offering novel possibilities to engineer cooperation and alleviate conflict in multiagent (hybrid) systems; on the other hand, new learning algorithms provide improved techniques to simulate sophisticated agents and increasingly realistic CAS. My research lies at the interface between CAS and AI: I develop computational methods to understand cooperation and conflict in multiagent systems, and how these depend on systems\u2019 design and incentives. I focus on mapping interaction rules and incentives onto emerging macroscopic patterns and long-term dynamics. Examples of this research agenda, that I will survey in this talk, include modelling (1) the connection between reputation systems and cooperation dynamics, (2) the role of agents with hard-coded strategies in stabilizing fair behaviors in a population, or (3) the impact of recommendation algorithms on potential sources of conflict (e.g., radicalization and polarization) in a system composed of adaptive agents influencing each other over time.", + "primary_area": "", + "author": "Fernando P. Santos", + "authorids": "", + "aff": "Informatics Institute, University of Amsterdam", + "bibtex": "@article{Santos_2024, title={Dynamics of Cooperation and Conflict in Multiagent Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26820}, DOI={10.1609/aaai.v37i13.26820}, abstractNote={Meeting today\u2019s major scientific and societal challenges requires understanding the dynamics of cooperation, coordination, and conflict in complex adaptive systems (CAS). 
Artificial Intelligence (AI) is intimately connected with these challenges, both as an application domain and as a source of new computational techniques: On the one hand, AI suggests new algorithmic recommendations and interaction paradigms, offering novel possibilities to engineer cooperation and alleviate conflict in multiagent (hybrid) systems; on the other hand, new learning algorithms provide improved techniques to simulate sophisticated agents and increasingly realistic CAS. My research lies at the interface between CAS and AI: I develop computational methods to understand cooperation and conflict in multiagent systems, and how these depend on systems\u2019 design and incentives. I focus on mapping interaction rules and incentives onto emerging macroscopic patterns and long-term dynamics. Examples of this research agenda, that I will survey in this talk, include modelling (1) the connection between reputation systems and cooperation dynamics, (2) the role of agents with hard-coded strategies in stabilizing fair behaviors in a population, or (3) the impact of recommendation algorithms on potential sources of conflict (e.g., radicalization and polarization) in a system composed of adaptive agents influencing each other over time.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Santos, Fernando P.}, year={2024}, month={Jul.}, pages={15453-15453} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26820/26592", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26820", + "pdf_size": 146223, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11776511810056590551&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "uva.nl", + "email": "uva.nl", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Amsterdam", + "aff_unique_dep": "Informatics Institute", + "aff_unique_url": 
"https://www.uva.nl", + "aff_unique_abbr": "UvA", + "aff_country_unique_index": "0", + "aff_country_unique": "Netherlands" + }, + { + "id": "article-26069", + "title": "EASAL: Entity-Aware Subsequence-Based Active Learning for Named Entity Recognition", + "track": "main", + "status": "Technical", + "abstract": "Active learning is a critical technique for reducing labelling load by selecting the most informative data. Most previous works applied active learning on Named Entity Recognition (token-level task) similar to the text classification (sentence-level task). They failed to consider the heterogeneity of uncertainty within each sentence and required access to the entire sentence for the annotator when labelling. To overcome the mentioned limitations, in this paper, we allow the active learning algorithm to query subsequences within sentences and propose an Entity-Aware Subsequences-based Active Learning (EASAL) that utilizes an effective Head-Tail pointer to query one entity-aware subsequence for each sentence based on BERT. For other tokens outside this subsequence, we randomly select 30% of these tokens to be pseudo-labelled for training together where the model directly predicts their pseudo-labels. Experimental results on both news and biomedical datasets demonstrate the effectiveness of our proposed method. 
The code is released at https://github.com/lylylylylyly/EASAL.", + "primary_area": "machine learning ii", + "author": "Yang Liu; Jinpeng Hu; Zhihong Chen; Xiang Wan; Tsung-Hui Chang", + "authorids": "", + "aff": "Shenzhen Research Institute of Big Data + Chinese University of Hong Kong, Shenzhen, China; Shenzhen Research Institute of Big Data + Chinese University of Hong Kong, Shenzhen, China; Shenzhen Research Institute of Big Data + Chinese University of Hong Kong, Shenzhen, China; Shenzhen Research Institute of Big Data + Pazhou Lab, Guangzhou, 510330, China; Shenzhen Research Institute of Big Data + Chinese University of Hong Kong, Shenzhen, China", + "bibtex": "@article{Liu_Hu_Chen_Wan_Chang_2023, title={EASAL: Entity-Aware Subsequence-Based Active Learning for Named Entity Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26069}, DOI={10.1609/aaai.v37i7.26069}, abstractNote={Active learning is a critical technique for reducing labelling load by selecting the most informative data. Most previous works applied active learning on Named Entity Recognition (token-level task) similar to the text classification (sentence-level task). They failed to consider the heterogeneity of uncertainty within each sentence and required access to the entire sentence for the annotator when labelling. To overcome the mentioned limitations, in this paper, we allow the active learning algorithm to query subsequences within sentences and propose an Entity-Aware Subsequences-based Active Learning (EASAL) that utilizes an effective Head-Tail pointer to query one entity-aware subsequence for each sentence based on BERT. For other tokens outside this subsequence, we randomly select 30% of these tokens to be pseudo-labelled for training together where the model directly predicts their pseudo-labels. Experimental results on both news and biomedical datasets demonstrate the effectiveness of our proposed method. 
The code is released at https://github.com/lylylylylyly/EASAL.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yang and Hu, Jinpeng and Chen, Zhihong and Wan, Xiang and Chang, Tsung-Hui}, year={2023}, month={Jun.}, pages={8897-8905} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26069/25841", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26069", + "pdf_size": 1980271, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6535849451694787614&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 2, + "aff_domain": "link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;sribd.cn;cuhk.edu.cn", + "email": "link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;sribd.cn;cuhk.edu.cn", + "github": "https://github.com/lylylylylyly/EASAL", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;0+2;0+1", + "aff_unique_norm": "Shenzhen Research Institute of Big Data;Chinese University of Hong Kong;Pazhou Lab", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.sribd.cn;https://www.cuhk.edu.cn;", + "aff_unique_abbr": ";CUHK;", + "aff_campus_unique_index": "1;1;1;2;1", + "aff_campus_unique": ";Shenzhen;Guangzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25361", + "title": "ECO-3D: Equivariant Contrastive Learning for Pre-training on Perturbed 3D Point Cloud", + "track": "main", + "status": "Technical", + "abstract": "In this work, we investigate contrastive learning on perturbed point clouds and find that the contrasting process may widen the domain gap caused by random perturbations, making the pre-trained network fail to generalize on testing data. 
To this end, we propose the Equivariant COntrastive framework which closes the domain gap before contrasting, further introduces the equivariance property, and enables pre-training networks under more perturbation types to obtain meaningful features. Specifically, to close the domain gap, a pre-trained VAE is adopted to convert perturbed point clouds into less perturbed point embedding of similar domains and separated perturbation embedding. The contrastive pairs can then be generated by mixing the point embedding with different perturbation embedding. Moreover, to pursue the equivariance property, a Vector Quantizer is adopted during VAE training, discretizing the perturbation embedding into one-hot tokens which indicate the perturbation labels. By correctly predicting the perturbation labels from the perturbed point cloud, the property of equivariance can be encouraged in the learned features. Experiments on synthesized and real-world perturbed datasets show that ECO-3D outperforms most existing pre-training strategies under various downstream tasks, achieving SOTA performance for lots of perturbations.", + "primary_area": "computer vision ii", + "author": "Ruibin Wang; Xianghua Ying; Bowei Xing; Jinfa Yang", + "authorids": "", + "aff": "Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University; Key Laboratory of Machine Perception (MOE) School of Intelligence Science and Technology, Peking University", + "bibtex": "@article{Wang_Ying_Xing_Yang_2023, title={ECO-3D: Equivariant Contrastive Learning for Pre-training on Perturbed 3D Point Cloud}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25361}, DOI={10.1609/aaai.v37i2.25361}, abstractNote={In this work, we investigate 
contrastive learning on perturbed point clouds and find that the contrasting process may widen the domain gap caused by random perturbations, making the pre-trained network fail to generalize on testing data. To this end, we propose the Equivariant COntrastive framework which closes the domain gap before contrasting, further introduces the equivariance property, and enables pre-training networks under more perturbation types to obtain meaningful features. Specifically, to close the domain gap, a pre-trained VAE is adopted to convert perturbed point clouds into less perturbed point embedding of similar domains and separated perturbation embedding. The contrastive pairs can then be generated by mixing the point embedding with different perturbation embedding. Moreover, to pursue the equivariance property, a Vector Quantizer is adopted during VAE training, discretizing the perturbation embedding into one-hot tokens which indicate the perturbation labels. By correctly predicting the perturbation labels from the perturbed point cloud, the property of equivariance can be encouraged in the learned features. 
Experiments on synthesized and real-world perturbed datasets show that ECO-3D outperforms most existing pre-training strategies under various downstream tasks, achieving SOTA performance for lots of perturbations.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Ruibin and Ying, Xianghua and Xing, Bowei and Yang, Jinfa}, year={2023}, month={Jun.}, pages={2626-2634} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25361/25133", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25361", + "pdf_size": 1433894, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:sbouboIY33cJ:scholar.google.com/&scioq=ECO-3D:+Equivariant+Contrastive+Learning+for+Pre-training+on+Perturbed+3D+Point+Cloud&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Intelligence Science and Technology", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "Peking University", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26690", + "title": "EINNs: Epidemiologically-Informed Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "We introduce EINNs, a framework crafted for epidemic forecasting that builds upon the theoretical grounds provided by mechanistic models as well as the data-driven expressibility afforded by AI models, and their capabilities to ingest heterogeneous information. Although neural forecasting models have been successful in multiple tasks, predictions well-correlated with epidemic trends and long-term predictions remain open challenges. 
Epidemiological ODE models contain mechanisms that can guide us in these two tasks; however, they have limited capability of ingesting data sources and modeling composite signals. Thus, we propose to leverage work in physics-informed neural networks to learn latent epidemic dynamics and transfer relevant knowledge to another neural network which ingests multiple data sources and has more appropriate inductive bias. In contrast with previous work, we do not assume the observability of complete dynamics and do not need to numerically solve the ODE equations during training. Our thorough experiments on all US states and HHS regions for COVID-19 and influenza forecasting showcase the clear benefits of our approach in both short-term and long-term forecasting as well as in learning the mechanistic dynamics over other non-trivial alternatives.", + "primary_area": "ai for social impact", + "author": "Alexander Rodr\u00edguez; Jiaming Cui; Naren Ramakrishnan; Bijaya Adhikari; B. Aditya Prakash", + "authorids": "", + "aff": "College of Computing, Georgia Institute of Technology; College of Computing, Georgia Institute of Technology; Department of Computer Science, Virginia Tech; Department of Computer Science, University of Iowa; College of Computing, Georgia Institute of Technology", + "bibtex": "@article{Rodr\u00edguez_Cui_Ramakrishnan_Adhikari_Prakash_2023, title={EINNs: Epidemiologically-Informed Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26690}, DOI={10.1609/aaai.v37i12.26690}, abstractNote={We introduce EINNs, a framework crafted for epidemic forecasting that builds upon the theoretical grounds provided by mechanistic models as well as the data-driven expressibility afforded by AI models, and their capabilities to ingest heterogeneous information. Although neural forecasting models have been successful in multiple tasks, predictions well-correlated with epidemic trends and long-term predictions remain open challenges. 
Epidemiological ODE models contain mechanisms that can guide us in these two tasks; however, they have limited capability of ingesting data sources and modeling composite signals. Thus, we propose to leverage work in physics-informed neural networks to learn latent epidemic dynamics and transfer relevant knowledge to another neural network which ingests multiple data sources and has more appropriate inductive bias. In contrast with previous work, we do not assume the observability of complete dynamics and do not need to numerically solve the ODE equations during training. Our thorough experiments on all US states and HHS regions for COVID-19 and influenza forecasting showcase the clear benefits of our approach in both short-term and long-term forecasting as well as in learning the mechanistic dynamics over other non-trivial alternatives.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rodr\u00edguez, Alexander and Cui, Jiaming and Ramakrishnan, Naren and Adhikari, Bijaya and Prakash, B. 
Aditya}, year={2023}, month={Jun.}, pages={14453-14460} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26690/26462", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26690", + "pdf_size": 335490, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10661648055472082553&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 12, + "aff_domain": "gatech.edu;gatech.edu;cs.vt.edu;uiowa.edu;gatech.edu", + "email": "gatech.edu;gatech.edu;cs.vt.edu;uiowa.edu;gatech.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Georgia Institute of Technology;Virginia Tech;University of Iowa", + "aff_unique_dep": "College of Computing;Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.gatech.edu;https://www.vt.edu;https://www.uiowa.edu", + "aff_unique_abbr": "Georgia Tech;VT;UIowa", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Atlanta;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25259", + "title": "EMEF: Ensemble Multi-Exposure Image Fusion", + "track": "main", + "status": "Technical", + "abstract": "Although remarkable progress has been made in recent years, current multi-exposure image fusion (MEF) research is still bounded by the lack of real ground truth, objective evaluation function, and robust fusion strategy. In this paper, we study the MEF problem from a new perspective. We don\u2019t utilize any synthesized ground truth, design any loss function, or develop any fusion strategy. Our proposed method EMEF takes advantage of the wisdom of multiple imperfect MEF contributors including both conventional and deep learning-based methods. Specifically, EMEF consists of two main stages: pre-train an imitator network and tune the imitator in the runtime. 
In the first stage, we make a unified network imitate different MEF targets in a style modulation way. In the second stage, we tune the imitator network by optimizing the style code, in order to find an optimal fusion result for each input pair. In the experiment, we construct EMEF from four state-of-the-art MEF methods and then make comparisons with the individuals and several other competitive methods on the latest released MEF benchmark dataset. The promising experimental results demonstrate that our ensemble framework can \u201cget the best of all worlds\u201d. The code is available at https://github.com/medalwill/EMEF.", + "primary_area": "computer vision ii", + "author": "Renshuai Liu; Chengyang Li; Haitao Cao; Yinglin Zheng; Ming Zeng; Xuan Cheng", + "authorids": "", + "aff": "School of Informatics, Xiamen University, Xiamen 361005, China; School of Informatics, Xiamen University, Xiamen 361005, China; School of Informatics, Xiamen University, Xiamen 361005, China; School of Informatics, Xiamen University, Xiamen 361005, China; School of Informatics, Xiamen University, Xiamen 361005, China; School of Informatics, Xiamen University, Xiamen 361005, China", + "bibtex": "@article{Liu_Li_Cao_Zheng_Zeng_Cheng_2023, title={EMEF: Ensemble Multi-Exposure Image Fusion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25259}, DOI={10.1609/aaai.v37i2.25259}, abstractNote={Although remarkable progress has been made in recent years, current multi-exposure image fusion (MEF) research is still bounded by the lack of real ground truth, objective evaluation function, and robust fusion strategy. In this paper, we study the MEF problem from a new perspective. We don\u2019t utilize any synthesized ground truth, design any loss function, or develop any fusion strategy. Our proposed method EMEF takes advantage of the wisdom of multiple imperfect MEF contributors including both conventional and deep learning-based methods. 
Specifically, EMEF consists of two main stages: pre-train an imitator network and tune the imitator in the runtime. In the first stage, we make a unified network imitate different MEF targets in a style modulation way. In the second stage, we tune the imitator network by optimizing the style code, in order to find an optimal fusion result for each input pair. In the experiment, we construct EMEF from four state-of-the-art MEF methods and then make comparisons with the individuals and several other competitive methods on the latest released MEF benchmark dataset. The promising experimental results demonstrate that our ensemble framework can \u201cget the best of all worlds\u201d. The code is available at https://github.com/medalwill/EMEF.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Renshuai and Li, Chengyang and Cao, Haitao and Zheng, Yinglin and Zeng, Ming and Cheng, Xuan}, year={2023}, month={Jun.}, pages={1710-1718} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25259/25031", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25259", + "pdf_size": 1670540, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11916743878129585599&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.xmu.edu.cn;stu.xmu.edu.cn;stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn", + "email": "stu.xmu.edu.cn;stu.xmu.edu.cn;stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn", + "github": "https://github.com/medalwill/EMEF", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Xiamen University", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.xmu.edu.cn", + "aff_unique_abbr": "XMU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Xiamen", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26705", + "title": 
"ERASER: AdvERsArial Sensitive Element Remover for Image Privacy Preservation", + "track": "aaai special track", + "status": "Technical", + "abstract": "The daily practice of online image sharing enriches our lives, but also raises a severe issue of privacy leakage. To mitigate the privacy risks during image sharing, some researchers modify the sensitive elements in images with visual obfuscation methods including traditional ones like blurring and pixelating, as well as generative ones based on deep learning. However, images processed by such methods may be recovered or recognized by models, which cannot guarantee privacy. Further, traditional methods make the images very unnatural with low image quality. Although generative methods produce better images, most of them suffer from insufficiency in the frequency domain, which influences image quality. Therefore, we propose the AdvERsArial Sensitive Element Remover (ERASER) to guarantee both image privacy and image quality. 1) To preserve image privacy, for the regions containing sensitive elements, ERASER guarantees enough difference after being modified in an adversarial way. Specifically, we take both the region and global content into consideration with a Prior Transformer and obtain the corresponding region prior and global prior. Based on the priors, ERASER is trained with an adversarial Difference Loss to make the content in the regions different. As a result, ERASER can reserve the main structure and change the texture of the target regions for image privacy preservation. 2) To guarantee the image quality, ERASER improves the frequency insufficiency of current generative methods. Specifically, the region prior and global prior are processed with Fast Fourier Convolution to capture characteristics and achieve consistency in both pixel and frequency domains. 
Quantitative analyses demonstrate that the proposed ERASER achieves a balance between image quality and image privacy preservation, while qualitative analyses demonstrate that ERASER indeed reduces the privacy risk from the visual perception aspect.", + "primary_area": "ai for social impact", + "author": "Guang Yang; Juan Cao; Danding Wang; Peng Qi; Jintao Li", + "authorids": "", + "aff": "Zhongguancun Laboratory + Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences", + "bibtex": "@article{Yang_Cao_Wang_Qi_Li_2023, title={ERASER: AdvERsArial Sensitive Element Remover for Image Privacy Preservation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26705}, DOI={10.1609/aaai.v37i12.26705}, abstractNote={The daily practice of online image sharing enriches our lives, but also raises a severe issue of privacy leakage. To mitigate the privacy risks during image sharing, some researchers modify the sensitive elements in images with visual obfuscation methods including traditional ones like blurring and pixelating, as well as generative ones based on deep learning. However, images processed by such methods may be recovered or recognized by models, which cannot guarantee privacy. Further, traditional methods make the images very unnatural with low image quality. 
Although generative methods produce better images, most of them suffer from insufficiency in the frequency domain, which influences image quality. Therefore, we propose the AdvERsArial Sensitive Element Remover (ERASER) to guarantee both image privacy and image quality. 1) To preserve image privacy, for the regions containing sensitive elements, ERASER guarantees enough difference after being modified in an adversarial way. Specifically, we take both the region and global content into consideration with a Prior Transformer and obtain the corresponding region prior and global prior. Based on the priors, ERASER is trained with an adversarial Difference Loss to make the content in the regions different. As a result, ERASER can reserve the main structure and change the texture of the target regions for image privacy preservation. 2) To guarantee the image quality, ERASER improves the frequency insufficiency of current generative methods. Specifically, the region prior and global prior are processed with Fast Fourier Convolution to capture characteristics and achieve consistency in both pixel and frequency domains. 
Quantitative analyses demonstrate that the proposed ERASER achieves a balance between image quality and image privacy preservation, while qualitative analyses demonstrate that ERASER indeed reduces the privacy risk from the visual perception aspect.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Guang and Cao, Juan and Wang, Danding and Qi, Peng and Li, Jintao}, year={2023}, month={Jun.}, pages={14584-14592} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26705/26477", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26705", + "pdf_size": 6887107, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10250418606788205195&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "zgclab.edu.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "zgclab.edu.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+2;1+2;1;1+2;1", + "aff_unique_norm": "Zhongguancun Laboratory;Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": ";Institute of Computing Technology;", + "aff_unique_url": ";http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": ";CAS;UCAS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27031", + "title": "ES-Mask: Evolutionary Strip Mask for Explaining Time Series Prediction (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Machine learning models are increasingly used in time series prediction with promising results. The model explanation of time series prediction falls behind the model development and makes less sense to users in understanding model decisions. 
This paper proposes ES-Mask, a post-hoc and model-agnostic evolutionary strip mask-based saliency approach for time series applications. ES-Mask designs the mask consisting of strips with the same salient value in consecutive time steps to produce binary and sustained feature importance scores over time for easy understanding and interpretation of time series. ES-Mask uses an evolutionary algorithm to search for the optimal mask by manipulating strips in rounds, thus is agnostic to models by involving no internal model states in the search. The initial experiments on MIMIC-III data set show that ES-Mask outperforms state-of-the-art methods.", + "primary_area": "", + "author": "Yifei Sun; Cheng Song; Feng Lu; Wei Li; Hai Jin; Albert Y. Zomaya", + "authorids": "", + "aff": "National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Centre for Distributed and High Performance Computing, School of Computer Science, The University of Sydney, Australia; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Centre for Distributed and High Performance Computing, School of Computer Science, The University of 
Sydney, Australia", + "bibtex": "@article{Sun_Song_Lu_Li_Jin_Zomaya_2024, title={ES-Mask: Evolutionary Strip Mask for Explaining Time Series Prediction (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27031}, DOI={10.1609/aaai.v37i13.27031}, abstractNote={Machine learning models are increasingly used in time series prediction with promising results. The model explanation of time series prediction falls behind the model development and makes less sense to users in understanding model decisions. This paper proposes ES-Mask, a post-hoc and model-agnostic evolutionary strip mask-based saliency approach for time series applications. ES-Mask designs the mask consisting of strips with the same salient value in consecutive time steps to produce binary and sustained feature importance scores over time for easy understanding and interpretation of time series. ES-Mask uses an evolutionary algorithm to search for the optimal mask by manipulating strips in rounds, thus is agnostic to models by involving no internal model states in the search. 
The initial experiments on MIMIC-III data set show that ES-Mask outperforms state-of-the-art methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Yifei and Song, Cheng and Lu, Feng and Li, Wei and Jin, Hai and Zomaya, Albert Y.}, year={2024}, month={Jul.}, pages={16342-16343} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27031/26803", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27031", + "pdf_size": 252692, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16449350530795897717&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 2, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;sydney.edu.au;hust.edu.cn;sydney.edu.au", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;sydney.edu.au;hust.edu.cn;sydney.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;1", + "aff_unique_norm": "Huazhong University of Science and Technology;The University of Sydney", + "aff_unique_dep": "School of Computer Science and Technology;School of Computer Science", + "aff_unique_url": "http://www.hust.edu.cn;https://www.sydney.edu.au", + "aff_unique_abbr": "HUST;USYD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25079", + "title": "ESL-SNNs: An Evolutionary Structure Learning Strategy for Spiking Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Spiking neural networks (SNNs) have manifested remarkable advantages in power consumption and event-driven property during the inference process. To take full advantage of low power consumption and improve the efficiency of these models further, the pruning methods have been explored to find sparse SNNs without redundancy connections after training. 
However, parameter redundancy still hinders the efficiency of SNNs during training. In the human brain, the rewiring process of neural networks is highly dynamic, while synaptic connections maintain relatively sparse during brain development. Inspired by this, here we propose an efficient evolutionary structure learning (ESL) framework for SNNs, named ESL-SNNs, to implement the sparse SNN training from scratch. The pruning and regeneration of synaptic connections in SNNs evolve dynamically during learning, yet keep the structural sparsity at a certain level. As a result, the ESL-SNNs can search for optimal sparse connectivity by exploring all possible parameters across time. Our experiments show that the proposed ESL-SNNs framework is able to learn SNNs with sparse structures effectively while reducing the limited accuracy. The ESL-SNNs achieve merely 0.28% accuracy loss with 10% connection density on the DVS-Cifar10 dataset. Our work presents a brand-new approach for sparse training of SNNs from scratch with biologically plausible evolutionary mechanisms, closing the gap in the expressibility between sparse training and dense training. Hence, it has great potential for SNN lightweight training and inference with low power consumption and small memory usage.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Jiangrong Shen; Qi Xu; Jian K. 
Liu; Yueming Wang; Gang Pan; Huajin Tang", + "authorids": "", + "aff": "The College of Computer Science and Technology, Zhejiang University, China; School of Artificial Intelligence, Dalian University of Technology, China; School of Computing, University of Leeds, UK; The College of Computer Science and Technology, Zhejiang University, China + Research Institute of Intelligent Computing, Zhejiang Lab, China; The College of Computer Science and Technology, Zhejiang University, China; The College of Computer Science and Technology, Zhejiang University, China + Research Institute of Intelligent Computing, Zhejiang Lab, China", + "bibtex": "@article{Shen_Xu_Liu_Wang_Pan_Tang_2023, title={ESL-SNNs: An Evolutionary Structure Learning Strategy for Spiking Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25079}, DOI={10.1609/aaai.v37i1.25079}, abstractNote={Spiking neural networks (SNNs) have manifested remarkable advantages in power consumption and event-driven property during the inference process. To take full advantage of low power consumption and improve the efficiency of these models further, the pruning methods have been explored to find sparse SNNs without redundancy connections after training. However, parameter redundancy still hinders the efficiency of SNNs during training. In the human brain, the rewiring process of neural networks is highly dynamic, while synaptic connections maintain relatively sparse during brain development. Inspired by this, here we propose an efficient evolutionary structure learning (ESL) framework for SNNs, named ESL-SNNs, to implement the sparse SNN training from scratch. The pruning and regeneration of synaptic connections in SNNs evolve dynamically during learning, yet keep the structural sparsity at a certain level. As a result, the ESL-SNNs can search for optimal sparse connectivity by exploring all possible parameters across time. 
Our experiments show that the proposed ESL-SNNs framework is able to learn SNNs with sparse structures effectively while reducing the limited accuracy. The ESL-SNNs achieve merely 0.28% accuracy loss with 10% connection density on the DVS-Cifar10 dataset. Our work presents a brand-new approach for sparse training of SNNs from scratch with biologically plausible evolutionary mechanisms, closing the gap in the expressibility between sparse training and dense training. Hence, it has great potential for SNN lightweight training and inference with low power consumption and small memory usage.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shen, Jiangrong and Xu, Qi and Liu, Jian K. and Wang, Yueming and Pan, Gang and Tang, Huajin}, year={2023}, month={Jun.}, pages={86-93} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25079/24851", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25079", + "pdf_size": 455589, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9860552284371053761&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "zju.edu.cn;dlut.edu.cn;leeds.ac.uk;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;dlut.edu.cn;leeds.ac.uk;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0+3;0;0+3", + "aff_unique_norm": "Zhejiang University;Dalian University of Technology;University of Leeds;Zhejiang Lab", + "aff_unique_dep": "College of Computer Science and Technology;School of Artificial Intelligence;School of Computing;Research Institute of Intelligent Computing", + "aff_unique_url": "http://www.zju.edu.cn;http://en.dlut.edu.cn/;https://www.leeds.ac.uk;", + "aff_unique_abbr": "ZJU;DUT;Leeds;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0+0;0;0+0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": 
"article-26148", + "title": "ESPT: A Self-Supervised Episodic Spatial Pretext Task for Improving Few-Shot Learning", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised learning (SSL) techniques have recently been integrated into the few-shot learning (FSL) framework and have shown promising results in improving the few-shot image classification performance. However, existing SSL approaches used in FSL typically seek the supervision signals from the global embedding of every single image. Therefore, during the episodic training of FSL, these methods cannot capture and fully utilize the local visual information in image samples and the data structure information of the whole episode, which are beneficial to FSL. To this end, we propose to augment the few-shot learning objective with a novel self-supervised Episodic Spatial Pretext Task (ESPT). Specifically, for each few-shot episode, we generate its corresponding transformed episode by applying a random geometric transformation to all the images in it. Based on these, our ESPT objective is defined as maximizing the local spatial relationship consistency between the original episode and the transformed one. With this definition, the ESPT-augmented FSL objective promotes learning more transferable feature representations that capture the local spatial features of different images and their inter-relational structural information in each input episode, thus enabling the model to generalize better to new categories with only a few samples. Extensive experiments indicate that our ESPT method achieves new state-of-the-art performance for few-shot image classification on three mainstay benchmark datasets. 
The source code will be available at: https://github.com/Whut-YiRong/ESPT.", + "primary_area": "machine learning iii", + "author": "Yi Rong; Xiongbo Lu; Zhaoyang Sun; Yaxiong Chen; Shengwu Xiong", + "authorids": "", + "aff": "School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan 430070, China + Sanya Science and Education Innovation Park, Wuhan University of Technology, Sanya 572000, China + Hainan Yazhou Bay Seed Laboratory, Sanya 572025, China + Shanghai Artificial Intelligence Laboratory, Shanghai 200240, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan 430070, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan 430070, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan 430070, China + Sanya Science and Education Innovation Park, Wuhan University of Technology, Sanya 572000, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan 430070, China + Sanya Science and Education Innovation Park, Wuhan University of Technology, Sanya 572000, China + Hainan Yazhou Bay Seed Laboratory, Sanya 572025, China + Shanghai Artificial Intelligence Laboratory, Shanghai 200240, China", + "bibtex": "@article{Rong_Lu_Sun_Chen_Xiong_2023, title={ESPT: A Self-Supervised Episodic Spatial Pretext Task for Improving Few-Shot Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26148}, DOI={10.1609/aaai.v37i8.26148}, abstractNote={Self-supervised learning (SSL) techniques have recently been integrated into the few-shot learning (FSL) framework and have shown promising results in improving the few-shot image classification performance. However, existing SSL approaches used in FSL typically seek the supervision signals from the global embedding of every single image. 
Therefore, during the episodic training of FSL, these methods cannot capture and fully utilize the local visual information in image samples and the data structure information of the whole episode, which are beneficial to FSL. To this end, we propose to augment the few-shot learning objective with a novel self-supervised Episodic Spatial Pretext Task (ESPT). Specifically, for each few-shot episode, we generate its corresponding transformed episode by applying a random geometric transformation to all the images in it. Based on these, our ESPT objective is defined as maximizing the local spatial relationship consistency between the original episode and the transformed one. With this definition, the ESPT-augmented FSL objective promotes learning more transferable feature representations that capture the local spatial features of different images and their inter-relational structural information in each input episode, thus enabling the model to generalize better to new categories with only a few samples. Extensive experiments indicate that our ESPT method achieves new state-of-the-art performance for few-shot image classification on three mainstay benchmark datasets. 
The source code will be available at: https://github.com/Whut-YiRong/ESPT.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rong, Yi and Lu, Xiongbo and Sun, Zhaoyang and Chen, Yaxiong and Xiong, Shengwu}, year={2023}, month={Jun.}, pages={9596-9605} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26148/25920", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26148", + "pdf_size": 331402, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7726420822199301711&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "whut.edu.cn;whut.edu.cn;whut.edu.cn;whut.edu.cn;whut.edu.cn", + "email": "whut.edu.cn;whut.edu.cn;whut.edu.cn;whut.edu.cn;whut.edu.cn", + "github": "https://github.com/Whut-YiRong/ESPT", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+1+2;0;0;0+0;0+0+1+2", + "aff_unique_norm": "Wuhan University of Technology;Hainan Yazhou Bay Seed Laboratory;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": "School of Computer Science and Artificial Intelligence;;", + "aff_unique_url": "http://www.wut.edu.cn;;", + "aff_unique_abbr": "WUT;;", + "aff_campus_unique_index": "0+1+1+2;0;0;0+1;0+1+1+2", + "aff_campus_unique": "Wuhan;Sanya;Shanghai", + "aff_country_unique_index": "0+0+0+0;0;0;0+0;0+0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25590", + "title": "Easy Begun Is Half Done: Spatial-Temporal Graph Modeling with ST-Curriculum Dropout", + "track": "main", + "status": "Technical", + "abstract": "Spatial-temporal (ST) graph modeling, such as traffic speed forecasting and taxi demand prediction, is an important task in deep learning area.\nHowever, for the nodes in the graph, their ST patterns can vary greatly in difficulties for modeling, owning to the heterogeneous nature of ST data. 
We argue that unveiling the nodes to the model in a meaningful order, from easy to complex, can provide performance improvements over traditional training procedure. The idea has its root in Curriculum Learning, which suggests in the early stage of training models can be sensitive to noise and difficult samples. In this paper, we propose ST-Curriculum Dropout, a novel and easy-to-implement strategy for spatial-temporal graph modeling. Specifically, we evaluate the learning difficulty of each node in high-level feature space and drop those difficult ones out to ensure the model only needs to handle fundamental ST relations at the beginning, before gradually moving to hard ones. Our strategy can be applied to any canonical deep learning architecture without extra trainable parameters, and extensive experiments on a wide range of datasets are conducted to illustrate that, by controlling the difficulty level of ST relations as the training progresses, the model is able to capture better representation of the data and thus yields better generalization.", + "primary_area": "data mining and knowledge management", + "author": "Hongjun Wang; Jiyuan Chen; Tong Pan; Zipei Fan; Xuan Song; Renhe Jiang; Lingyu Zhang; Yi Xie; Zhongyi Wang; Boyuan Zhang", + "authorids": "", + "aff": "SUSTech-UTokyo Joint Research Center on Super Smart City, Southern University of Science and Technology+Research Institute of Trustworthy Autonomous Systems, Southern University of Science and Technology; SUSTech-UTokyo Joint Research Center on Super Smart City, Southern University of Science and Technology+Research Institute of Trustworthy Autonomous Systems, Southern University of Science and Technology; Department of Physics, The Chinese University of Hong Kong; Center for Spatial Information Science, University of Tokyo; SUSTech-UTokyo Joint Research Center on Super Smart City, Southern University of Science and Technology+Research Institute of Trustworthy Autonomous Systems, Southern University 
of Science and Technology; Center for Spatial Information Science, University of Tokyo+Information Technology Center, University of Tokyo; SUSTech-UTokyo Joint Research Center on Super Smart City, Southern University of Science and Technology+Research Institute of Trustworthy Autonomous Systems, Southern University of Science and Technology+Didichuxing Inc; Huawei Technologies CO.LTD; Huawei Technologies CO.LTD; SUSTech-UTokyo Joint Research Center on Super Smart City, Southern University of Science and Technology+Research Institute of Trustworthy Autonomous Systems, Southern University of Science and Technology", + "bibtex": "@article{Wang_Chen_Pan_Fan_Song_Jiang_Zhang_Xie_Wang_Zhang_2023, title={Easy Begun Is Half Done: Spatial-Temporal Graph Modeling with ST-Curriculum Dropout}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25590}, DOI={10.1609/aaai.v37i4.25590}, abstractNote={Spatial-temporal (ST) graph modeling, such as traffic speed forecasting and taxi demand prediction, is an important task in deep learning area.\nHowever, for the nodes in the graph, their ST patterns can vary greatly in difficulties for modeling, owning to the heterogeneous nature of ST data. We argue that unveiling the nodes to the model in a meaningful order, from easy to complex, can provide performance improvements over traditional training procedure. The idea has its root in Curriculum Learning, which suggests in the early stage of training models can be sensitive to noise and difficult samples. In this paper, we propose ST-Curriculum Dropout, a novel and easy-to-implement strategy for spatial-temporal graph modeling. Specifically, we evaluate the learning difficulty of each node in high-level feature space and drop those difficult ones out to ensure the model only needs to handle fundamental ST relations at the beginning, before gradually moving to hard ones. 
Our strategy can be applied to any canonical deep learning architecture without extra trainable parameters, and extensive experiments on a wide range of datasets are conducted to illustrate that, by controlling the difficulty level of ST relations as the training progresses, the model is able to capture better representation of the data and thus yields better generalization.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Hongjun and Chen, Jiyuan and Pan, Tong and Fan, Zipei and Song, Xuan and Jiang, Renhe and Zhang, Lingyu and Xie, Yi and Wang, Zhongyi and Zhang, Boyuan}, year={2023}, month={Jun.}, pages={4668-4675} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25590/25362", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25590", + "pdf_size": 1244924, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13199396694244115382&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sustech.edu.cn;sustech.edu.cn;CUHK.edu.hk;iis.u-tokyo.ac.jp;sustech.edu.cn;itc.u-tokyo.ac.jp;sustech.edu.cn;huawei.com;huawei.com;sustech.edu.cn", + "email": "sustech.edu.cn;sustech.edu.cn;CUHK.edu.hk;iis.u-tokyo.ac.jp;sustech.edu.cn;itc.u-tokyo.ac.jp;sustech.edu.cn;huawei.com;huawei.com;sustech.edu.cn", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0+0;0+0;1;2;0+0;2+2;0+0+3;4;4;0+0", + "aff_unique_norm": "Southern University of Science and Technology;The Chinese University of Hong Kong;University of Tokyo;Didi Chuxing;Huawei Technologies", + "aff_unique_dep": "SUSTech-UTokyo Joint Research Center on Super Smart City;Department of Physics;Center for Spatial Information Science;;", + "aff_unique_url": "https://www.sustech.edu.cn;https://www.cuhk.edu.hk;https://www.u-tokyo.ac.jp;https://www.didichuxing.com/;https://www.huawei.com", + "aff_unique_abbr": "SUSTech;CUHK;UTokyo;Didi;Huawei", + "aff_campus_unique_index": ";;1;2;;2+2;;", + 
"aff_campus_unique": ";Hong Kong;Tokyo", + "aff_country_unique_index": "0+0;0+0;0;1;0+0;1+1;0+0+0;0;0;0+0", + "aff_country_unique": "China;Japan" + }, + { + "id": "article-27065", + "title": "EasyRec: An Easy-to-Use, Extendable and Efficient Framework for Building Industrial Recommendation Systems", + "track": "demonstrations", + "status": "Technical", + "abstract": "We present EasyRec, an easy-to-use, extendable and efficient recommendation framework for building industrial recommendation systems. Our EasyRec framework is superior in the following aspects:first, EasyRec adopts a modular and pluggable design pattern to reduce the efforts to build custom models; second, EasyRec implements hyper-parameter optimization and feature selection algorithms to improve model performance automatically; third, EasyRec applies online learning to adapt to the ever-changing data distribution. The code is released: https://github.com/alibaba/EasyRec.", + "primary_area": "", + "author": "Mengli Cheng; Yue Gao; Guoqiang Liu; HongSheng Jin", + "authorids": "", + "aff": "Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group", + "bibtex": "@article{Cheng_Gao_Liu_Jin_2024, title={EasyRec: An Easy-to-Use, Extendable and Efficient Framework for Building Industrial Recommendation Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27065}, DOI={10.1609/aaai.v37i13.27065}, abstractNote={We present EasyRec, an easy-to-use, extendable and efficient recommendation framework for building industrial recommendation systems. Our EasyRec framework is superior in the following aspects:first, EasyRec adopts a modular and pluggable design pattern to reduce the efforts to build custom models; second, EasyRec implements hyper-parameter optimization and feature selection algorithms to improve model performance automatically; third, EasyRec applies online learning to adapt to the ever-changing data distribution. 
The code is released: https://github.com/alibaba/EasyRec.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Mengli and Gao, Yue and Liu, Guoqiang and Jin, HongSheng}, year={2024}, month={Jul.}, pages={16419-16421} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27065/26837", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27065", + "pdf_size": 518325, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6106108576339665033&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "https://github.com/alibaba/EasyRec", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26400", + "title": "Echo of Neighbors: Privacy Amplification for Personalized Private Federated Learning with Shuffle Model", + "track": "main", + "status": "Technical", + "abstract": "Federated Learning, as a popular paradigm for collaborative training, is vulnerable against privacy attacks. Different privacy levels regarding users' attitudes need to be satisfied locally, while a strict privacy guarantee for the global model is also required centrally. Personalized Local Differential Privacy (PLDP) is suitable for preserving users' varying local privacy, yet only provides a central privacy guarantee equivalent to the worst-case local privacy level. Thus, achieving strong central privacy as well as personalized local privacy with a utility-promising model is a challenging problem. 
In this work, a general framework (APES) is built up to strengthen model privacy under personalized local privacy by leveraging the privacy amplification effect of the shuffle model. To tighten the privacy bound, we quantify the heterogeneous contributions to the central privacy user by user. The contributions are characterized by the ability of generating \u201cechos\u201d from the perturbation of each user, which is carefully measured by proposed methods Neighbor Divergence and Clip-Laplace Mechanism. Furthermore, we propose a refined framework (S-APES) with the post-sparsification technique to reduce privacy loss in high-dimension scenarios. To the best of our knowledge, the impact of shuffling on personalized local privacy is considered for the first time. We provide a strong privacy amplification effect, and the bound is tighter than the baseline result based on existing methods for uniform local privacy. Experiments demonstrate that our frameworks ensure comparable or higher accuracy for the global model.", + "primary_area": "philosophy and ethics of ai", + "author": "Yixuan Liu; Suyun Zhao; Li Xiong; Yuhan Liu; Hong Chen", + "authorids": "", + "aff": "Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China+Engineering Research Center of Ministry of Education on Database and BI+Information School, Renmin University of China; Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China+Engineering Research Center of Ministry of Education on Database and BI+Information School, Renmin University of China; Department of Computer Science, Emory University; Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China+Engineering Research Center of Ministry of Education on Database and BI+Information School, Renmin University of China; Key Laboratory of Data Engineering and Knowledge Engineering of 
Ministry of Education, Renmin University of China+Engineering Research Center of Ministry of Education on Database and BI+Information School, Renmin University of China", + "bibtex": "@article{Liu_Zhao_Xiong_Liu_Chen_2023, title={Echo of Neighbors: Privacy Amplification for Personalized Private Federated Learning with Shuffle Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26400}, DOI={10.1609/aaai.v37i10.26400}, abstractNote={Federated Learning, as a popular paradigm for collaborative training, is vulnerable against privacy attacks. Different privacy levels regarding users\u2019 attitudes need to be satisfied locally, while a strict privacy guarantee for the global model is also required centrally. Personalized Local Differential Privacy (PLDP) is suitable for preserving users\u2019 varying local privacy, yet only provides a central privacy guarantee equivalent to the worst-case local privacy level. Thus, achieving strong central privacy as well as personalized local privacy with a utility-promising model is a challenging problem. In this work, a general framework (APES) is built up to strengthen model privacy under personalized local privacy by leveraging the privacy amplification effect of the shuffle model. To tighten the privacy bound, we quantify the heterogeneous contributions to the central privacy user by user. The contributions are characterized by the ability of generating \u201cechos\u201d from the perturbation of each user, which is carefully measured by proposed methods Neighbor Divergence and Clip-Laplace Mechanism. Furthermore, we propose a refined framework (S-APES) with the post-sparsification technique to reduce privacy loss in high-dimension scenarios. To the best of our knowledge, the impact of shuffling on personalized local privacy is considered for the first time. We provide a strong privacy amplification effect, and the bound is tighter than the baseline result based on existing methods for uniform local privacy. 
Experiments demonstrate that our frameworks ensure comparable or higher accuracy for the global model.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yixuan and Zhao, Suyun and Xiong, Li and Liu, Yuhan and Chen, Hong}, year={2023}, month={Jun.}, pages={11865-11872} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26400/26172", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26400", + "pdf_size": 1684980, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8025336001707835066&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 10, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;emory.edu;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;emory.edu;ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+0;0+1+0;2;0+1+0;0+1+0", + "aff_unique_norm": "Renmin University of China;Engineering Research Center of Ministry of Education;Emory University", + "aff_unique_dep": "Key Laboratory of Data Engineering and Knowledge Engineering;Database and BI;Department of Computer Science", + "aff_unique_url": "http://www.ruc.edu.cn;;https://www.emory.edu", + "aff_unique_abbr": "RUC;;Emory", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;1;0+0+0;0+0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25318", + "title": "Edge Structure Learning via Low Rank Residuals for Robust Image Classification", + "track": "main", + "status": "Technical", + "abstract": "Traditional low-rank methods overlook residuals as corruptions, but we discovered that low-rank residuals actually keep image edges together with corrupt components. Therefore, filtering out such structural information could hamper the discriminative details in images, especially in heavy corruptions. 
In order to address this limitation, this paper proposes a novel method named ESL-LRR, which preserves image edges by finding image projections from low-rank residuals. Specifically, our approach is built in a manifold learning framework where residuals are regarded as another view of image data. Edge preserved image projections are then pursued using a dynamic affinity graph regularization to capture the more accurate similarity between residuals while suppressing the influence of corrupt ones. With this adaptive approach, the proposed method can also find image intrinsic low-rank representation, and much discriminative edge preserved projections. As a result, a new classification strategy is introduced, aligning both modalities to enhance accuracy. Experiments are conducted on several benchmark image datasets, including MNIST, LFW, and COIL100. The results show that the proposed method has clear advantages over compared state-of-the-art (SOTA) methods, such as Low-Rank Embedding (LRE), Low-Rank Preserving Projection via Graph Regularized Reconstruction (LRPP_GRR), and Feature Selective Projection (FSP) with more than 2% improvement, particularly in corrupted cases.", + "primary_area": "computer vision ii", + "author": "Xiang-Jun Shen; Stanley Ebhohimhen Abhadiomhen; Yang Yang; Zhifeng Liu; Sirui Tian", + "authorids": "", + "aff": "School of Computer Science and Communication Engineering, JiangSu University, JiangSu, 212013, China; School of Computer Science and Communication Engineering, JiangSu University, JiangSu, 212013, China + Department of Computer Science, University of Nigeria, Nsukka, Nigeria; School of Computer Science and Communication Engineering, JiangSu University, JiangSu, 212013, China; School of Computer Science and Communication Engineering, JiangSu University, JiangSu, 212013, China; Department of Electronic Engineering, School of Electronic and Optical Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China", + 
"bibtex": "@article{Shen_Abhadiomhen_Yang_Liu_Tian_2023, title={Edge Structure Learning via Low Rank Residuals for Robust Image Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25318}, DOI={10.1609/aaai.v37i2.25318}, abstractNote={Traditional low-rank methods overlook residuals as corruptions, but we discovered that low-rank residuals actually keep image edges together with corrupt components. Therefore, filtering out such structural information could hamper the discriminative details in images, especially in heavy corruptions. In order to address this limitation, this paper proposes a novel method named ESL-LRR, which preserves image edges by finding image projections from low-rank residuals. Specifically, our approach is built in a manifold learning framework where residuals are regarded as another view of image data. Edge preserved image projections are then pursued using a dynamic affinity graph regularization to capture the more accurate similarity between residuals while suppressing the influence of corrupt ones. With this adaptive approach, the proposed method can also find image intrinsic low-rank representation, and much discriminative edge preserved projections. As a result, a new classification strategy is introduced, aligning both modalities to enhance accuracy. Experiments are conducted on several benchmark image datasets, including MNIST, LFW, and COIL100. 
The results show that the proposed method has clear advantages over compared state-of-the-art (SOTA) methods, such as Low-Rank Embedding (LRE), Low-Rank Preserving Projection via Graph Regularized Reconstruction (LRPP_GRR), and Feature Selective Projection (FSP) with more than 2% improvement, particularly in corrupted cases.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shen, Xiang-Jun and Abhadiomhen, Stanley Ebhohimhen and Yang, Yang and Liu, Zhifeng and Tian, Sirui}, year={2023}, month={Jun.}, pages={2236-2244} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25318/25090", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25318", + "pdf_size": 1468739, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5895173870854382061&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "ujs.edu.cn;unn.edu.ng;ujs.edu.cn;ujs.edu.cn;njust.edu.cn", + "email": "ujs.edu.cn;unn.edu.ng;ujs.edu.cn;ujs.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0;2", + "aff_unique_norm": "JiangSu University;University of Nigeria;Nanjing University of Science and Technology", + "aff_unique_dep": "School of Computer Science and Communication Engineering;Department of Computer Science;Department of Electronic Engineering", + "aff_unique_url": ";https://www.unn.edu.ng;", + "aff_unique_abbr": ";;NJUST", + "aff_campus_unique_index": "0;0+1;0;0;2", + "aff_campus_unique": "JiangSu;Nsukka;Nanjing", + "aff_country_unique_index": "0;0+1;0;0;0", + "aff_country_unique": "China;Nigeria" + }, + { + "id": "article-25801", + "title": "Editing Boolean Classifiers: A Belief Change Perspective", + "track": "main", + "status": "Technical", + "abstract": "This paper is about editing Boolean classifiers, i.e., determining how a Boolean classifier should be modified when new pieces of evidence must be incorporated. 
Our main goal is to delineate what are the rational ways of making such edits. This goes through a number of rationality postulates inspired from those considered so far for belief revision. We give a representation theorem and present some families of edit operators satisfying the postulates.", + "primary_area": "knowledge representation and reasoning", + "author": "Nicolas Schwind; Katsumi Inoue; Pierre Marquis", + "authorids": "", + "aff": "National Institute of Advanced Industrial Science and Technology, Tokyo, Japan; National Institute of Informatics, Tokyo, Japan + The Graduate University for Advanced Studies, SOKENDAI, Tokyo, Japan; Univ. Artois, CNRS, CRIL, F-62300 Lens, France + Institut Universitaire de France", + "bibtex": "@article{Schwind_Inoue_Marquis_2023, title={Editing Boolean Classifiers: A Belief Change Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25801}, DOI={10.1609/aaai.v37i5.25801}, abstractNote={This paper is about editing Boolean classifiers, i.e., determining how a Boolean classifier should be modified when new pieces of evidence must be incorporated. Our main goal is to delineate what are the rational ways of making such edits. This goes through a number of rationality postulates inspired from those considered so far for belief revision. 
We give a representation theorem and present some families of edit operators satisfying the postulates.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Schwind, Nicolas and Inoue, Katsumi and Marquis, Pierre}, year={2023}, month={Jun.}, pages={6516-6524} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25801/25573", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25801", + "pdf_size": 166681, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8538563019889300786&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "aist.go.jp;nii.ac.jp;cril.fr", + "email": "aist.go.jp;nii.ac.jp;cril.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;3+4", + "aff_unique_norm": "National Institute of Advanced Industrial Science and Technology;National Institute of Informatics;The Graduate University for Advanced Studies;University of Artois;Institut Universitaire de France", + "aff_unique_dep": ";;;CNRS, CRIL;", + "aff_unique_url": "https://www.aist.go.jp;https://www.nii.ac.jp;https://www.soken.ac.jp;;https://www.iuf.cnrs.fr", + "aff_unique_abbr": "AIST;NII;SOKENDAI;Univ. Artois;IUF", + "aff_campus_unique_index": "0;0+0;", + "aff_campus_unique": "Tokyo;", + "aff_country_unique_index": "0;0+0;1+1", + "aff_country_unique": "Japan;France" + }, + { + "id": "article-25923", + "title": "EffConv: Efficient Learning of Kernel Sizes for Convolution Layers of CNNs", + "track": "main", + "status": "Technical", + "abstract": "Determining kernel sizes of a CNN model is a crucial and non-trivial design choice and significantly impacts its performance. The majority of kernel size design methods rely on complex heuristic tricks or leverage neural architecture search that requires extreme computational resources. 
Thus, learning kernel sizes, using methods such as modeling kernels as a combination of basis functions, jointly with the model weights has been proposed as a workaround. However, previous methods cannot achieve satisfactory results or are inefficient for large-scale datasets. To fill this gap, we design a novel efficient kernel size learning method in which a size predictor model learns to predict optimal kernel sizes for a classifier given a desired number of parameters. It does so in collaboration with a kernel predictor model that predicts the weights of the kernels - given kernel sizes predicted by the size predictor - to minimize the training objective, and both models are trained end-to-end. Our method only needs a small fraction of the training epochs of the original CNN to train these two models and find proper kernel sizes for it. Thus, it offers an efficient and effective solution for the kernel size learning problem. Our extensive experiments on MNIST, CIFAR-10, STL-10, and ImageNet-32 demonstrate that our method can achieve the best training time vs. accuracy trade-off compared to previous kernel size learning methods and significantly outperform them on challenging datasets such as STL-10 and ImageNet-32. 
Our implementations are available at https://github.com/Alii-Ganjj/EffConv.", + "primary_area": "machine learning i", + "author": "Alireza Ganjdanesh; Shangqian Gao; Heng Huang", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA 15261, USA; Department of Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA 15261, USA; Department of Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA 15261, USA", + "bibtex": "@article{Ganjdanesh_Gao_Huang_2023, title={EffConv: Efficient Learning of Kernel Sizes for Convolution Layers of CNNs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25923}, DOI={10.1609/aaai.v37i6.25923}, abstractNote={Determining kernel sizes of a CNN model is a crucial and non-trivial design choice and significantly impacts its performance. The majority of kernel size design methods rely on complex heuristic tricks or leverage neural architecture search that requires extreme computational resources. Thus, learning kernel sizes, using methods such as modeling kernels as a combination of basis functions, jointly with the model weights has been proposed as a workaround. However, previous methods cannot achieve satisfactory results or are inefficient for large-scale datasets. To fill this gap, we design a novel efficient kernel size learning method in which a size predictor model learns to predict optimal kernel sizes for a classifier given a desired number of parameters. It does so in collaboration with a kernel predictor model that predicts the weights of the kernels - given kernel sizes predicted by the size predictor - to minimize the training objective, and both models are trained end-to-end. Our method only needs a small fraction of the training epochs of the original CNN to train these two models and find proper kernel sizes for it. 
Thus, it offers an efficient and effective solution for the kernel size learning problem. Our extensive experiments on MNIST, CIFAR-10, STL-10, and ImageNet-32 demonstrate that our method can achieve the best training time vs. accuracy trade-off compared to previous kernel size learning methods and significantly outperform them on challenging datasets such as STL-10 and ImageNet-32. Our implementations are available at https://github.com/Alii-Ganjj/EffConv.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ganjdanesh, Alireza and Gao, Shangqian and Huang, Heng}, year={2023}, month={Jun.}, pages={7604-7612} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25923/25695", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25923", + "pdf_size": 1597799, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1451544593055769024&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "pitt.edu;pitt.edu;pitt.edu", + "email": "pitt.edu;pitt.edu;pitt.edu", + "github": "https://github.com/Alii-Ganjj/EffConv", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Pittsburgh", + "aff_unique_dep": "Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.pitt.edu", + "aff_unique_abbr": "Pitt", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26206", + "title": "Effective Continual Learning for Text Classification with Lightweight Snapshots", + "track": "main", + "status": "Technical", + "abstract": "Continual learning is known for suffering from catastrophic forgetting, a phenomenon where previously learned concepts are forgotten upon learning new tasks. 
A natural remedy is to use trained models for old tasks as \u2018teachers\u2019 to regularize the update of the current model to prevent such forgetting. However, this requires storing all past models, which is very space-consuming for large models, e.g. BERT, thus impractical in real-world applications. To tackle this issue, we propose to construct snapshots of seen tasks whose key knowledge is captured in lightweight adapters. During continual learning, we transfer knowledge from past snapshots to the current model through knowledge distillation, allowing the current model to review previously learned knowledge while learning new tasks. We also design representation recalibration to better handle the class-incremental setting. Experiments over various task sequences show that our approach effectively mitigates catastrophic forgetting and outperforms all baselines.", + "primary_area": "machine learning iii", + "author": "Jue Wang; Dajie Dong; Lidan Shou; Ke Chen; Gang Chen", + "authorids": "", + "aff": "Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University+College of Computer Science and Technology, Zhejiang University; Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University+College of Computer Science and Technology, Zhejiang University; Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University+College of Computer Science and Technology, Zhejiang University; Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University+College of Computer Science and Technology, Zhejiang University; Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University+College of Computer Science and Technology, Zhejiang University", + "bibtex": "@article{Wang_Dong_Shou_Chen_Chen_2023, title={Effective Continual Learning for Text Classification with Lightweight Snapshots}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/26206}, DOI={10.1609/aaai.v37i8.26206}, abstractNote={Continual learning is known for suffering from catastrophic forgetting, a phenomenon where previously learned concepts are forgotten upon learning new tasks. A natural remedy is to use trained models for old tasks as \u2018teachers\u2019 to regularize the update of the current model to prevent such forgetting. However, this requires storing all past models, which is very space-consuming for large models, e.g. BERT, thus impractical in real-world applications. To tackle this issue, we propose to construct snapshots of seen tasks whose key knowledge is captured in lightweight adapters. During continual learning, we transfer knowledge from past snapshots to the current model through knowledge distillation, allowing the current model to review previously learned knowledge while learning new tasks. We also design representation recalibration to better handle the class-incremental setting. 
Experiments over various task sequences show that our approach effectively mitigates catastrophic forgetting and outperforms all baselines.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Jue and Dong, Dajie and Shou, Lidan and Chen, Ke and Chen, Gang}, year={2023}, month={Jun.}, pages={10122-10130} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26206/25978", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26206", + "pdf_size": 338113, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16827884761743942622&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "Key Lab of Intelligent Computing Based Big Data of Zhejiang Province", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26381", + "title": "Effective Integration of Weighted Cost-to-Go and Conflict Heuristic within Suboptimal CBS", + "track": "main", + "status": "Technical", + "abstract": "Conflict-Based Search (CBS) is a popular multi-agent path finding (MAPF) solver that employs a low-level single agent planner and a high-level constraint tree to resolve conflicts. The vast majority of modern MAPF solvers focus on improving CBS by reducing the size of this tree through various strategies with few methods modifying the low level planner. 
Typically low level planners in existing CBS methods use an unweighted cost-to-go heuristic, with suboptimal CBS methods also using a conflict heuristic to help the high level search. In this paper, we show that, contrary to prevailing CBS beliefs, a weighted cost-to-go heuristic can be used effectively alongside the conflict heuristic in two possible variants. In particular, one of these variants can obtain large speedups, 2-100x, across several scenarios and suboptimal CBS methods. Importantly, we discover that performance is related not to the weighted cost-to-go heuristic but rather to the relative conflict heuristic weight's ability to effectively balance low-level and high-level work. Additionally, to the best of our knowledge, we show the first theoretical relation of prioritized planning and bounded suboptimal CBS and demonstrate that our methods are their natural generalization.", + "primary_area": "multiagent systems", + "author": "Rishi Veerapaneni; Tushar Kusnur; Maxim Likhachev", + "authorids": "", + "aff": "Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University", + "bibtex": "@article{Veerapaneni_Kusnur_Likhachev_2023, title={Effective Integration of Weighted Cost-to-Go and Conflict Heuristic within Suboptimal CBS}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26381}, DOI={10.1609/aaai.v37i10.26381}, abstractNote={Conflict-Based Search (CBS) is a popular multi-agent path finding (MAPF) solver that employs a low-level single agent planner and a high-level constraint tree to resolve conflicts. The vast majority of modern MAPF solvers focus on improving CBS by reducing the size of this tree through various strategies with few methods modifying the low level planner. Typically low level planners in existing CBS methods use an unweighted cost-to-go heuristic, with suboptimal CBS methods also using a conflict heuristic to help the high level search. 
In this paper, we show that, contrary to prevailing CBS beliefs, a weighted cost-to-go heuristic can be used effectively alongside the conflict heuristic in two possible variants. In particular, one of these variants can obtain large speedups, 2-100x, across several scenarios and suboptimal CBS methods. Importantly, we discover that performance is related not to the weighted cost-to-go heuristic but rather to the relative conflict heuristic weight\u2019s ability to effectively balance low-level and high-level work. Additionally, to the best of our knowledge, we show the first theoretical relation of prioritized planning and bounded suboptimal CBS and demonstrate that our methods are their natural generalization.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Veerapaneni, Rishi and Kusnur, Tushar and Likhachev, Maxim}, year={2023}, month={Jun.}, pages={11691-11698} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26381/26153", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26381", + "pdf_size": 606733, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10066920097051736674&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "github": "", + "project": "https://arxiv.org/abs/2205.11624", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Robotics Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26560", + "title": "Effective Open Intent Classification with K-center Contrastive Learning and Adjustable Decision Boundary", + "track": "main", + "status": 
"Technical", + "abstract": "Open intent classification, which aims to correctly classify the known intents into their corresponding classes while identifying the new unknown (open) intents, is an essential but challenging task in dialogue systems. In this paper, we introduce novel K-center contrastive learning and adjustable decision boundary learning (CLAB) to improve the effectiveness of open intent classification. First, we pre-train a feature encoder on the labeled training instances, which transfers knowledge from known intents to unknown intents. Specifically, we devise a K-center contrastive learning algorithm to learn discriminative and balanced intent features, improving the generalization of the model for recognizing open intents. Second, we devise an adjustable decision boundary learning method with expanding and shrinking (ADBES) to determine the suitable decision conditions. Concretely, we learn a decision boundary for each known intent class, which consists of a decision center and the radius of the decision boundary. We then expand the radius of the decision boundary to accommodate more in-class instances if the out-of-class instances are far from the decision boundary; otherwise, we shrink the radius of the decision boundary. 
Extensive experiments on three benchmark datasets clearly demonstrate the effectiveness of our method for open intent classification.For reproducibility, we submit the code at: https://github.com/lxk00/CLAP", + "primary_area": "speech natural language processing", + "author": "Xiaokang Liu; Jianquan Li; Jingjing Mu; Min Yang; Ruifeng Xu; Benyou Wang", + "authorids": "", + "aff": "China Automotive Technology and Research Center Co., Ltd.; Beijing Ultrapower Software Co.,Ltd.; Beijing Ultrapower Software Co.,Ltd.; Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences; Harbin Institute of Technology, Shenzhen; The Chinese University of Hong Kong (Shenzhen)+Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen, China", + "bibtex": "@article{Liu_Li_Mu_Yang_Xu_Wang_2023, title={Effective Open Intent Classification with K-center Contrastive Learning and Adjustable Decision Boundary}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26560}, DOI={10.1609/aaai.v37i11.26560}, abstractNote={Open intent classification, which aims to correctly classify the known intents into their corresponding classes while identifying the new unknown (open) intents, is an essential but challenging task in dialogue systems. In this paper, we introduce novel K-center contrastive learning and adjustable decision boundary learning (CLAB) to improve the effectiveness of open intent classification. First, we pre-train a feature encoder on the labeled training instances, which transfers knowledge from known intents to unknown intents. Specifically, we devise a K-center contrastive learning algorithm to learn discriminative and balanced intent features, improving the generalization of the model for recognizing open intents. Second, we devise an adjustable decision boundary learning method with expanding and shrinking (ADBES) to determine the suitable decision conditions. 
Concretely, we learn a decision boundary for each known intent class, which consists of a decision center and the radius of the decision boundary. We then expand the radius of the decision boundary to accommodate more in-class instances if the out-of-class instances are far from the decision boundary; otherwise, we shrink the radius of the decision boundary. Extensive experiments on three benchmark datasets clearly demonstrate the effectiveness of our method for open intent classification.For reproducibility, we submit the code at: https://github.com/lxk00/CLAP}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Xiaokang and Li, Jianquan and Mu, Jingjing and Yang, Min and Xu, Ruifeng and Wang, Benyou}, year={2023}, month={Jun.}, pages={13291-13299} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26560/26332", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26560", + "pdf_size": 2229182, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=574462099115777313&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "catarc.ac.cn;ultrapower.com.cn;ultrapower.com.cn;siat.ac.cn;hit.edu.cn;cuhk.edu.cn", + "email": "catarc.ac.cn;ultrapower.com.cn;ultrapower.com.cn;siat.ac.cn;hit.edu.cn;cuhk.edu.cn", + "github": "https://github.com/lxk00/CLAP", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;2;3;4+4", + "aff_unique_norm": "China Automotive Technology and Research Center;Beijing Ultrapower Software Co., Ltd.;Chinese Academy of Sciences;Harbin Institute of Technology;The Chinese University of Hong Kong", + "aff_unique_dep": ";;Shenzhen Institutes of Advanced Technology;;", + "aff_unique_url": "http://www.catarc.org.cn;http://www.ultrapower.com.cn;http://www.siat.cas.cn;http://en.hhit.edu.cn/;https://www.cuhk.edu.cn", + "aff_unique_abbr": "CATARC;;SIAT;HIT;CUHK", + "aff_campus_unique_index": "1;1;1+1", + "aff_campus_unique": ";Shenzhen", 
+ "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26390", + "title": "Effective and Stable Role-Based Multi-Agent Collaboration by Structural Information Principles", + "track": "main", + "status": "Technical", + "abstract": "Role-based learning is a promising approach to improving the performance of Multi-Agent Reinforcement Learning (MARL). Nevertheless, without manual assistance, current role-based methods cannot guarantee stably discovering a set of roles to effectively decompose a complex task, as they assume either a predefined role structure or practical experience for selecting hyperparameters. In this article, we propose a mathematical Structural Information principles-based Role Discovery method, namely SIRD, and then present a SIRD optimizing MARL framework, namely SR-MARL, for multi-agent collaboration. The SIRD transforms role discovery into a hierarchical action space clustering. Specifically, the SIRD consists of structuralization, sparsification, and optimization modules, where an optimal encoding tree is generated to perform abstracting to discover roles. The SIRD is agnostic to specific MARL algorithms and flexibly integrated with various value function factorization approaches. 
Empirical evaluations on the StarCraft II micromanagement benchmark demonstrate that, compared with state-of-the-art MARL algorithms, the SR-MARL framework improves the average test win rate by 0.17%, 6.08%, and 3.24%, and reduces the deviation by 16.67%, 30.80%, and 66.30%, under easy, hard, and super hard scenarios.", + "primary_area": "multiagent systems", + "author": "Xianghua Zeng; Hao Peng; Angsheng Li", + "authorids": "", + "aff": "State Key Laboratory of Software Development Environment, Beihang University, Beijing, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, China+Zhongguancun Laboratory, Beijing, China", + "bibtex": "@article{Zeng_Peng_Li_2023, title={Effective and Stable Role-Based Multi-Agent Collaboration by Structural Information Principles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26390}, DOI={10.1609/aaai.v37i10.26390}, abstractNote={Role-based learning is a promising approach to improving the performance of Multi-Agent Reinforcement Learning (MARL). Nevertheless, without manual assistance, current role-based methods cannot guarantee stably discovering a set of roles to effectively decompose a complex task, as they assume either a predefined role structure or practical experience for selecting hyperparameters. In this article, we propose a mathematical Structural Information principles-based Role Discovery method, namely SIRD, and then present a SIRD optimizing MARL framework, namely SR-MARL, for multi-agent collaboration. The SIRD transforms role discovery into a hierarchical action space clustering. Specifically, the SIRD consists of structuralization, sparsification, and optimization modules, where an optimal encoding tree is generated to perform abstracting to discover roles. 
The SIRD is agnostic to specific MARL algorithms and flexibly integrated with various value function factorization approaches. Empirical evaluations on the StarCraft II micromanagement benchmark demonstrate that, compared with state-of-the-art MARL algorithms, the SR-MARL framework improves the average test win rate by 0.17%, 6.08%, and 3.24%, and reduces the deviation by 16.67%, 30.80%, and 66.30%, under easy, hard, and super hard scenarios.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Xianghua and Peng, Hao and Li, Angsheng}, year={2023}, month={Jun.}, pages={11772-11780} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26390/26162", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26390", + "pdf_size": 719736, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13238324143165327383&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory", + "aff_unique_dep": "State Key Laboratory of Software Development Environment;", + "aff_unique_url": "http://www.buaa.edu.cn;", + "aff_unique_abbr": "BUAA;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26934", + "title": "Efficient Algorithms for Regret Minimization in Billboard Advertisement (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Now-a-days, billboard advertisement has emerged as an effective outdoor advertisement technique. 
In this case, a commercial house approaches an influence provider for a specific number of views of their advertisement content on a payment basis. If the influence provider can satisfy this then they will receive the full payment else a partial payment. If the influence provider provides more or less than the demand then\ncertainly this is a loss to them. This is formalized as \u2018Regret\u2019\nand the goal of the influence provider will be to minimize\nthe \u2018Regret\u2019. In this paper, we propose simple and efficient\nsolution methodologies to solve this problem. Efficiency and\neffectiveness have been demonstrated by experimentation.", + "primary_area": "", + "author": "Dildar Ali; Ankit Kumar Bhagat; Suman Banerjee; Yamuna Prasad", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Indian Institute of Technology Jammu, Jammu & Kashmir 181221, India; Cluster Innovation Centre, University of Delhi, Delhi 110007, India; Department of Computer Science and Engineering, Indian Institute of Technology Jammu, Jammu & Kashmir 181221, India; Department of Computer Science and Engineering, Indian Institute of Technology Jammu, Jammu & Kashmir 181221, India", + "bibtex": "@article{Ali_Bhagat_Banerjee_Prasad_2024, title={Efficient Algorithms for Regret Minimization in Billboard Advertisement (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26934}, DOI={10.1609/aaai.v37i13.26934}, abstractNote={Now-a-days, billboard advertisement has emerged as an effective outdoor advertisement technique. In this case, a commercial house approaches an influence provider for a specific number of views of their advertisement content on a payment basis. If the influence provider can satisfy this then they will receive the full payment else a partial payment. If the influence provider provides more or less than the demand then\ncertainly this is a loss to them. 
This is formalized as \u2018Regret\u2019\nand the goal of the influence provider will be to minimize\nthe \u2018Regret\u2019. In this paper, we propose simple and efficient\nsolution methodologies to solve this problem. Efficiency and\neffectiveness have been demonstrated by experimentation.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ali, Dildar and Bhagat, Ankit Kumar and Banerjee, Suman and Prasad, Yamuna}, year={2024}, month={Jul.}, pages={16148-16149} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26934/26706", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26934", + "pdf_size": 513010, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4595888383497264044&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "iitjammu.ac.in;cic.du.ac.in;iitjammu.ac.in;iitjammu.ac.in", + "email": "iitjammu.ac.in;cic.du.ac.in;iitjammu.ac.in;iitjammu.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Indian Institute of Technology Jammu;University of Delhi", + "aff_unique_dep": "Department of Computer Science and Engineering;Cluster Innovation Centre", + "aff_unique_url": "https://www.iitjammu.ac.in;http://www.du.ac.in", + "aff_unique_abbr": "IIT Jammu;", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Jammu;Delhi", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25797", + "title": "Efficient Answer Enumeration in Description Logics with Functional Roles", + "track": "main", + "status": "Technical", + "abstract": "We study the enumeration of answers to ontology-mediated queries\nwhen the ontology is formulated in a description logic that supports\nfunctional roles and the query is a CQ. 
In particular, we show that\nenumeration is possible with linear preprocessing and constant delay\nwhen a certain extension of the CQ (pertaining to functional roles)\nis acyclic and free-connex acyclic. This holds both for complete answers and\nfor partial answers. We provide matching lower bounds for the\ncase where the query is self-join free.", + "primary_area": "knowledge representation and reasoning", + "author": "Carsten Lutz; Marcin Przyby\u0142ko", + "authorids": "", + "aff": "Institute of Computer Science, Leipzig University, Germany; Institute of Computer Science, Leipzig University, Germany", + "bibtex": "@article{Lutz_Przyby\u0142ko_2023, title={Efficient Answer Enumeration in Description Logics with Functional Roles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25797}, DOI={10.1609/aaai.v37i5.25797}, abstractNote={We study the enumeration of answers to ontology-mediated queries\nwhen the ontology is formulated in a description logic that supports\nfunctional roles and the query is a CQ. In particular, we show that\nenumeration is possible with linear preprocessing and constant delay\nwhen a certain extension of the CQ (pertaining to functional roles)\nis acyclic and free-connex acyclic. This holds both for complete answers and\nfor partial answers. 
We provide matching lower bounds for the\ncase where the query is self-join free.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lutz, Carsten and Przyby\u0142ko, Marcin}, year={2023}, month={Jun.}, pages={6483-6490} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25797/25569", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25797", + "pdf_size": 159688, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1273002540476899947&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "informatik.uni-leipzig.de;informatik.uni-leipzig.de", + "email": "informatik.uni-leipzig.de;informatik.uni-leipzig.de", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Leipzig University", + "aff_unique_dep": "Institute of Computer Science", + "aff_unique_url": "https://www.uni-leipzig.de", + "aff_unique_abbr": "Uni Leipzig", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Leipzig", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25815", + "title": "Efficient Distributed Inference of Deep Neural Networks via Restructuring and Pruning", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we consider the parallel implementation of an already-trained deep model on multiple processing nodes (a.k.a. workers). Specifically, we investigate as to how a deep model should be divided into several parallel sub-models, each of which is executed efficiently by a worker. Since latency due to synchronization and data transfer among workers negatively impacts the performance of the parallel implementation, it is desirable to have minimum interdependency among parallel sub-models. 
To achieve this goal, we propose to rearrange the neurons in the neural network, partition them (without changing the general topology of the neural network), and modify the weights such that the interdependency among sub-models is minimized under the computations and communications constraints of the workers while minimizing its impact on the performance of the model. We propose RePurpose, a layer-wise model restructuring and pruning technique that guarantees the performance of the overall parallelized model. To efficiently apply RePurpose, we propose an approach based on L0 optimization and the Munkres assignment algorithm. We show that, compared to the existing methods, RePurpose significantly improves the efficiency of the distributed inference via parallel implementation, both in terms of communication and computational complexity.", + "primary_area": "machine learning i", + "author": "Afshin Abdi; Saeed Rashidi; Faramarz Fekri; Tushar Krishna", + "authorids": "", + "aff": "School of Electrical and Computer Engineering, Georgia Institute of Technology; School of Electrical and Computer Engineering, Georgia Institute of Technology; School of Electrical and Computer Engineering, Georgia Institute of Technology; School of Electrical and Computer Engineering, Georgia Institute of Technology", + "bibtex": "@article{Abdi_Rashidi_Fekri_Krishna_2023, title={Efficient Distributed Inference of Deep Neural Networks via Restructuring and Pruning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25815}, DOI={10.1609/aaai.v37i6.25815}, abstractNote={In this paper, we consider the parallel implementation of an already-trained deep model on multiple processing nodes (a.k.a. workers). Specifically, we investigate as to how a deep model should be divided into several parallel sub-models, each of which is executed efficiently by a worker. 
Since latency due to synchronization and data transfer among workers negatively impacts the performance of the parallel implementation, it is desirable to have minimum interdependency among parallel sub-models. To achieve this goal, we propose to rearrange the neurons in the neural network, partition them (without changing the general topology of the neural network), and modify the weights such that the interdependency among sub-models is minimized under the computations and communications constraints of the workers while minimizing its impact on the performance of the model. We propose RePurpose, a layer-wise model restructuring and pruning technique that guarantees the performance of the overall parallelized model. To efficiently apply RePurpose, we propose an approach based on L0 optimization and the Munkres assignment algorithm. We show that, compared to the existing methods, RePurpose significantly improves the efficiency of the distributed inference via parallel implementation, both in terms of communication and computational complexity.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Abdi, Afshin and Rashidi, Saeed and Fekri, Faramarz and Krishna, Tushar}, year={2023}, month={Jun.}, pages={6640-6648} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25815/25587", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25815", + "pdf_size": 675777, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15955249299225972461&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gatech.edu;gatech.edu;ece.gatech.edu;ece.gatech.edu", + "email": "gatech.edu;gatech.edu;ece.gatech.edu;ece.gatech.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "School of Electrical and Computer Engineering", + "aff_unique_url": "https://www.gatech.edu", + 
"aff_unique_abbr": "Georgia Tech", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Atlanta", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26197", + "title": "Efficient Distribution Similarity Identification in Clustered Federated Learning via Principal Angles between Client Data Subspaces", + "track": "main", + "status": "Technical", + "abstract": "Clustered federated learning (FL) has been shown to produce promising results by grouping clients into clusters.\nThis is especially effective in scenarios where separate groups of clients have significant differences in the distributions of their local data. Existing clustered FL algorithms are essentially trying to group together clients with similar distributions so that clients in the same cluster can leverage each other's data to better perform federated learning. However, prior clustered FL algorithms attempt to learn these distribution similarities indirectly during training, which can be quite time consuming as many rounds of federated learning may be required until the formation of clusters is stabilized. In this paper, we propose a new approach to federated learning that directly aims to efficiently identify distribution similarities among clients by analyzing the principal angles between the client data subspaces. Each client applies a truncated singular value decomposition (SVD) step on its local data in a single-shot manner to derive a small set of principal vectors, which provides a signature that succinctly captures the main characteristics of the underlying distribution.\nThis small set of principal vectors is provided to the server so that the server can directly identify distribution similarities among the clients to form clusters.\nThis is achieved by comparing the similarities of the principal angles between the client data subspaces spanned by those principal vectors. 
The approach provides a simple, yet effective clustered FL framework that addresses a broad range of data heterogeneity issues beyond simpler forms of Non-IIDness like label skews. Our clustered FL approach also enables convergence guarantees for non-convex objectives.", + "primary_area": "machine learning iii", + "author": "Saeed Vahidian; Mahdi Morafah; Weijia Wang; Vyacheslav Kungurtsev; Chen Chen; Mubarak Shah; Bill Lin", + "authorids": "", + "aff": "UC San Diego; UC San Diego; UC San Diego; Czech Technical University; UCF; UCF; UC San Diego", + "bibtex": "@article{Vahidian_Morafah_Wang_Kungurtsev_Chen_Shah_Lin_2023, title={Efficient Distribution Similarity Identification in Clustered Federated Learning via Principal Angles between Client Data Subspaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26197}, DOI={10.1609/aaai.v37i8.26197}, abstractNote={Clustered federated learning (FL) has been shown to produce promising results by grouping clients into clusters.\nThis is especially effective in scenarios where separate groups of clients have significant differences in the distributions of their local data. Existing clustered FL algorithms are essentially trying to group together clients with similar distributions so that clients in the same cluster can leverage each other\u2019s data to better perform federated learning. However, prior clustered FL algorithms attempt to learn these distribution similarities indirectly during training, which can be quite time consuming as many rounds of federated learning may be required until the formation of clusters is stabilized. In this paper, we propose a new approach to federated learning that directly aims to efficiently identify distribution similarities among clients by analyzing the principal angles between the client data subspaces. 
Each client applies a truncated singular value decomposition (SVD) step on its local data in a single-shot manner to derive a small set of principal vectors, which provides a signature that succinctly captures the main characteristics of the underlying distribution.\nThis small set of principal vectors is provided to the server so that the server can directly identify distribution similarities among the clients to form clusters.\nThis is achieved by comparing the similarities of the principal angles between the client data subspaces spanned by those principal vectors. The approach provides a simple, yet effective clustered FL framework that addresses a broad range of data heterogeneity issues beyond simpler forms of Non-IIDness like label skews. Our clustered FL approach also enables convergence guarantees for non-convex objectives.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vahidian, Saeed and Morafah, Mahdi and Wang, Weijia and Kungurtsev, Vyacheslav and Chen, Chen and Shah, Mubarak and Lin, Bill}, year={2023}, month={Jun.}, pages={10043-10052} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26197/25969", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26197", + "pdf_size": 1431887, + "gs_citation": 69, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6710355156901507827&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "ucsd.com;ucsd.com;ucsd.com;fel.cvut.cz;crcv.ucf.edu;crcv.ucf.edu;ucsd.com", + "email": "ucsd.com;ucsd.com;ucsd.com;fel.cvut.cz;crcv.ucf.edu;crcv.ucf.edu;ucsd.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;2;2;0", + "aff_unique_norm": "University of California, San Diego;Czech Technical University;University of Central Florida", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucsd.edu;https://www.cvut.cz;https://www.ucf.edu", + "aff_unique_abbr": "UCSD;CTU;UCF", + 
"aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "San Diego;", + "aff_country_unique_index": "0;0;0;1;0;0;0", + "aff_country_unique": "United States;Czech Republic" + }, + { + "id": "article-27024", + "title": "Efficient Dynamic Batch Adaptation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this paper we introduce Efficient Dynamic Batch Adaptation (EDBA), which improves on a previous method that works by adjusting the composition and the size of the current batch. Our improvements allow for Dynamic Batch Adaptation to feasibly scale up for bigger models and datasets, drastically improving model convergence and generalization. We show how the method is still able to perform especially well in data-scarce scenarios, managing to obtain a test accuracy on 100 samples of CIFAR-10 of 90.68%, while the baseline only reaches 23.79%. On the full CIFAR-10 dataset, EDBA reaches convergence in \u223c120 epochs while the baseline requires \u223c300 epochs.", + "primary_area": "", + "author": "Cristian Simionescu; George Stoica", + "authorids": "", + "aff": "\u201dAlexandru Ioan Cuza\u201d University; \u201dAlexandru Ioan Cuza\u201d University", + "bibtex": "@article{Simionescu_Stoica_2024, title={Efficient Dynamic Batch Adaptation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27024}, DOI={10.1609/aaai.v37i13.27024}, abstractNote={In this paper we introduce Efficient Dynamic Batch Adaptation (EDBA), which improves on a previous method that works by adjusting the composition and the size of the current batch. Our improvements allow for Dynamic Batch Adaptation to feasibly scale up for bigger models and datasets, drastically improving model convergence and generalization. 
We show how the method is still able to perform especially well in data-scarce scenarios, managing to obtain a test accuracy on 100 samples of CIFAR-10 of 90.68%, while the baseline only reaches 23.79%. On the full CIFAR-10 dataset, EDBA reaches convergence in \u223c120 epochs while the baseline requires \u223c300 epochs.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Simionescu, Cristian and Stoica, George}, year={2024}, month={Jul.}, pages={16328-16329} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27024/26796", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27024", + "pdf_size": 67177, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:3Mtyd7cZEYAJ:scholar.google.com/&scioq=Efficient+Dynamic+Batch+Adaptation+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "nexusmedia.ro;gmail.com", + "email": "nexusmedia.ro;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Alexandru Ioan Cuza University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uaic.ro", + "aff_unique_abbr": "UAIC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Romania" + }, + { + "id": "article-25330", + "title": "Efficient Edge-Preserving Multi-View Stereo Network for Depth Estimation", + "track": "main", + "status": "Technical", + "abstract": "Over the years, learning-based multi-view stereo methods have achieved great success based on their coarse-to-fine depth estimation frameworks. However, 3D CNN-based cost volume regularization inevitably leads to over-smoothing problems at object boundaries due to its smooth properties. Moreover, discrete and sparse depth hypothesis sampling exacerbates the difficulty in recovering the depth of thin structures and object boundaries. 
To this end, we present an Efficient edge-Preserving multi-view stereo Network (EPNet) for practical depth estimation. To keep delicate estimation at details, a Hierarchical Edge-Preserving Residual learning (HEPR) module is proposed to progressively rectify the upsampling errors and help refine multi-scale depth estimation. After that, a Cross-view Photometric Consistency (CPC) is proposed to enhance the gradient flow for detailed structures, which further boosts the estimation accuracy. Last, we design a lightweight cascade framework and inject the above two strategies into it to achieve better efficiency and performance trade-offs. Extensive experiments show that our method achieves state-of-the-art performance with fast inference speed and low memory usage. Notably, our method tops the first place on challenging Tanks and Temples advanced dataset and ETH3D high-res benchmark among all published learning-based methods. Code will be available at https://github.com/susuwj/EPNet.", + "primary_area": "computer vision ii", + "author": "Wanjuan Su; Wenbing Tao", + "authorids": "", + "aff": "National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Arti\ufb01cial Intelligence and Automation, Huazhong University of Science and Technology, China; National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Arti\ufb01cial Intelligence and Automation, Huazhong University of Science and Technology, China", + "bibtex": "@article{Su_Tao_2023, title={Efficient Edge-Preserving Multi-View Stereo Network for Depth Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25330}, DOI={10.1609/aaai.v37i2.25330}, abstractNote={Over the years, learning-based multi-view stereo methods have achieved great success based on their coarse-to-fine depth estimation frameworks. 
However, 3D CNN-based cost volume regularization inevitably leads to over-smoothing problems at object boundaries due to its smooth properties. Moreover, discrete and sparse depth hypothesis sampling exacerbates the difficulty in recovering the depth of thin structures and object boundaries. To this end, we present an Efficient edge-Preserving multi-view stereo Network (EPNet) for practical depth estimation. To keep delicate estimation at details, a Hierarchical Edge-Preserving Residual learning (HEPR) module is proposed to progressively rectify the upsampling errors and help refine multi-scale depth estimation. After that, a Cross-view Photometric Consistency (CPC) is proposed to enhance the gradient flow for detailed structures, which further boosts the estimation accuracy. Last, we design a lightweight cascade framework and inject the above two strategies into it to achieve better efficiency and performance trade-offs. Extensive experiments show that our method achieves state-of-the-art performance with fast inference speed and low memory usage. Notably, our method tops the first place on challenging Tanks and Temples advanced dataset and ETH3D high-res benchmark among all published learning-based methods. 
Code will be available at https://github.com/susuwj/EPNet.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Su, Wanjuan and Tao, Wenbing}, year={2023}, month={Jun.}, pages={2348-2356} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25330/25102", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25330", + "pdf_size": 2488359, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17858263158492725615&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 2, + "aff_domain": "hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn", + "github": "https://github.com/susuwj/EPNet", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "School of Arti\ufb01cial Intelligence and Automation", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25588", + "title": "Efficient Embeddings of Logical Variables for Query Answering over Incomplete Knowledge Graphs", + "track": "main", + "status": "Technical", + "abstract": "The problem of answering complex First-order Logic queries over incomplete knowledge graphs is receiving growing attention in the literature. A promising recent approach to this problem has been to exploit neural link predictors, which can be effective in identifying individual missing triples in the incomplete graph, in order to efficiently answer complex queries. A crucial advantage of this approach over other methods is that it does not require example answers to complex queries for training, as it relies only on the availability of a trained link predictor for the knowledge graph at hand. 
This approach, however, can be computationally expensive during inference, and cannot deal with queries involving negation. \n\nIn this paper, we propose a novel approach that addresses all of these limitations. Experiments on established benchmark datasets demonstrate that our approach offers superior performance while significantly reducing inference times.", + "primary_area": "data mining and knowledge management", + "author": "Dingmin Wang; Yeyuan Chen; Bernardo Cuenca Grau", + "authorids": "", + "aff": "Department of Computer Science, University of Oxford, UK; The School of Computer Science and Technology, Xi\u2019an Jiaotong University, China; Department of Computer Science, University of Oxford, UK", + "bibtex": "@article{Wang_Chen_Cuenca Grau_2023, title={Efficient Embeddings of Logical Variables for Query Answering over Incomplete Knowledge Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25588}, DOI={10.1609/aaai.v37i4.25588}, abstractNote={The problem of answering complex First-order Logic queries over incomplete knowledge graphs is receiving growing attention in the literature. A promising recent approach to this problem has been to exploit neural link predictors, which can be effective in identifying individual missing triples in the incomplete graph, in order to efficiently answer complex queries. A crucial advantage of this approach over other methods is that it does not require example answers to complex queries for training, as it relies only on the availability of a trained link predictor for the knowledge graph at hand. This approach, however, can be computationally expensive during inference, and cannot deal with queries involving negation. In this paper, we propose a novel approach that addresses all of these limitations. 
Experiments on established benchmark datasets demonstrate that our approach offers superior performance while significantly reducing inference times.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Dingmin and Chen, Yeyuan and Cuenca Grau, Bernardo}, year={2023}, month={Jun.}, pages={4652-4659} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25588/25360", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25588", + "pdf_size": 333531, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3904665116052332840&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "cs.ox.ac.uk;gmail.com;cs.ox.ac.uk", + "email": "cs.ox.ac.uk;gmail.com;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Oxford;Xi'an Jiaotong University", + "aff_unique_dep": "Department of Computer Science;School of Computer Science and Technology", + "aff_unique_url": "https://www.ox.ac.uk;http://en.xjtu.edu.cn/", + "aff_unique_abbr": "Oxford;XJTU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Xi'an", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "article-25296", + "title": "Efficient End-to-End Video Question Answering with Pyramidal Multimodal Transformer", + "track": "main", + "status": "Technical", + "abstract": "This paper presents a new method for end-to-end Video Question Answering (VideoQA), aside from the current popularity of using large-scale pre-training with huge feature extractors. We achieve this with a pyramidal multimodal transformer (PMT) model, which simply incorporates a learnable word embedding layer, a few convolutional and transformer layers. We use the anisotropic pyramid to fulfill video-language interactions across different spatio-temporal scales. 
In addition to the canonical pyramid, which includes both bottom-up and top-down pathways with lateral connections, novel strategies are proposed to decompose the visual feature stream into spatial and temporal sub-streams at different scales and implement their interactions with the linguistic semantics while preserving the integrity of local and global semantics. We demonstrate better or on-par performances with high computational efficiency against state-of-the-art methods on five VideoQA benchmarks. Our ablation study shows the scalability of our model that achieves competitive results for text-to-video retrieval by leveraging feature extractors with reusable pre-trained weights, and also the effectiveness of the pyramid. Code available at: https://github.com/Trunpm/PMT-AAAI23.", + "primary_area": "computer vision ii", + "author": "Min Peng; Chongyang Wang; Yu Shi; Xiang-Dong Zhou", + "authorids": "", + "aff": "Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences+University of Chinese Academy of Sciences; Tsinghua University; Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences; Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences", + "bibtex": "@article{Peng_Wang_Shi_Zhou_2023, title={Efficient End-to-End Video Question Answering with Pyramidal Multimodal Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25296}, DOI={10.1609/aaai.v37i2.25296}, abstractNote={This paper presents a new method for end-to-end Video Question Answering (VideoQA), aside from the current popularity of using large-scale pre-training with huge feature extractors. We achieve this with a pyramidal multimodal transformer (PMT) model, which simply incorporates a learnable word embedding layer, a few convolutional and transformer layers. We use the anisotropic pyramid to fulfill video-language interactions across different spatio-temporal scales. 
In addition to the canonical pyramid, which includes both bottom-up and top-down pathways with lateral connections, novel strategies are proposed to decompose the visual feature stream into spatial and temporal sub-streams at different scales and implement their interactions with the linguistic semantics while preserving the integrity of local and global semantics. We demonstrate better or on-par performances with high computational efficiency against state-of-the-art methods on five VideoQA benchmarks. Our ablation study shows the scalability of our model that achieves competitive results for text-to-video retrieval by leveraging feature extractors with reusable pre-trained weights, and also the effectiveness of the pyramid. Code available at: https://github.com/Trunpm/PMT-AAAI23.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Min and Wang, Chongyang and Shi, Yu and Zhou, Xiang-Dong}, year={2023}, month={Jun.}, pages={2038-2046} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25296/25068", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25296", + "pdf_size": 2650288, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12684535809925304342&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cigit.ac.cn;gmail.com;cigit.ac.cn;cigit.ac.cn", + "email": "cigit.ac.cn;gmail.com;cigit.ac.cn;cigit.ac.cn", + "github": "https://github.com/Trunpm/PMT-AAAI23", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Tsinghua University", + "aff_unique_dep": "Institute of Green and Intelligent Technology;;", + "aff_unique_url": "http://www.cas.cn/;http://www.ucas.ac.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "CAS;UCAS;THU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chongqing;", + "aff_country_unique_index": 
"0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26451", + "title": "Efficient Enumeration of Markov Equivalent DAGs", + "track": "main", + "status": "Technical", + "abstract": "Enumerating the directed acyclic graphs (DAGs) of a Markov equivalence class (MEC) is an important primitive in causal analysis. The central resource from the perspective of computational complexity is the delay, that is, the time an algorithm that lists all members of the class requires between two consecutive outputs. Commonly used algorithms for this task utilize the rules proposed by Meek (1995) or the transformational characterization by Chickering (1995), both resulting in superlinear delay. In this paper, we present the first linear-time delay algorithm. On the theoretical side, we show that our algorithm can be generalized to enumerate DAGs represented by models that incorporate background knowledge, such as MPDAGs; on the practical side, we provide an efficient implementation and evaluate it in a series of experiments. 
Complementary to the linear-time delay algorithm, we also provide intriguing insights into Markov equivalence itself: All members of an MEC can be enumerated such that two successive DAGs have structural Hamming distance at most three.", + "primary_area": "reasoning under uncertainty", + "author": "Marcel Wien\u00f6bst; Malte Luttermann; Max Bannach; Maciej Liskiewicz", + "authorids": "", + "aff": "Institute for Theoretical Computer Science, University of L\u00fcbeck, Germany; Institute of Information Systems, University of L\u00fcbeck, Germany; Institute for Theoretical Computer Science, University of L\u00fcbeck, Germany; Institute for Theoretical Computer Science, University of L\u00fcbeck, Germany", + "bibtex": "@article{Wien\u00f6bst_Luttermann_Bannach_Liskiewicz_2023, title={Efficient Enumeration of Markov Equivalent DAGs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26451}, DOI={10.1609/aaai.v37i10.26451}, abstractNote={Enumerating the directed acyclic graphs (DAGs) of a Markov equivalence class (MEC) is an important primitive in causal analysis. The central resource from the perspective of computational complexity is the delay, that is, the time an algorithm that lists all members of the class requires between two consecutive outputs. Commonly used algorithms for this task utilize the rules proposed by Meek (1995) or the transformational characterization by Chickering (1995), both resulting in superlinear delay. In this paper, we present the first linear-time delay algorithm. On the theoretical side, we show that our algorithm can be generalized to enumerate DAGs represented by models that incorporate background knowledge, such as MPDAGs; on the practical side, we provide an efficient implementation and evaluate it in a series of experiments. 
Complementary to the linear-time delay algorithm, we also provide intriguing insights into Markov equivalence itself: All members of an MEC can be enumerated such that two successive DAGs have structural Hamming distance at most three.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wien\u00f6bst, Marcel and Luttermann, Malte and Bannach, Max and Liskiewicz, Maciej}, year={2023}, month={Jun.}, pages={12313-12320} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26451/26223", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26451", + "pdf_size": 147315, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4924047046941720898&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "tcs.uni-luebeck.de;ifis.uni-luebeck.de;tcs.uni-luebeck.de;tcs.uni-luebeck.de", + "email": "tcs.uni-luebeck.de;ifis.uni-luebeck.de;tcs.uni-luebeck.de;tcs.uni-luebeck.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of L\u00fcbeck", + "aff_unique_dep": "Institute for Theoretical Computer Science", + "aff_unique_url": "https://www.uni-luebeck.de", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26224", + "title": "Efficient Exploration in Resource-Restricted Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "In many real-world applications of reinforcement learning (RL), performing actions requires consuming certain types of resources that are non-replenishable in each episode. Typical applications include robotic control with limited energy and video games with consumable items. In tasks with non-replenishable resources, we observe that popular RL methods such as soft actor critic suffer from poor sample efficiency. 
The major reason is that, they tend to exhaust resources fast and thus the subsequent exploration is severely restricted due to the absence of resources. To address this challenge, we first formalize the aforementioned problem as a resource-restricted reinforcement learning, and then propose a novel resource-aware exploration bonus (RAEB) to make reasonable usage of resources. An appealing feature of RAEB is that, it can significantly reduce unnecessary resource-consuming trials while effectively encouraging the agent to explore unvisited states. Experiments demonstrate that the proposed RAEB significantly outperforms state-of-the-art exploration strategies in resource-restricted reinforcement learning environments, improving the sample efficiency by up to an order of magnitude.", + "primary_area": "machine learning iii", + "author": "Zhihai Wang; Taoxing Pan; Qi Zhou; Jie Wang", + "authorids": "", + "aff": "CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China+Institute of Artificial Intelligence, Hefei Comprehensive National Science Center", + "bibtex": "@article{Wang_Pan_Zhou_Wang_2023, title={Efficient Exploration in Resource-Restricted Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26224}, DOI={10.1609/aaai.v37i8.26224}, abstractNote={In many real-world applications of reinforcement learning (RL), performing actions requires consuming certain types of resources that are non-replenishable in each episode. Typical applications include robotic control with limited energy and video games with consumable items. 
In tasks with non-replenishable resources, we observe that popular RL methods such as soft actor critic suffer from poor sample efficiency. The major reason is that, they tend to exhaust resources fast and thus the subsequent exploration is severely restricted due to the absence of resources. To address this challenge, we first formalize the aforementioned problem as a resource-restricted reinforcement learning, and then propose a novel resource-aware exploration bonus (RAEB) to make reasonable usage of resources. An appealing feature of RAEB is that, it can significantly reduce unnecessary resource-consuming trials while effectively encouraging the agent to explore unvisited states. Experiments demonstrate that the proposed RAEB significantly outperforms state-of-the-art exploration strategies in resource-restricted reinforcement learning environments, improving the sample efficiency by up to an order of magnitude.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zhihai and Pan, Taoxing and Zhou, Qi and Wang, Jie}, year={2023}, month={Jun.}, pages={10279-10287} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26224/25996", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26224", + "pdf_size": 1746281, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8605341849696733876&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "University of Science and Technology of China;Hefei Comprehensive National Science Center", + "aff_unique_dep": "CAS Key Laboratory of Technology in GIPAS;Institute of Artificial Intelligence", + "aff_unique_url": "http://www.ustc.edu.cn/;http://www.hfcn.edu.cn", + 
"aff_unique_abbr": "USTC;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hefei", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26225", + "title": "Efficient Explorative Key-Term Selection Strategies for Conversational Contextual Bandits", + "track": "main", + "status": "Technical", + "abstract": "Conversational contextual bandits elicit user preferences by occasionally querying for explicit feedback on key-terms to accelerate learning. However, there are aspects of existing approaches which limit their performance. First, information gained from key-term-level conversations and arm-level recommendations is not appropriately incorporated to speed up learning. Second, it is important to ask explorative key-terms to quickly elicit the user's potential interests in various domains to accelerate the convergence of user preference estimation, which has never been considered in existing works. To tackle these issues, we first propose ``ConLinUCB\", a general framework for conversational bandits with better information incorporation, combining arm-level and key-term-level feedback to estimate user preference in one step at each time. Based on this framework, we further design two bandit algorithms with explorative key-term selection strategies, ConLinUCB-BS and ConLinUCB-MCR. We prove tighter regret upper bounds of our proposed algorithms. Particularly, ConLinUCB-BS achieves a better regret bound than the previous result. Extensive experiments on synthetic and real-world data show significant advantages of our algorithms in learning accuracy (up to 54% improvement) and computational efficiency (up to 72% improvement), compared to the classic ConUCB algorithm, showing the potential benefit to recommender systems.", + "primary_area": "machine learning iii", + "author": "Zhiyong Wang; Xutong Liu; Shuai Li; John C. S. 
Lui", + "authorids": "", + "aff": "The Chinese University of Hong Kong, Hong Kong SAR, China; The Chinese University of Hong Kong, Hong Kong SAR, China; Shanghai Jiao Tong University, Shanghai, China; The Chinese University of Hong Kong, Hong Kong SAR, China", + "bibtex": "@article{Wang_Liu_Li_Lui_2023, title={Efficient Explorative Key-Term Selection Strategies for Conversational Contextual Bandits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26225}, DOI={10.1609/aaai.v37i8.26225}, abstractNote={Conversational contextual bandits elicit user preferences by occasionally querying for explicit feedback on key-terms to accelerate learning. However, there are aspects of existing approaches which limit their performance. First, information gained from key-term-level conversations and arm-level recommendations is not appropriately incorporated to speed up learning. Second, it is important to ask explorative key-terms to quickly elicit the user\u2019s potential interests in various domains to accelerate the convergence of user preference estimation, which has never been considered in existing works. To tackle these issues, we first propose ``ConLinUCB", a general framework for conversational bandits with better information incorporation, combining arm-level and key-term-level feedback to estimate user preference in one step at each time. Based on this framework, we further design two bandit algorithms with explorative key-term selection strategies, ConLinUCB-BS and ConLinUCB-MCR. We prove tighter regret upper bounds of our proposed algorithms. Particularly, ConLinUCB-BS achieves a better regret bound than the previous result. 
Extensive experiments on synthetic and real-world data show significant advantages of our algorithms in learning accuracy (up to 54% improvement) and computational efficiency (up to 72% improvement), compared to the classic ConUCB algorithm, showing the potential benefit to recommender systems.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zhiyong and Liu, Xutong and Li, Shuai and Lui, John C. S.}, year={2023}, month={Jun.}, pages={10288-10295} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26225/25997", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26225", + "pdf_size": 7757414, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6609727980936559744&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;sjtu.edu.cn;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;sjtu.edu.cn;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Shanghai Jiao Tong University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.sjtu.edu.cn", + "aff_unique_abbr": "CUHK;SJTU", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Hong Kong SAR;Shanghai", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25808", + "title": "Efficient Extraction of EL-Ontology Deductive Modules", + "track": "main", + "status": "Technical", + "abstract": "Because widely used real-world ontologies are often complex and large, one important challenge has emerged: designing tools for users to focus on sub-ontologies corresponding to their specific interests. To this end, various modules have been introduced to provide concise ontology views. 
This work concentrates on extracting deductive modules that preserve logical entailment over a given vocabulary. Existing deductive module proposals are either inefficient from a computing point of view or unsatisfactory from a quality point of view because the modules extracted are not concise enough. For example, minimal modules guarantee the most concise results, but their computation is highly time-consuming, while \u22a5\u22a4\u2217-modules are easy to compute but usually contain many redundant items. To overcome computation cost and lack of quality, we propose to compute two kinds of deductive modules called pseudo-minimal modules and complete modules for EL-ontology. Our deductive module definitions rely on associating a tree representation with an ontology, and their computation is based on SAT encoding. Our experiments on real-world ontologies show that our pseudo-minimal modules are indeed minimal modules in almost all cases (98.9%), and computing pseudo-minimal modules is more efficient (99.79 times faster on average) than the state-of-the-art method Zoom for computing minimal modules. Also, our complete modules are more compact than \u22a5\u22a4\u2217-modules, but their computation time remains comparable. 
Finally, note that our proposal applies to EL-ontologies while Zoom only works for EL-terminologies.", + "primary_area": "knowledge representation and reasoning", + "author": "Hui Yang; Yue Ma; Nicole Bidoit", + "authorids": "", + "aff": "LISN, CNRS, Universit\u00e9 Paris-Saclay; LISN, CNRS, Universit\u00e9 Paris-Saclay; LISN, CNRS, Universit\u00e9 Paris-Saclay", + "bibtex": "@article{Yang_Ma_Bidoit_2023, title={Efficient Extraction of EL-Ontology Deductive Modules}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25808}, DOI={10.1609/aaai.v37i5.25808}, abstractNote={Because widely used real-world ontologies are often complex and large, one important challenge has emerged: designing tools for users to focus on sub-ontologies corresponding to their specific interests. To this end, various modules have been introduced to provide concise ontology views. This work concentrates on extracting deductive modules that preserve logical entailment over a given vocabulary. Existing deductive module proposals are either inefficient from a computing point of view or unsatisfactory from a quality point of view because the modules extracted are not concise enough. For example, minimal modules guarantee the most concise results, but their computation is highly time-consuming, while \u22a5\u22a4\u2217-modules are easy to compute but usually contain many redundant items. To overcome computation cost and lack of quality, we propose to compute two kinds of deductive modules called pseudo-minimal modules and complete modules for EL-ontology. Our deductive module definitions rely on associating a tree representation with an ontology, and their computation is based on SAT encoding. 
Our experiments on real-world ontologies show that our pseudo-minimal modules are indeed minimal modules in almost all cases (98.9%), and computing pseudo-minimal modules is more efficient (99.79 times faster on average) than the state-of-the-art method Zoom for computing minimal modules. Also, our complete modules are more compact than \u22a5\u22a4\u2217-modules, but their computation time remains comparable. Finally, note that our proposal applies to EL-ontologies while Zoom only works for EL-terminologies.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Hui and Ma, Yue and Bidoit, Nicole}, year={2023}, month={Jun.}, pages={6575-6582} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25808/25580", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25808", + "pdf_size": 238115, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15410949153126610555&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "lisn.fr;lisn.fr;lisn.fr", + "email": "lisn.fr;lisn.fr;lisn.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Universit\u00e9 Paris-Saclay", + "aff_unique_dep": "LISN", + "aff_unique_url": "https://www.universite-paris-saclay.fr", + "aff_unique_abbr": "UPSa", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-26473", + "title": "Efficient Gradient Approximation Method for Constrained Bilevel Optimization", + "track": "main", + "status": "Technical", + "abstract": "Bilevel optimization has been developed for many machine learning tasks with large-scale and high-dimensional data. 
This paper considers a constrained bilevel optimization problem, where the lower-level optimization problem is convex with equality and inequality constraints and the upper-level optimization problem is non-convex. The overall objective function is non-convex and non-differentiable. To solve the problem, we develop a gradient-based approach, called gradient approximation method, which determines the descent direction by computing several representative gradients of the objective function inside a neighborhood of the current estimate. We show that the algorithm asymptotically converges to the set of Clarke stationary points, and demonstrate the efficacy of the algorithm by the experiments on hyperparameter optimization and meta-learning.", + "primary_area": "search and optimization", + "author": "Siyuan Xu; Minghui Zhu", + "authorids": "", + "aff": "School of Electrical Engineering and Computer Science, The Pennsylvania State University, University Park, USA; School of Electrical Engineering and Computer Science, The Pennsylvania State University, University Park, USA", + "bibtex": "@article{Xu_Zhu_2023, title={Efficient Gradient Approximation Method for Constrained Bilevel Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26473}, DOI={10.1609/aaai.v37i10.26473}, abstractNote={Bilevel optimization has been developed for many machine learning tasks with large-scale and high-dimensional data. This paper considers a constrained bilevel optimization problem, where the lower-level optimization problem is convex with equality and inequality constraints and the upper-level optimization problem is non-convex. The overall objective function is non-convex and non-differentiable. To solve the problem, we develop a gradient-based approach, called gradient approximation method, which determines the descent direction by computing several representative gradients of the objective function inside a neighborhood of the current estimate. 
We show that the algorithm asymptotically converges to the set of Clarke stationary points, and demonstrate the efficacy of the algorithm by the experiments on hyperparameter optimization and meta-learning.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Siyuan and Zhu, Minghui}, year={2023}, month={Jun.}, pages={12509-12517} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26473/26245", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26473", + "pdf_size": 6342660, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3547812939410938566&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "psu.edu;psu.edu", + "email": "psu.edu;psu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Pennsylvania State University", + "aff_unique_dep": "School of Electrical Engineering and Computer Science", + "aff_unique_url": "https://www.psu.edu", + "aff_unique_abbr": "PSU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "University Park", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25359", + "title": "Efficient Image Captioning for Edge Devices", + "track": "main", + "status": "Technical", + "abstract": "Recent years have witnessed the rapid progress of image captioning. However, the demands for large memory storage and heavy computational burden prevent these captioning models from being deployed on mobile devices. The main obstacles lie in the heavyweight visual feature extractors (i.e., object detectors) and complicated cross-modal fusion networks. To this end, we propose LightCap, a lightweight image captioner for resource-limited devices. The core design is built on the recent CLIP model for efficient image captioning. 
To be specific, on the one hand, we leverage the CLIP model to extract the compact grid features without relying on the time-consuming object detectors. On the other hand, we transfer the image-text retrieval design of CLIP to image captioning scenarios by devising a novel visual concept extractor and a cross-modal modulator. We further optimize the cross-modal fusion model and parallel prediction heads via sequential and ensemble distillations. With the carefully designed architecture, our model merely contains 40M parameters, saving the model size by more than 75% and the FLOPs by more than 98% in comparison with the current state-of-the-art methods. In spite of the low capacity, our model still exhibits state-of-the-art performance on prevalent datasets, e.g., 136.6 CIDEr on COCO Karpathy test split. Testing on the smartphone with only a single CPU, the proposed LightCap exhibits a fast inference speed of 188ms per image, which is ready for practical applications.", + "primary_area": "computer vision ii", + "author": "Ning Wang; Jiangrong Xie; Hang Luo; Qinglin Cheng; Jihao Wu; Mingbo Jia; Linlin Li", + "authorids": "", + "aff": "Huawei Inc.; Huawei Inc.; Huawei Inc.; Huawei Inc.; Huawei Inc.; Huawei Inc.; Huawei Inc.", + "bibtex": "@article{Wang_Xie_Luo_Cheng_Wu_Jia_Li_2023, title={Efficient Image Captioning for Edge Devices}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25359}, DOI={10.1609/aaai.v37i2.25359}, abstractNote={Recent years have witnessed the rapid progress of image captioning. However, the demands for large memory storage and heavy computational burden prevent these captioning models from being deployed on mobile devices. The main obstacles lie in the heavyweight visual feature extractors (i.e., object detectors) and complicated cross-modal fusion networks. To this end, we propose LightCap, a lightweight image captioner for resource-limited devices. 
The core design is built on the recent CLIP model for efficient image captioning. To be specific, on the one hand, we leverage the CLIP model to extract the compact grid features without relying on the time-consuming object detectors. On the other hand, we transfer the image-text retrieval design of CLIP to image captioning scenarios by devising a novel visual concept extractor and a cross-modal modulator. We further optimize the cross-modal fusion model and parallel prediction heads via sequential and ensemble distillations. With the carefully designed architecture, our model merely contains 40M parameters, saving the model size by more than 75% and the FLOPs by more than 98% in comparison with the current state-of-the-art methods. In spite of the low capacity, our model still exhibits state-of-the-art performance on prevalent datasets, e.g., 136.6 CIDEr on COCO Karpathy test split. Testing on the smartphone with only a single CPU, the proposed LightCap exhibits a fast inference speed of 188ms per image, which is ready for practical applications.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Ning and Xie, Jiangrong and Luo, Hang and Cheng, Qinglin and Wu, Jihao and Jia, Mingbo and Li, Linlin}, year={2023}, month={Jun.}, pages={2608-2616} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25359/25131", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25359", + "pdf_size": 468216, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11086372530701215768&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.ustc.edu.cn;foxmail.com; flhjeremy;outlook.com; fwujihao; jiamingbo;huawei.com", + "email": "mail.ustc.edu.cn;foxmail.com; flhjeremy;outlook.com; fwujihao; jiamingbo;huawei.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Huawei", + "aff_unique_dep": "", + 
"aff_unique_url": "https://www.huawei.com", + "aff_unique_abbr": "Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25157", + "title": "Efficient Mirror Detection via Multi-Level Heterogeneous Learning", + "track": "main", + "status": "Technical", + "abstract": "We present HetNet (Multi-level Heterogeneous Network), a highly efficient mirror detection network. Current mirror detection methods focus more on performance than efficiency, limiting the real-time applications (such as drones). Their lack of efficiency is aroused by the common design of adopting homogeneous modules at different levels, which ignores the difference between different levels of features. In contrast, HetNet detects potential mirror regions initially through low-level understandings (e.g., intensity contrasts) and then combines with high-level understandings (contextual discontinuity for instance) to finalize the predictions. To perform accurate yet efficient mirror detection, HetNet follows an effective architecture that obtains specific information at different stages to detect mirrors. We further propose a multi-orientation intensity-based contrasted module (MIC) and a reflection semantic logical module (RSL), equipped on HetNet, to predict potential mirror regions by low-level understandings and analyze semantic logic in scenarios by high-level understandings, respectively. Compared to the state-of-the-art method, HetNet runs 664% faster and draws an average performance gain of 8.9% on MAE, 3.1% on IoU, and 2.0% on F-measure on two mirror detection benchmarks. The code is available at https://github.com/Catherine-R-He/HetNet.", + "primary_area": "computer vision i", + "author": "Ruozhen He; Jiaying Lin; Rynson W.H. 
Lau", + "authorids": "", + "aff": "Department of Computer Science, City University of Hong Kong; Department of Computer Science, City University of Hong Kong; Department of Computer Science, City University of Hong Kong", + "bibtex": "@article{He_Lin_W.H. Lau_2023, title={Efficient Mirror Detection via Multi-Level Heterogeneous Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25157}, DOI={10.1609/aaai.v37i1.25157}, abstractNote={We present HetNet (Multi-level Heterogeneous Network), a highly efficient mirror detection network. Current mirror detection methods focus more on performance than efficiency, limiting the real-time applications (such as drones). Their lack of efficiency is aroused by the common design of adopting homogeneous modules at different levels, which ignores the difference between different levels of features. In contrast, HetNet detects potential mirror regions initially through low-level understandings (e.g., intensity contrasts) and then combines with high-level understandings (contextual discontinuity for instance) to finalize the predictions. To perform accurate yet efficient mirror detection, HetNet follows an effective architecture that obtains specific information at different stages to detect mirrors. We further propose a multi-orientation intensity-based contrasted module (MIC) and a reflection semantic logical module (RSL), equipped on HetNet, to predict potential mirror regions by low-level understandings and analyze semantic logic in scenarios by high-level understandings, respectively. Compared to the state-of-the-art method, HetNet runs 664% faster and draws an average performance gain of 8.9% on MAE, 3.1% on IoU, and 2.0% on F-measure on two mirror detection benchmarks. The code is available at https://github.com/Catherine-R-He/HetNet.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Ruozhen and Lin, Jiaying and W.H. 
Lau, Rynson}, year={2023}, month={Jun.}, pages={790-798} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25157/24929", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25157", + "pdf_size": 2039047, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17841507894775435897&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "my.cityu.edu.hk;my.cityu.edu.hk;cityu.edu.hk", + "email": "my.cityu.edu.hk;my.cityu.edu.hk;cityu.edu.hk", + "github": "https://github.com/Catherine-R-He/HetNet", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "City University of Hong Kong", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.cityu.edu.hk", + "aff_unique_abbr": "CityU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26919", + "title": "Efficient Non-parametric Neural Density Estimation and Its Application to Outlier and Anomaly Detection", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "The main goal of this thesis is to develop efficient non-parametric density estimation methods that can be integrated with deep learning architectures, for instance, convolutional neural networks and transformers. Density estimation methods can be applied to different problems in statistics and machine learning. They may be used to solve tasks such as anomaly detection, generative models, semi-supervised learning, compression, text-to-speech, among others. The present work will mainly focus on the application of the method in anomaly and outlier detection tasks such as medical anomaly detection, fraud detection, video surveillance, time series anomaly detection, industrial damage detection, among others. A recent approach to non-parametric density estimation is neural density estimation. 
One advantage of these methods is that they can be integrated with deep learning architectures and trained using gradient descent. Most of these methods are based on neural network implementations of normalizing flows which transform an original simpler distribution to a more complex one. The approach of this thesis is based on a different idea that combines random Fourier features with density matrices to estimate the underlying distribution function. The method can be seen as an approximation of the popular kernel density estimation method but without the inherent computational cost.", + "primary_area": "", + "author": "Joseph A. Gallego-Mejia", + "authorids": "", + "aff": "Universidad Nacional de Colombia", + "bibtex": "@article{Gallego-Mejia_2024, title={Efficient Non-parametric Neural Density Estimation and Its Application to Outlier and Anomaly Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26919}, DOI={10.1609/aaai.v37i13.26919}, abstractNote={The main goal of this thesis is to develop efficient non-parametric density estimation methods that can be integrated with deep learning architectures, for instance, convolutional neural networks and transformers. Density estimation methods can be applied to different problems in statistics and machine learning. They may be used to solve tasks such as anomaly detection, generative models, semi-supervised learning, compression, text-to-speech, among others. The present work will mainly focus on the application of the method in anomaly and outlier detection tasks such as medical anomaly detection, fraud detection, video surveillance, time series anomaly detection, industrial damage detection, among others. A recent approach to non-parametric density estimation is neural density estimation. One advantage of these methods is that they can be integrated with deep learning architectures and trained using gradient descent. 
Most of these methods are based on neural network implementations of normalizing flows which transform an original simpler distribution to a more complex one. The approach of this thesis is based on a different idea that combines random Fourier features with density matrices to estimate the underlying distribution function. The method can be seen as an approximation of the popular kernel density estimation method but without the inherent computational cost.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gallego-Mejia, Joseph A.}, year={2024}, month={Jul.}, pages={16117-16118} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26919/26691", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26919", + "pdf_size": 59394, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:IsUSGHPkXmoJ:scholar.google.com/&scioq=Efficient+Non-parametric+Neural+Density+Estimation+and+Its+Application+to+Outlier+and+Anomaly+Detection&hl=en&as_sdt=0,6", + "gs_version_total": 5, + "aff_domain": "unal.edu.co", + "email": "unal.edu.co", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Universidad Nacional de Colombia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unal.edu.co", + "aff_unique_abbr": "UNAL", + "aff_country_unique_index": "0", + "aff_country_unique": "Colombia" + }, + { + "id": "article-26258", + "title": "Efficient Top-K Feature Selection Using Coordinate Descent Method", + "track": "main", + "status": "Technical", + "abstract": "Sparse learning based feature selection has been widely investigated in recent years. In this study, we focus on the l2,0-norm based feature selection, which is effective for exact top-k feature selection but challenging to optimize. 
To solve the general l2,0-norm constrained problems, we novelly develop a parameter-free optimization framework based on the coordinate descend (CD) method, termed CD-LSR. Specifically, we devise a skillful conversion from the original problem to solving one continuous matrix and one discrete selection matrix. Then the nontrivial l2,0-norm constraint can be solved efficiently by solving the selection matrix with CD method. We impose the l2,0-norm on a vanilla least square regression (LSR) model for feature selection and optimize it with CD-LSR. Extensive experiments exhibit the efficiency of CD-LSR, as well as the discrimination ability of l2,0-norm to identify informative features. More importantly, the versatility of CD-LSR facilitates the applications of the l2,0-norm in more sophisticated models. Based on the competitive performance of l2,0-norm on the baseline LSR model, the satisfactory performance of its applications is reasonably expected. The source MATLAB code are available at: https://github.com/solerxl/Code_For_AAAI_2023.", + "primary_area": "machine learning iv", + "author": "Lei Xu; Rong Wang; Feiping Nie; Xuelong Li", + "authorids": "", + "aff": "School of Computer Science, Northwestern Polytechnical University, Xi\u2019an 710072, P.R. China+School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University, Xi\u2019an 710072, P.R. China; School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University, Xi\u2019an 710072, P.R. China; School of Computer Science, Northwestern Polytechnical University, Xi\u2019an 710072, P.R. China+School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University, Xi\u2019an 710072, P.R. China; School of Artificial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University, Xi\u2019an 710072, P.R. 
China", + "bibtex": "@article{Xu_Wang_Nie_Li_2023, title={Efficient Top-K Feature Selection Using Coordinate Descent Method}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26258}, DOI={10.1609/aaai.v37i9.26258}, abstractNote={Sparse learning based feature selection has been widely investigated in recent years. In this study, we focus on the l2,0-norm based feature selection, which is effective for exact top-k feature selection but challenging to optimize. To solve the general l2,0-norm constrained problems, we novelly develop a parameter-free optimization framework based on the coordinate descend (CD) method, termed CD-LSR. Specifically, we devise a skillful conversion from the original problem to solving one continuous matrix and one discrete selection matrix. Then the nontrivial l2,0-norm constraint can be solved efficiently by solving the selection matrix with CD method. We impose the l2,0-norm on a vanilla least square regression (LSR) model for feature selection and optimize it with CD-LSR. Extensive experiments exhibit the efficiency of CD-LSR, as well as the discrimination ability of l2,0-norm to identify informative features. More importantly, the versatility of CD-LSR facilitates the applications of the l2,0-norm in more sophisticated models. Based on the competitive performance of l2,0-norm on the baseline LSR model, the satisfactory performance of its applications is reasonably expected. 
The source MATLAB code are available at: https://github.com/solerxl/Code_For_AAAI_2023.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Lei and Wang, Rong and Nie, Feiping and Li, Xuelong}, year={2023}, month={Jun.}, pages={10594-10601} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26258/26030", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26258", + "pdf_size": 429864, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6357314558397983960&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com; ; ; ", + "email": "gmail.com; ; ; ", + "github": "https://github.com/solerxl/Code_For_AAAI_2023", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0;0+0;0", + "aff_unique_norm": "Northwestern Polytechnical University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.nwpu.edu.cn", + "aff_unique_abbr": "NPU", + "aff_campus_unique_index": "0+0;0;0+0;0", + "aff_campus_unique": "Xi'an", + "aff_country_unique_index": "0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26836", + "title": "Efficient Training of Large-Scale Industrial Fault Diagnostic Models through Federated Opportunistic Block Dropout", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Artificial intelligence (AI)-empowered industrial fault diagnostics is important in ensuring the safe operation of industrial applications. Since complex industrial systems often involve multiple industrial plants (possibly belonging to different companies or subsidiaries) with sensitive data collected and stored in a distributed manner, collaborative fault diagnostic model training often needs to leverage federated learning (FL). 
As the scale of the industrial fault diagnostic models are often large and communication channels in such systems are often not exclusively used for FL model training, existing deployed FL model training frameworks cannot train such models efficiently across multiple institutions. In this paper, we report our experience developing and deploying the Federated Opportunistic Block Dropout (FedOBD) approach for industrial fault diagnostic model training. By decomposing large-scale models into semantic blocks and enabling FL participants to opportunistically upload selected important blocks in a quantized manner, it significantly reduces the communication overhead while maintaining model performance. Since its deployment in ENN Group in February 2022, FedOBD has served two coal chemical plants across two cities in China to build industrial fault prediction models. It helped the company reduce the training communication overhead by over 70% compared to its previous AI Engine, while maintaining model performance at over 85% test F1 score. 
To our knowledge, it is the first successfully deployed dropout-based FL approach.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Yuanyuan Chen; Zichen Chen; Sheng Guo; Yansong Zhao; Zelei Liu; Pengcheng Wu; Chengyi Yang; Zengxiang Li; Han Yu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanyang Technological University, Singapore + University of California, Santa Barbara, CA, USA; School of Computer Science and Engineering, Nanyang Technological University, Singapore + University of California, Santa Barbara, CA, USA; Digital Research Institute, ENN Group, Beijing, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; Digital Research Institute, ENN Group, Beijing, China; Digital Research Institute, ENN Group, Beijing, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore", + "bibtex": "@article{Chen_Chen_Guo_Zhao_Liu_Wu_Yang_Li_Yu_2024, title={Efficient Training of Large-Scale Industrial Fault Diagnostic Models through Federated Opportunistic Block Dropout}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26836}, DOI={10.1609/aaai.v37i13.26836}, abstractNote={Artificial intelligence (AI)-empowered industrial fault diagnostics is important in ensuring the safe operation of industrial applications. Since complex industrial systems often involve multiple industrial plants (possibly belonging to different companies or subsidiaries) with sensitive data collected and stored in a distributed manner, collaborative fault diagnostic model training often needs to leverage federated learning (FL). 
As the scale of the industrial fault diagnostic models are often large and communication channels in such systems are often not exclusively used for FL model training, existing deployed FL model training frameworks cannot train such models efficiently across multiple institutions. In this paper, we report our experience developing and deploying the Federated Opportunistic Block Dropout (FedOBD) approach for industrial fault diagnostic model training. By decomposing large-scale models into semantic blocks and enabling FL participants to opportunistically upload selected important blocks in a quantized manner, it significantly reduces the communication overhead while maintaining model performance. Since its deployment in ENN Group in February 2022, FedOBD has served two coal chemical plants across two cities in China to build industrial fault prediction models. It helped the company reduce the training communication overhead by over 70% compared to its previous AI Engine, while maintaining model performance at over 85% test F1 score. 
To our knowledge, it is the first successfully deployed dropout-based FL approach.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Yuanyuan and Chen, Zichen and Guo, Sheng and Zhao, Yansong and Liu, Zelei and Wu, Pengcheng and Yang, Chengyi and Li, Zengxiang and Yu, Han}, year={2024}, month={Jul.}, pages={15485-15493} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26836/26608", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26836", + "pdf_size": 1546318, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10485228414247681168&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "enn.cn;ntu.edu.sg; ; ; ; ; ; ;", + "email": "enn.cn;ntu.edu.sg; ; ; ; ; ; ;", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0+1;2;0;0;0;2;2;0", + "aff_unique_norm": "Nanyang Technological University;University of California, Santa Barbara;ENN Group", + "aff_unique_dep": "School of Computer Science and Engineering;;Digital Research Institute", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.ucsb.edu;https://www.enn.cn", + "aff_unique_abbr": "NTU;UCSB;", + "aff_campus_unique_index": "0+1;0+1;2;0;0;0;2;2;0", + "aff_campus_unique": "Singapore;Santa Barbara;Beijing", + "aff_country_unique_index": "0+1;0+1;2;0;0;0;2;2;0", + "aff_country_unique": "Singapore;United States;China" + }, + { + "id": "article-26114", + "title": "Efficient and Accurate Learning of Mixtures of Plackett-Luce Models", + "track": "main", + "status": "Technical", + "abstract": "Mixture models of Plackett-Luce (PL), one of the most fundamental ranking models, are an active research area of both theoretical and practical significance. Most previously proposed parameter estimation algorithms instantiate the EM algorithm, often with random initialization. 
However, such an initialization scheme may not yield a good initial estimate and the algorithms require multiple restarts, incurring a large time complexity. As for the EM procedure, while the E-step can be performed efficiently, maximizing the log-likelihood in the M-step is difficult due to the combinatorial nature of the PL likelihood function. Therefore, previous authors favor algorithms that maximize surrogate likelihood functions. However, the final estimate may deviate from the true maximum likelihood estimate as a consequence. In this paper, we address these known limitations. We propose an initialization algorithm that can provide a provably accurate initial estimate and an EM algorithm that maximizes the true log-likelihood function efficiently. Experiments on both synthetic and real datasets show that our algorithm is competitive in terms of accuracy and speed to baseline algorithms, especially on datasets with a large number of items.", + "primary_area": "machine learning iii", + "author": "Duc Nguyen; Anderson Y. Zhang", + "authorids": "", + "aff": "Depart of Computer and Information Science, University of Pennsylvania; Department of Statistics and Data Science, University of Pennsylvania", + "bibtex": "@article{Nguyen_Zhang_2023, title={Efficient and Accurate Learning of Mixtures of Plackett-Luce Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26114}, DOI={10.1609/aaai.v37i8.26114}, abstractNote={Mixture models of Plackett-Luce (PL), one of the most fundamental ranking models, are an active research area of both theoretical and practical significance. Most previously proposed parameter estimation algorithms instantiate the EM algorithm, often with random initialization. However, such an initialization scheme may not yield a good initial estimate and the algorithms require multiple restarts, incurring a large time complexity. 
As for the EM procedure, while the E-step can be performed efficiently, maximizing the log-likelihood in the M-step is difficult due to the combinatorial nature of the PL likelihood function. Therefore, previous authors favor algorithms that maximize surrogate likelihood functions. However, the final estimate may deviate from the true maximum likelihood estimate as a consequence. In this paper, we address these known limitations. We propose an initialization algorithm that can provide a provably accurate initial estimate and an EM algorithm that maximizes the true log-likelihood function efficiently. Experiments on both synthetic and real datasets show that our algorithm is competitive in terms of accuracy and speed to baseline algorithms, especially on datasets with a large number of items.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Duc and Zhang, Anderson Y.}, year={2023}, month={Jun.}, pages={9294-9301} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26114/25886", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26114", + "pdf_size": 282422, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6683832552576134391&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "seas.upenn.edu;wharton.upenn.edu", + "email": "seas.upenn.edu;wharton.upenn.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "Department of Computer and Information Science", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26471", + "title": "Electrophysiological Brain Source Imaging via Combinatorial Search with Provable Optimality", + "track": "main", + "status": 
"Technical", + "abstract": "Electrophysiological Source Imaging (ESI) refers to reconstructing the underlying brain source activation from non-invasive Electroencephalography (EEG) and Magnetoencephalography (MEG) measurements on the scalp. Estimating the source locations and their extents is a fundamental tool in clinical and neuroscience applications. However, the estimation is challenging because of the ill-posedness and high coherence in the leadfield matrix as well as the noise in the EEG/MEG data. In this work, we proposed a combinatorial search framework to address the ESI problem with a provable optimality guarantee. Specifically, by exploiting the graph neighborhood information in the brain source space, we converted the ESI problem into a graph search problem and designed a combinatorial search algorithm under the framework of A* to solve it. The proposed algorithm is guaranteed to give an optimal solution to the ESI problem. Experimental results on both synthetic data and real epilepsy EEG data demonstrated that the proposed algorithm could faithfully reconstruct the source activation in the brain.", + "primary_area": "search and optimization", + "author": "Guihong Wan; Meng Jiao; Xinglong Ju; Yu Zhang; Haim Schweitzer; Feng Liu", + "authorids": "", + "aff": "Massachusetts General Hospital, Harvard Medical School; School of Systems and Enterprises, Stevens Institute of Technology; Division of Management Information Systems, The University of Oklahoma; Department of Bioengineering, Lehigh University; Department of Computer Science, The University of Texas at Dallas; School of Systems and Enterprises, Stevens Institute of Technology", + "bibtex": "@article{Wan_Jiao_Ju_Zhang_Schweitzer_Liu_2023, title={Electrophysiological Brain Source Imaging via Combinatorial Search with Provable Optimality}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26471}, DOI={10.1609/aaai.v37i10.26471}, abstractNote={Electrophysiological Source Imaging (ESI) 
refers to reconstructing the underlying brain source activation from non-invasive Electroencephalography (EEG) and Magnetoencephalography (MEG) measurements on the scalp. Estimating the source locations and their extents is a fundamental tool in clinical and neuroscience applications. However, the estimation is challenging because of the ill-posedness and high coherence in the leadfield matrix as well as the noise in the EEG/MEG data. In this work, we proposed a combinatorial search framework to address the ESI problem with a provable optimality guarantee. Specifically, by exploiting the graph neighborhood information in the brain source space, we converted the ESI problem into a graph search problem and designed a combinatorial search algorithm under the framework of A* to solve it. The proposed algorithm is guaranteed to give an optimal solution to the ESI problem. Experimental results on both synthetic data and real epilepsy EEG data demonstrated that the proposed algorithm could faithfully reconstruct the source activation in the brain.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wan, Guihong and Jiao, Meng and Ju, Xinglong and Zhang, Yu and Schweitzer, Haim and Liu, Feng}, year={2023}, month={Jun.}, pages={12491-12499} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26471/26243", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26471", + "pdf_size": 8076396, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4914430763123575203&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mgh.harvard.edu;stevens.edu;ou.edu;lehigh.edu;utdallas.edu;stevens.edu", + "email": "mgh.harvard.edu;stevens.edu;ou.edu;lehigh.edu;utdallas.edu;stevens.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;1", + "aff_unique_norm": "Massachusetts General Hospital;Stevens Institute of Technology;The University of 
Oklahoma;Lehigh University;The University of Texas at Dallas", + "aff_unique_dep": "Harvard Medical School;School of Systems and Enterprises;Division of Management Information Systems;Department of Bioengineering;Department of Computer Science", + "aff_unique_url": "https://www.massgeneral.org;https://www.stevens.edu;https://www.ou.edu;https://www.lehigh.edu;https://www.utdallas.edu", + "aff_unique_abbr": "MGH;SIT;OU;Lehigh;UT Dallas", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Dallas", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25898", + "title": "Eliciting Structural and Semantic Global Knowledge in Unsupervised Graph Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Graph Contrastive Learning (GCL) has recently drawn much research interest for learning generalizable node representations in a self-supervised manner. In general, the contrastive learning process in GCL is performed on top of the representations learned by a graph neural network (GNN) backbone, which transforms and propagates the node contextual information based on its local neighborhoods. However, nodes sharing similar characteristics may not always be geographically close, which poses a great challenge for unsupervised GCL efforts due to their inherent limitations in capturing such global graph knowledge. In this work, we address their inherent limitations by proposing a simple yet effective framework -- Simple Neural Networks with Structural and Semantic Contrastive Learning} (S^3-CL). Notably, by virtue of the proposed structural and semantic contrastive learning algorithms, even a simple neural network can learn expressive node representations that preserve valuable global structural and semantic patterns. 
Our experiments demonstrate that the node representations learned by S^3-CL) achieve superior performance on different downstream tasks compared with the state-of-the-art unsupervised GCL methods. Implementation and more experimental details are publicly available at https://github.com/kaize0409/S-3-CL.", + "primary_area": "machine learning i", + "author": "Kaize Ding; Yancheng Wang; Yingzhen Yang; Huan Liu", + "authorids": "", + "aff": "Arizona State University, School of Computing and Augmented Intelligence; Arizona State University, School of Computing and Augmented Intelligence; Arizona State University, School of Computing and Augmented Intelligence; Arizona State University, School of Computing and Augmented Intelligence", + "bibtex": "@article{Ding_Wang_Yang_Liu_2023, title={Eliciting Structural and Semantic Global Knowledge in Unsupervised Graph Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25898}, DOI={10.1609/aaai.v37i6.25898}, abstractNote={Graph Contrastive Learning (GCL) has recently drawn much research interest for learning generalizable node representations in a self-supervised manner. In general, the contrastive learning process in GCL is performed on top of the representations learned by a graph neural network (GNN) backbone, which transforms and propagates the node contextual information based on its local neighborhoods. However, nodes sharing similar characteristics may not always be geographically close, which poses a great challenge for unsupervised GCL efforts due to their inherent limitations in capturing such global graph knowledge. In this work, we address their inherent limitations by proposing a simple yet effective framework -- Simple Neural Networks with Structural and Semantic Contrastive Learning} (S^3-CL). 
Notably, by virtue of the proposed structural and semantic contrastive learning algorithms, even a simple neural network can learn expressive node representations that preserve valuable global structural and semantic patterns. Our experiments demonstrate that the node representations learned by S^3-CL) achieve superior performance on different downstream tasks compared with the state-of-the-art unsupervised GCL methods. Implementation and more experimental details are publicly available at https://github.com/kaize0409/S-3-CL.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Kaize and Wang, Yancheng and Yang, Yingzhen and Liu, Huan}, year={2023}, month={Jun.}, pages={7378-7386} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25898/25670", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25898", + "pdf_size": 4894606, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11131925586278641266&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "asu.edu;asu.edu;asu.edu;asu.edu", + "email": "asu.edu;asu.edu;asu.edu;asu.edu", + "github": "https://github.com/kaize0409/S-3-CL", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "School of Computing and Augmented Intelligence", + "aff_unique_url": "https://www.asu.edu", + "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25528", + "title": "Eliminating the Impossible, Whatever Remains Must Be True: On Extracting and Applying Background Knowledge in the Context of Formal Explanations", + "track": "main", + "status": "Technical", + "abstract": "The rise of AI methods to make predictions and decisions has led to a pressing need for more explainable artificial intelligence 
(XAI) methods. One common approach for XAI is to produce a post-hoc explanation, explaining why a black box ML model made a certain prediction. Formal approaches to post-hoc explanations provide succinct reasons for why a prediction was made, as well as why not another prediction was made. But these approaches assume that features are independent and uniformly distributed. While this means that \u201cwhy\u201d explanations are correct, they may be longer than required. It also means the \u201cwhy not\u201d explanations may be suspect as the counterexamples they rely on may not be meaningful. In this paper, we show how one can apply background knowledge to give more succinct \u201cwhy\u201d formal explanations, that are presumably easier to interpret by humans, and give more accurate \u201cwhy not\u201d explanations. In addition, we show how to use existing rule induction techniques to efficiently extract background information from a dataset.", + "primary_area": "constraint satisfaction and optimization", + "author": "Jinqiang Yu; Alexey Ignatiev; Peter J. Stuckey; Nina Narodytska; Joao Marques-Silva", + "authorids": "", + "aff": "Monash University, Melbourne, Australia+ARC Training Centre in OPTIMA, Melbourne, Australia; Monash University, Melbourne, Australia+ARC Training Centre in OPTIMA, Melbourne, Australia; Monash University, Melbourne, Australia+ARC Training Centre in OPTIMA, Melbourne, Australia; VMWare Research, Palo Alto, USA; IRIT, CNRS, Toulouse, France", + "bibtex": "@article{Yu_Ignatiev_Stuckey_Narodytska_Marques-Silva_2023, title={Eliminating the Impossible, Whatever Remains Must Be True: On Extracting and Applying Background Knowledge in the Context of Formal Explanations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25528}, DOI={10.1609/aaai.v37i4.25528}, abstractNote={The rise of AI methods to make predictions and decisions has led to a pressing need for more explainable artificial intelligence (XAI) methods. 
One common approach for XAI is to produce a post-hoc explanation, explaining why a black box ML model made a certain prediction. Formal approaches to post-hoc explanations provide succinct reasons for why a prediction was made, as well as why not another prediction was made. But these approaches assume that features are independent and uniformly distributed. While this means that \u201cwhy\u201d explanations are correct, they may be longer than required. It also means the \u201cwhy not\u201d explanations may be suspect as the counterexamples they rely on may not be meaningful. In this paper, we show how one can apply background knowledge to give more succinct \u201cwhy\u201d formal explanations, that are presumably easier to interpret by humans, and give more accurate \u201cwhy not\u201d explanations. In addition, we show how to use existing rule induction techniques to efficiently extract background information from a dataset.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Jinqiang and Ignatiev, Alexey and Stuckey, Peter J. 
and Narodytska, Nina and Marques-Silva, Joao}, year={2023}, month={Jun.}, pages={4123-4131} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25528/25300", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25528", + "pdf_size": 1095124, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5894646911995364400&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "monash.edu;monash.edu;monash.edu;vmware.com;irit.fr", + "email": "monash.edu;monash.edu;monash.edu;vmware.com;irit.fr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;2;3", + "aff_unique_norm": "Monash University;ARC Training Centre in OPTIMA;VMware Research;Institut de Recherche en Informatique de Toulouse", + "aff_unique_dep": ";;Research;", + "aff_unique_url": "https://www.monash.edu;;https://www.vmware.com/research.html;https://www.irit.fr", + "aff_unique_abbr": "Monash;;VMware;IRIT", + "aff_campus_unique_index": "0+0;0+0;0+0;1;2", + "aff_campus_unique": "Melbourne;Palo Alto;Toulouse", + "aff_country_unique_index": "0+0;0+0;0+0;1;2", + "aff_country_unique": "Australia;United States;France" + }, + { + "id": "article-26867", + "title": "Embedding a Long Short-Term Memory Network in a Constraint Programming Framework for Tomato Greenhouse Optimisation", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Increasing global food demand, accompanied by the limited number of expert growers, brings the need for more sustainable and efficient horticulture. The controlled environment of greenhouses enable data collection and precise control. For optimally controlling the greenhouse climate, a grower not only looks at crop production, but rather aims at maximising the profit. However this is a complex, long term optimisation task. 
In this paper, Constraint Programming (CP) is applied to task of optimal greenhouse economic control, by leveraging a learned greenhouse climate model through a CP embedding. In collaboration with an industrial partner, we demonstrate how to model the greenhouse climate with an LSTM model, embed this LSTM into a CP optimisation framework, and optimise the expected profit of the grower. This data-to-decision pipeline is being integrated into a decision support system for multiple greenhouses in the Netherlands.", + "primary_area": "emerging applications of ai", + "author": "Dirk van Bokkem; Max van den Hemel; Sebastijan Duman\u010di\u0107; Neil Yorke-Smith", + "authorids": "", + "aff": "Delft University of Technology, Delft, The Netherlands+Delphy B.V., Bleiswijk, The Netherlands; Delphy B.V., Bleiswijk, The Netherlands; Delft University of Technology, Delft, The Netherlands; Delft University of Technology, Delft, The Netherlands", + "bibtex": "@article{van Bokkem_van den Hemel_Duman\u010di\u0107_Yorke-Smith_2024, title={Embedding a Long Short-Term Memory Network in a Constraint Programming Framework for Tomato Greenhouse Optimisation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26867}, DOI={10.1609/aaai.v37i13.26867}, abstractNote={Increasing global food demand, accompanied by the limited number of expert growers, brings the need for more sustainable and efficient horticulture. The controlled environment of greenhouses enable data collection and precise control. For optimally controlling the greenhouse climate, a grower not only looks at crop production, but rather aims at maximising the profit. However this is a complex, long term optimisation task. In this paper, Constraint Programming (CP) is applied to task of optimal greenhouse economic control, by leveraging a learned greenhouse climate model through a CP embedding. 
In collaboration with an industrial partner, we demonstrate how to model the greenhouse climate with an LSTM model, embed this LSTM into a CP optimisation framework, and optimise the expected profit of the grower. This data-to-decision pipeline is being integrated into a decision support system for multiple greenhouses in the Netherlands.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={van Bokkem, Dirk and van den Hemel, Max and Duman\u010di\u0107, Sebastijan and Yorke-Smith, Neil}, year={2024}, month={Jul.}, pages={15731-15737} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26867/26639", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26867", + "pdf_size": 561509, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10116869383039864169&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "live.nl;delphy.nl;tudelft.nl;tudelft.nl", + "email": "live.nl;delphy.nl;tudelft.nl;tudelft.nl", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;0;0", + "aff_unique_norm": "Delft University of Technology;Delphy B.V.", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tudelft.nl;", + "aff_unique_abbr": "TUDelft;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Delft;", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "The Netherlands" + }, + { + "id": "article-26928", + "title": "Embodied, Intelligent Communication for Multi-Agent Cooperation", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "High-performing human teams leverage intelligent and efficient communication and coordination strategies to collaboratively maximize their joint utility. Inspired by teaming behaviors among humans, I seek to develop computational methods for synthesizing intelligent communication and coordination strategies for collaborative multi-robot systems. 
I leverage both classical model-based control and planning approaches as well as data-driven methods such as Multi-Agent Reinforcement Learning (MARL) to provide several contributions towards enabling emergent cooperative teaming behavior across both homogeneous and heterogeneous (including agents with different capabilities) robot teams.", + "primary_area": "", + "author": "Esmaeil Seraj", + "authorids": "", + "aff": "Georgia Institute of Technology", + "bibtex": "@article{Seraj_2024, title={Embodied, Intelligent Communication for Multi-Agent Cooperation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26928}, DOI={10.1609/aaai.v37i13.26928}, abstractNote={High-performing human teams leverage intelligent and efficient communication and coordination strategies to collaboratively maximize their joint utility. Inspired by teaming behaviors among humans, I seek to develop computational methods for synthesizing intelligent communication and coordination strategies for collaborative multi-robot systems. 
I leverage both classical model-based control and planning approaches as well as data-driven methods such as Multi-Agent Reinforcement Learning (MARL) to provide several contributions towards enabling emergent cooperative teaming behavior across both homogeneous and heterogeneous (including agents with different capabilities) robot teams.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Seraj, Esmaeil}, year={2024}, month={Jul.}, pages={16135-16136} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26928/26700", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26928", + "pdf_size": 56525, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17440268284612167026&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gatech.edu", + "email": "gatech.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26383", + "title": "Emergence of Punishment in Social Dilemma with Environmental Feedback", + "track": "main", + "status": "Technical", + "abstract": "Altruistic punishment (or punishment) has been extensively shown as an important mechanism for promoting cooperation in human societies. In AI, the emergence of punishment has received much recent interest. In this paper, we contribute with a novel evolutionary game theoretic model to study the impacts of environmental feedback. Whereas a population of agents plays public goods games, there exists a third-party population whose payoffs depend not only on whether to punish or not, but also on the state of the environment (e.g., how cooperative the agents in a social dilemma are). 
Focusing on one-shot public goods games, we show that environmental feedback, by itself, can lead to the emergence of punishment. We analyze the co-evolution of punishment and cooperation, and derive conditions for their co-presence, co-dominance and co-extinction. Moreover, we show that the system can exhibit bistability as well as cyclic dynamics. Our findings provide a new explanation for the emergence of punishment. On the other hand, our results also alert the need for careful design of implementing punishment in multi-agent systems, as the resulting evolutionary dynamics can be somewhat complex.", + "primary_area": "multiagent systems", + "author": "Zhen Wang; Zhao Song; Chen Shen; Shuyue Hu", + "authorids": "", + "aff": "School of Mechanical Engineering, Northwestern Polytechnical University + School of Artifcial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University; School of Mechanical Engineering, Northwestern Polytechnical University + School of Artifcial Intelligence, OPtics and ElectroNics (iOPEN), Northwestern Polytechnical University; Faculty of Engineering Sciences, Kyushu University; Shanghai Artifcial Intelligence Laboratory", + "bibtex": "@article{Wang_Song_Shen_Hu_2023, title={Emergence of Punishment in Social Dilemma with Environmental Feedback}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26383}, DOI={10.1609/aaai.v37i10.26383}, abstractNote={Altruistic punishment (or punishment) has been extensively shown as an important mechanism for promoting cooperation in human societies. In AI, the emergence of punishment has received much recent interest. In this paper, we contribute with a novel evolutionary game theoretic model to study the impacts of environmental feedback. 
Whereas a population of agents plays public goods games, there exists a third-party population whose payoffs depend not only on whether to punish or not, but also on the state of the environment (e.g., how cooperative the agents in a social dilemma are). Focusing on one-shot public goods games, we show that environmental feedback, by itself, can lead to the emergence of punishment. We analyze the co-evolution of punishment and cooperation, and derive conditions for their co-presence, co-dominance and co-extinction. Moreover, we show that the system can exhibit bistability as well as cyclic dynamics. Our findings provide a new explanation for the emergence of punishment. On the other hand, our results also alert the need for careful design of implementing punishment in multi-agent systems, as the resulting evolutionary dynamics can be somewhat complex.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zhen and Song, Zhao and Shen, Chen and Hu, Shuyue}, year={2023}, month={Jun.}, pages={11708-11716} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26383/26155", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26383", + "pdf_size": 1226514, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17116331596538957852&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "nwpu.edu.cn;mail.nwpu.edu.cn; ;pjlab.org.cn", + "email": "nwpu.edu.cn;mail.nwpu.edu.cn; ;pjlab.org.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;1;2", + "aff_unique_norm": "Northwestern Polytechnical University;Kyushu University;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": "School of Mechanical Engineering;Faculty of Engineering Sciences;", + "aff_unique_url": "https://www.nwpu.edu.cn;https://www.kyushu-u.ac.jp;http://www.shanghaiai.cn", + "aff_unique_abbr": "NWPU;Kyushu U;SHAIC", + "aff_campus_unique_index": ";", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;0", + "aff_country_unique": "China;Japan" + }, + { + "id": "article-26363", + "title": "Emergent Quantized Communication", + "track": "main", + "status": "Technical", + "abstract": "The field of emergent communication aims to understand the characteristics of communication as it emerges from artificial agents solving tasks that require information exchange. Communication with discrete messages is considered a desired characteristic, for scientific and applied reasons. However, training a multi-agent system with discrete communication is not straightforward, requiring either reinforcement learning algorithms or relaxing the discreteness requirement via a continuous approximation such as the Gumbel-softmax. Both these solutions result in poor performance compared to fully continuous communication. In this work, we propose an alternative approach to achieve discrete communication -- quantization of communicated message. Using message quantization allows us to train the model end-to-end, achieving superior performance in multiple setups. Moreover, quantization is a natural framework that runs the gamut from continuous to discrete communication. Thus, it sets the ground for a broader view of multi-agent communication in the deep learning era.", + "primary_area": "multiagent systems", + "author": "Boaz Carmeli; Ron Meir; Yonatan Belinkov", + "authorids": "", + "aff": "Technion \u2013 Israel Institute of Technology; Technion \u2013 Israel Institute of Technology; Technion \u2013 Israel Institute of Technology", + "bibtex": "@article{Carmeli_Meir_Belinkov_2023, title={Emergent Quantized Communication}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26363}, DOI={10.1609/aaai.v37i10.26363}, abstractNote={The field of emergent communication aims to understand the characteristics of communication as it emerges from artificial agents solving tasks that require information exchange. 
Communication with discrete messages is considered a desired characteristic, for scientific and applied reasons. However, training a multi-agent system with discrete communication is not straightforward, requiring either reinforcement learning algorithms or relaxing the discreteness requirement via a continuous approximation such as the Gumbel-softmax. Both these solutions result in poor performance compared to fully continuous communication. In this work, we propose an alternative approach to achieve discrete communication -- quantization of communicated message. Using message quantization allows us to train the model end-to-end, achieving superior performance in multiple setups. Moreover, quantization is a natural framework that runs the gamut from continuous to discrete communication. Thus, it sets the ground for a broader view of multi-agent communication in the deep learning era.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carmeli, Boaz and Meir, Ron and Belinkov, Yonatan}, year={2023}, month={Jun.}, pages={11533-11541} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26363/26135", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26363", + "pdf_size": 286516, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2723602116838146284&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "campus.technion.ac.il;ee.technion.ac.il;technion.ac.il", + "email": "campus.technion.ac.il;ee.technion.ac.il;technion.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Technion \u2013 Israel Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.technion.ac.il/en/", + "aff_unique_abbr": "Technion", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-26911", + 
"title": "Emotion-Aware Music Recommendation", + "track": "eaai symposium human aware ai in sound and music", + "status": "Technical", + "abstract": "It is common to listen to songs that match one's mood. Thus, an AI music recommendation system that is aware of the user's emotions is likely to provide a superior user experience to one that is unaware. In this paper, we present an emotion-aware music recommendation system. Multiple models are discussed and evaluated for affect identification from a live image of the user. We propose two models: DRViT, which applies dynamic routing to vision transformers, and InvNet50, which uses involution. All considered models are trained and evaluated on the AffectNet dataset. Each model outputs the user's estimated valence and arousal under the circumplex model of affect. These values are compared to the valence and arousal values for songs in a Spotify dataset, and the top-five closest-matching songs are presented to the user. Experimental results of the models and user testing are presented.", + "primary_area": "", + "author": "Hieu Tran; Tuan Le; Anh Do; Tram Vu; Steven Bogaerts; Brian Howard", + "authorids": "", + "aff": "DePauw University; DePauw University; DePauw University; DePauw University; DePauw University; DePauw University", + "bibtex": "@article{Tran_Le_Do_Vu_Bogaerts_Howard_2024, title={Emotion-Aware Music Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26911}, DOI={10.1609/aaai.v37i13.26911}, abstractNote={It is common to listen to songs that match one\u2019s mood. Thus, an AI music recommendation system that is aware of the user\u2019s emotions is likely to provide a superior user experience to one that is unaware. In this paper, we present an emotion-aware music recommendation system. Multiple models are discussed and evaluated for affect identification from a live image of the user. 
We propose two models: DRViT, which applies dynamic routing to vision transformers, and InvNet50, which uses involution. All considered models are trained and evaluated on the AffectNet dataset. Each model outputs the user\u2019s estimated valence and arousal under the circumplex model of affect. These values are compared to the valence and arousal values for songs in a Spotify dataset, and the top-five closest-matching songs are presented to the user. Experimental results of the models and user testing are presented.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tran, Hieu and Le, Tuan and Do, Anh and Vu, Tram and Bogaerts, Steven and Howard, Brian}, year={2024}, month={Jul.}, pages={16087-16095} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26911/26683", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26911", + "pdf_size": 786779, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4465680324610832964&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "depauw.edu;depauw.edu;depauw.edu;depauw.edu;depauw.edu;depauw.edu", + "email": "depauw.edu;depauw.edu;depauw.edu;depauw.edu;depauw.edu;depauw.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "DePauw University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.depauw.edu", + "aff_unique_abbr": "DePauw", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25959", + "title": "Enabling Knowledge Refinement upon New Concepts in Abductive Learning", + "track": "main", + "status": "Technical", + "abstract": "Recently there are great efforts on leveraging machine learning and logical reasoning. 
Many approaches start from a given knowledge base, and then try to utilize the knowledge to help machine learning. In real practice, however, the given knowledge base can often be incomplete or even noisy, and thus, it is crucial to develop the ability of knowledge refinement or enhancement. This paper proposes to enable the Abductive learning (ABL) paradigm to have the ability of knowledge refinement/enhancement. In particular, we focus on the problem that, in contrast to closed-environment tasks where a fixed set of symbols are enough to represent the concepts in the domain, in open-environment tasks new concepts may emerge. Ignoring those new concepts can lead to significant performance decay, whereas it is challenging to identify new concepts and add them to the existing knowledge base with potential conflicts resolved. We propose the ABL_nc approach which exploits machine learning in ABL to identify new concepts from data, exploits knowledge graph to match them with entities, and refines existing knowledge base to resolve conflicts. The refined/enhanced knowledge base can then be used in the next loop of ABL and help improve the performance of machine learning. 
Experiments on three neuro-symbolic learning tasks verified the effectiveness of the proposed approach.", + "primary_area": "machine learning ii", + "author": "Yu-Xuan Huang; Wang-Zhou Dai; Yuan Jiang; Zhi-Hua Zhou", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Huang_Dai_Jiang_Zhou_2023, title={Enabling Knowledge Refinement upon New Concepts in Abductive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25959}, DOI={10.1609/aaai.v37i7.25959}, abstractNote={Recently there are great efforts on leveraging machine learning and logical reasoning. Many approaches start from a given knowledge base, and then try to utilize the knowledge to help machine learning. In real practice, however, the given knowledge base can often be incomplete or even noisy, and thus, it is crucial to develop the ability of knowledge refinement or enhancement. This paper proposes to enable the Abductive learning (ABL) paradigm to have the ability of knowledge refinement/enhancement. In particular, we focus on the problem that, in contrast to closed-environment tasks where a fixed set of symbols are enough to represent the concepts in the domain, in open-environment tasks new concepts may emerge. Ignoring those new concepts can lead to significant performance decay, whereas it is challenging to identify new concepts and add them to the existing knowledge base with potential conflicts resolved. 
We propose the ABL_nc approach which exploits machine learning in ABL to identify new concepts from data, exploits knowledge graph to match them with entities, and refines existing knowledge base to resolve conflicts. The refined/enhanced knowledge base can then be used in the next loop of ABL and help improve the performance of machine learning. Experiments on three neuro-symbolic learning tasks verified the effectiveness of the proposed approach.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Yu-Xuan and Dai, Wang-Zhou and Jiang, Yuan and Zhou, Zhi-Hua}, year={2023}, month={Jun.}, pages={7928-7935} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25959/25731", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25959", + "pdf_size": 522224, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12950337836085760940&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 5, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26480", + "title": "End-to-End Deep Reinforcement Learning for Conversation Disentanglement", + "track": "main", + "status": "Technical", + "abstract": "Collaborative Communication platforms (e.g., Slack) support multi-party conversations which contain a large number of messages on shared channels. Multiple conversations intermingle within these messages. 
The task of conversation disentanglement is to cluster these intermingled messages into conversations. Existing approaches are trained using loss functions that optimize only local decisions, i.e. predicting reply-to links for each message and thereby creating clusters of conversations. In this work, we propose an end-to-end reinforcement learning (RL) approach that directly optimizes a global metric. We observe that using existing global metrics such as variation of information and adjusted rand index as a reward for the RL agent deteriorates its performance. This behaviour is because these metrics completely ignore the reply-to links between messages (local decisions) during reward computation. Therefore, we propose a novel thread-level reward function that captures the global metric without ignoring the local decisions. Through experiments on the Ubuntu IRC dataset, we demonstrate that the proposed RL model improves the performance on both link-level and conversation-level metrics.", + "primary_area": "speech natural language processing", + "author": "Karan Bhukar; Harshit Kumar; Dinesh Raghu; Ajay Gupta", + "authorids": "", + "aff": "IBM Research; IBM Research; IBM Research; Meta", + "bibtex": "@article{Bhukar_Kumar_Raghu_Gupta_2023, title={End-to-End Deep Reinforcement Learning for Conversation Disentanglement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26480}, DOI={10.1609/aaai.v37i11.26480}, abstractNote={Collaborative Communication platforms (e.g., Slack) support multi-party conversations which contain a large number of messages on shared channels. Multiple conversations intermingle within these messages. The task of conversation disentanglement is to cluster these intermingled messages into conversations. Existing approaches are trained using loss functions that optimize only local decisions, i.e. predicting reply-to links for each message and thereby creating clusters of conversations. 
In this work, we propose an end-to-end reinforcement learning (RL) approach that directly optimizes a global metric. We observe that using existing global metrics such as variation of information and adjusted rand index as a reward for the RL agent deteriorates its performance. This behaviour is because these metrics completely ignore the reply-to links between messages (local decisions) during reward computation. Therefore, we propose a novel thread-level reward function that captures the global metric without ignoring the local decisions. Through experiments on the Ubuntu IRC dataset, we demonstrate that the proposed RL model improves the performance on both link-level and conversation-level metrics.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bhukar, Karan and Kumar, Harshit and Raghu, Dinesh and Gupta, Ajay}, year={2023}, month={Jun.}, pages={12571-12579} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26480/26252", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26480", + "pdf_size": 920373, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10390593774498094531&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "ibm.com;in.ibm.com;in.ibm.com;fb.com", + "email": "ibm.com;in.ibm.com;in.ibm.com;fb.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "IBM;Meta Platforms, Inc.", + "aff_unique_dep": "IBM Research;", + "aff_unique_url": "https://www.ibm.com/research;https://meta.com", + "aff_unique_abbr": "IBM;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25534", + "title": "End-to-End Entity Linking with Hierarchical Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Entity linking (EL) is the task of linking the text 
segments to the referring entities in the knowledge graph, typically decomposed into mention detection, and entity disambiguation. Compared to traditional methods treating the two tasks separately, recent end-to-end entity linking methods exploit the mutual dependency between mentions and entities to achieve better performance. However, existing end-to-end EL methods have problems utilizing the dependency of mentions and entities in the task. To this end, we propose to model the EL task as a hierarchical decision-making process and design a hierarchical reinforcement learning algorithm to solve the problem. We conduct extensive experiments to show that the proposed method achieves state-of-the-art performance in several EL benchmark datasets. Our code is publicly available at https://github.com/lhlclhl/he2eel.", + "primary_area": "data mining and knowledge management", + "author": "Lihan Chen; Tinghui Zhu; Jingping Liu; Jiaqing Liang; Yanghua Xiao", + "authorids": "", + "aff": "Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; East China University of Science and Technology, Shanghai, China; School of Data Science, Fudan University, China; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University + Fudan-Aishu Cognitive Intelligence Joint Research Center", + "bibtex": "@article{Chen_Zhu_Liu_Liang_Xiao_2023, title={End-to-End Entity Linking with Hierarchical Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25534}, DOI={10.1609/aaai.v37i4.25534}, abstractNote={Entity linking (EL) is the task of linking the text segments to the referring entities in the knowledge graph, typically decomposed into mention detection, and entity disambiguation. 
Compared to traditional methods treating the two tasks separately, recent end-to-end entity linking methods exploit the mutual dependency between mentions and entities to achieve better performance. However, existing end-to-end EL methods have problems utilizing the dependency of mentions and entities in the task. To this end, we propose to model the EL task as a hierarchical decision-making process and design a hierarchical reinforcement learning algorithm to solve the problem. We conduct extensive experiments to show that the proposed method achieves state-of-the-art performance in several EL benchmark datasets. Our code is publicly available at https://github.com/lhlclhl/he2eel.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Lihan and Zhu, Tinghui and Liu, Jingping and Liang, Jiaqing and Xiao, Yanghua}, year={2023}, month={Jun.}, pages={4173-4181} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25534/25306", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25534", + "pdf_size": 285638, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4731049857448566709&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;fudan.edu.cn;ecust.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "gmail.com;fudan.edu.cn;ecust.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "https://github.com/lhlclhl/he2eel", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0+0", + "aff_unique_norm": "Fudan University;East China University of Science and Technology", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.fudan.edu.cn;http://www.ecust.edu.cn", + "aff_unique_abbr": "Fudan;ECUST", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25884", + "title": "End-to-End Learning for 
Optimization via Constraint-Enforcing Approximators", + "track": "main", + "status": "Technical", + "abstract": "In many real-world applications, predictive methods are used to provide inputs for downstream optimization problems. It has been shown that using the downstream task-based objective to learn the intermediate predictive model is often better than using only intermediate task objectives, such as prediction error. The learning task in the former approach is referred to as end-to-end learning. The difficulty in end-to-end learning lies in differentiating through the optimization problem. Therefore, we propose a neural network architecture that can learn to approximately solve these optimization problems, particularly ensuring its output satisfies the feasibility constraints via alternate projections. We show these projections converge at a geometric rate to the exact projection. Our approach is more computationally efficient than existing methods as we do not need to solve the original optimization problem at each iteration. Furthermore, our approach can be applied to a wider range of optimization problems. We apply this to a shortest path problem for which the first stage forecasting problem is a computer vision task of predicting edge costs from terrain maps, a capacitated multi-product newsvendor problem, and a maximum matching problem. We show that this method out-performs existing approaches in terms of final task-based loss and training time.", + "primary_area": "machine learning i", + "author": "Rares Cristian; Pavithra Harsha; Georgia Perakis; Brian L Quanz; Ioannis Spantidakis", + "authorids": "", + "aff": "Massachusetts Institute of Technology; IBM T. J Watson Research Center; Massachusetts Institute of Technology; IBM T. 
J Watson Research Center; Massachusetts Institute of Technology", + "bibtex": "@article{Cristian_Harsha_Perakis_Quanz_Spantidakis_2023, title={End-to-End Learning for Optimization via Constraint-Enforcing Approximators}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25884}, DOI={10.1609/aaai.v37i6.25884}, abstractNote={In many real-world applications, predictive methods are used to provide inputs for downstream optimization problems. It has been shown that using the downstream task-based objective to learn the intermediate predictive model is often better than using only intermediate task objectives, such as prediction error. The learning task in the former approach is referred to as end-to-end learning. The difficulty in end-to-end learning lies in differentiating through the optimization problem. Therefore, we propose a neural network architecture that can learn to approximately solve these optimization problems, particularly ensuring its output satisfies the feasibility constraints via alternate projections. We show these projections converge at a geometric rate to the exact projection. Our approach is more computationally efficient than existing methods as we do not need to solve the original optimization problem at each iteration. Furthermore, our approach can be applied to a wider range of optimization problems. We apply this to a shortest path problem for which the first stage forecasting problem is a computer vision task of predicting edge costs from terrain maps, a capacitated multi-product newsvendor problem, and a maximum matching problem. 
We show that this method out-performs existing approaches in terms of final task-based loss and training time.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cristian, Rares and Harsha, Pavithra and Perakis, Georgia and Quanz, Brian L and Spantidakis, Ioannis}, year={2023}, month={Jun.}, pages={7253-7260} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25884/25656", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25884", + "pdf_size": 383350, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13365720749558195101&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mit.edu;us.ibm.com;mit.edu;us.ibm.com;mit.edu", + "email": "mit.edu;us.ibm.com;mit.edu;us.ibm.com;mit.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;IBM", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.ibm.com/research/watson", + "aff_unique_abbr": "MIT;IBM", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";T. J Watson Research Center", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26870", + "title": "End-to-End Pipeline for Trigger Detection on Hit and Track Graphs", + "track": "iaai technical track", + "status": "Technical", + "abstract": "There has been a surge of interest in applying deep learning in particle and nuclear physics to replace labor-intensive offline data analysis with automated online machine learning tasks. This paper details a novel AI-enabled triggering solution for physics experiments in Relativistic Heavy Ion Collider and future Electron-Ion Collider. 
The triggering system consists of a comprehensive end-to-end pipeline based on Graph Neural Networks that classifies trigger events versus background events, makes online decisions to retain signal data, and enables efficient data acquisition. The triggering system first starts with the coordinates of pixel hits lit up by passing particles in the detector, applies three stages of event processing (hits clustering, track reconstruction, and trigger detection), and labels all processed events with the binary tag of trigger versus background events. By switching among different objective functions, we train the Graph Neural Networks in the pipeline to solve multiple tasks: the edge-level track reconstruction problem, the edge-level track adjacency matrix prediction, and the graph-level trigger detection problem. We propose a novel method to treat the events as track-graphs instead of hit-graphs. This method focuses on intertrack relations and is driven by underlying physics processing. As a result, it attains a solid performance (around 72% accuracy) for trigger detection and outperforms the baseline method using hit-graphs by 2% higher accuracy.", + "primary_area": "emerging applications of ai", + "author": "Tingting Xuan; Yimin Zhu; Giorgian Borca-Tasciuc; Ming Xiong Liu; Yu Sun; Cameron Dean; Yasser Corrales Morales; Zhaozhong Shi; Dantong Yu", + "authorids": "", + "aff": "Stony Brook University; Stony Brook University; Sunrise Technology Inc.; Los Alamos National Laboratory; Sunrise Technology Inc.; Los Alamos National Laboratory; Los Alamos National Laboratory; Los Alamos National Laboratory; New Jersey Institute of Technology", + "bibtex": "@article{Xuan_Zhu_Borca-Tasciuc_Liu_Sun_Dean_Morales_Shi_Yu_2024, title={End-to-End Pipeline for Trigger Detection on Hit and Track Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26870}, DOI={10.1609/aaai.v37i13.26870}, abstractNote={There has been a surge of interest in applying deep learning in 
particle and nuclear physics to replace labor-intensive offline data analysis with automated online machine learning tasks. This paper details a novel AI-enabled triggering solution for physics experiments in Relativistic Heavy Ion Collider and future Electron-Ion Collider. The triggering system consists of a comprehensive end-to-end pipeline based on Graph Neural Networks that classifies trigger events versus background events, makes online decisions to retain signal data, and enables efficient data acquisition. The triggering system first starts with the coordinates of pixel hits lit up by passing particles in the detector, applies three stages of event processing (hits clustering, track reconstruction, and trigger detection), and labels all processed events with the binary tag of trigger versus background events. By switching among different objective functions, we train the Graph Neural Networks in the pipeline to solve multiple tasks: the edge-level track reconstruction problem, the edge-level track adjacency matrix prediction, and the graph-level trigger detection problem. We propose a novel method to treat the events as track-graphs instead of hit-graphs. This method focuses on intertrack relations and is driven by underlying physics processing. 
As a result, it attains a solid performance (around 72% accuracy) for trigger detection and outperforms the baseline method using hit-graphs by 2% higher accuracy.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xuan, Tingting and Zhu, Yimin and Borca-Tasciuc, Giorgian and Liu, Ming Xiong and Sun, Yu and Dean, Cameron and Morales, Yasser Corrales and Shi, Zhaozhong and Yu, Dantong}, year={2024}, month={Jul.}, pages={15752-15758} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26870/26642", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26870", + "pdf_size": 3174341, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8001271488852043248&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "stonybrook.edu;stonybrook.edu;sunriseaitech.com;lanl.gov;sunriseaitech.com;lanl.gov;lanl.gov;lanl.gov;njit.edu", + "email": "stonybrook.edu;stonybrook.edu;sunriseaitech.com;lanl.gov;sunriseaitech.com;lanl.gov;lanl.gov;lanl.gov;njit.edu", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;2;1;2;2;2;3", + "aff_unique_norm": "Stony Brook University;Sunrise Technology Inc.;Los Alamos National Laboratory;New Jersey Institute of Technology", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.stonybrook.edu;;https://www.lanl.gov;https://www.njit.edu", + "aff_unique_abbr": "SBU;;LANL;NJIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25385", + "title": "End-to-End Zero-Shot HOI Detection via Vision and Language Knowledge Distillation", + "track": "main", + "status": "Technical", + "abstract": "Most existing Human-Object Interaction (HOI) Detection methods rely heavily on full annotations with predefined HOI categories, which is limited in diversity and costly to scale further. 
We aim at advancing zero-shot HOI detection to detect both seen and unseen HOIs simultaneously. The fundamental challenges are to discover potential human-object pairs and identify novel HOI categories. To overcome the above challenges, we propose a novel End-to-end zero-shot HOI Detection (EoID) framework via vision-language knowledge distillation. We first design an Interactive Score module combined with a Two-stage Bipartite Matching algorithm to achieve interaction distinguishment for human-object pairs in an action-agnostic manner.\nThen we transfer the distribution of action probability from the pretrained vision-language teacher as well as the seen ground truth to the HOI model to attain zero-shot HOI classification. Extensive experiments on HICO-Det dataset demonstrate that our model discovers potential interactive pairs and enables the recognition of unseen HOIs. Finally, our method outperforms the previous SOTA under various zero-shot settings. Moreover, our method is generalizable to large-scale object detection data to further scale up the action sets. 
The source code is available at: https://github.com/mrwu-mac/EoID.", + "primary_area": "computer vision iii", + "author": "Mingrui Wu; Jiaxin Gu; Yunhang Shen; Mingbao Lin; Chao Chen; Xiaoshuai Sun", + "authorids": "", + "aff": "MAC Lab, School of Informatics, Xiamen University + Youtu Lab, Tencent; VIS, Baidu Inc.; Youtu Lab, Tencent; Youtu Lab, Tencent; Youtu Lab, Tencent; MAC Lab, School of Informatics, Xiamen University + Institute of Artificial Intelligence, Xiamen University + Fujian Engineering Research Center of Trusted Artificial Intelligence Analysis and Application, Xiamen University", + "bibtex": "@article{Wu_Gu_Shen_Lin_Chen_Sun_2023, title={End-to-End Zero-Shot HOI Detection via Vision and Language Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25385}, DOI={10.1609/aaai.v37i3.25385}, abstractNote={Most existing Human-Object Interaction (HOI) Detection methods rely heavily on full annotations with predefined HOI categories, which is limited in diversity and costly to scale further. We aim at advancing zero-shot HOI detection to detect both seen and unseen HOIs simultaneously. The fundamental challenges are to discover potential human-object pairs and identify novel HOI categories. To overcome the above challenges, we propose a novel End-to-end zero-shot HOI Detection (EoID) framework via vision-language knowledge distillation. We first design an Interactive Score module combined with a Two-stage Bipartite Matching algorithm to achieve interaction distinguishment for human-object pairs in an action-agnostic manner.\nThen we transfer the distribution of action probability from the pretrained vision-language teacher as well as the seen ground truth to the HOI model to attain zero-shot HOI classification. Extensive experiments on HICO-Det dataset demonstrate that our model discovers potential interactive pairs and enables the recognition of unseen HOIs. 
Finally, our method outperforms the previous SOTA under various zero-shot settings. Moreover, our method is generalizable to large-scale object detection data to further scale up the action sets. The source code is available at: https://github.com/mrwu-mac/EoID.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Mingrui and Gu, Jiaxin and Shen, Yunhang and Lin, Mingbao and Chen, Chao and Sun, Xiaoshuai}, year={2023}, month={Jun.}, pages={2839-2846} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25385/25157", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25385", + "pdf_size": 1431906, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4608053627772217831&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;baidu.com;gmail.com;outlook.com;tencent.com;xmu.edu.cn", + "email": "gmail.com;baidu.com;gmail.com;outlook.com;tencent.com;xmu.edu.cn", + "github": "https://github.com/mrwu-mac/EoID", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;1;1;1;0+0+0", + "aff_unique_norm": "Xiamen University;Tencent;Baidu Inc.", + "aff_unique_dep": "School of Informatics;Youtu Lab;VIS", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.tencent.com;https://www.baidu.com", + "aff_unique_abbr": "XMU;Tencent;Baidu", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25978", + "title": "Energy-Motivated Equivariant Pretraining for 3D Molecular Graphs", + "track": "main", + "status": "Technical", + "abstract": "Pretraining molecular representation models without labels is fundamental to various applications. Conventional methods mainly process 2D molecular graphs and focus solely on 2D tasks, making their pretrained models incapable of characterizing 3D geometry and thus defective for downstream 3D tasks. 
In this work, we tackle 3D molecular pretraining in a complete and novel sense. In particular, we first propose to adopt an equivariant energy-based model as the backbone for pretraining, which enjoys the merits of fulfilling the symmetry of 3D space. Then we develop a node-level pretraining loss for force prediction, where we further exploit the Riemann-Gaussian distribution to ensure the loss to be E(3)-invariant, enabling more robustness. Moreover, a graph-level noise scale prediction task is also leveraged to further promote the eventual performance. We evaluate our model pretrained from a large-scale 3D dataset GEOM-QM9 on two challenging 3D benchmarks: MD17 and QM9. Experimental results demonstrate the efficacy of our method against current state-of-the-art pretraining approaches, and verify the validity of our design for each proposed component. Code is available at https://github.com/jiaor17/3D-EMGP.", + "primary_area": "machine learning ii", + "author": "Rui Jiao; Jiaqi Han; Wenbing Huang; Yu Rong; Yang Liu", + "authorids": "", + "aff": "Beijing National Research Center for Information Science and Technology (BNRist), Department of Computer Science and Technology, Tsinghua University+Institute for AI Industry Research (AIR), Tsinghua University+Beijing Academy of Artificial Intelligence; Beijing National Research Center for Information Science and Technology (BNRist), Department of Computer Science and Technology, Tsinghua University+Institute for AI Industry Research (AIR), Tsinghua University; Gaoling School of Artificial Intelligence, Renmin University of China+Beijing Key Laboratory of Big Data Management and Analysis Methods; Tencent AI Lab; Beijing National Research Center for Information Science and Technology (BNRist), Department of Computer Science and Technology, Tsinghua University+Institute for AI Industry Research (AIR), Tsinghua University+Beijing Academy of Artificial Intelligence", + "bibtex": "@article{Jiao_Han_Huang_Rong_Liu_2023, 
title={Energy-Motivated Equivariant Pretraining for 3D Molecular Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25978}, DOI={10.1609/aaai.v37i7.25978}, abstractNote={Pretraining molecular representation models without labels is fundamental to various applications. Conventional methods mainly process 2D molecular graphs and focus solely on 2D tasks, making their pretrained models incapable of characterizing 3D geometry and thus defective for downstream 3D tasks. In this work, we tackle 3D molecular pretraining in a complete and novel sense. In particular, we first propose to adopt an equivariant energy-based model as the backbone for pretraining, which enjoys the merits of fulfilling the symmetry of 3D space. Then we develop a node-level pretraining loss for force prediction, where we further exploit the Riemann-Gaussian distribution to ensure the loss to be E(3)-invariant, enabling more robustness. Moreover, a graph-level noise scale prediction task is also leveraged to further promote the eventual performance. We evaluate our model pretrained from a large-scale 3D dataset GEOM-QM9 on two challenging 3D benchmarks: MD17 and QM9. Experimental results demonstrate the efficacy of our method against current state-of-the-art pretraining approaches, and verify the validity of our design for each proposed component. 
Code is available at https://github.com/jiaor17/3D-EMGP.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiao, Rui and Han, Jiaqi and Huang, Wenbing and Rong, Yu and Liu, Yang}, year={2023}, month={Jun.}, pages={8096-8104} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25978/25750", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25978", + "pdf_size": 2330169, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18398894428450506253&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;126.com;hotmail.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;126.com;hotmail.com;tsinghua.edu.cn", + "github": "https://github.com/jiaor17/3D-EMGP", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+1;0+0;2+3;4;0+0+1", + "aff_unique_norm": "Tsinghua University;Beijing Academy of Artificial Intelligence;Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods;Tencent", + "aff_unique_dep": "Department of Computer Science and Technology;;Gaoling School of Artificial Intelligence;Big Data Management and Analysis;Tencent AI Lab", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.baaic.cn;http://www.ruc.edu.cn;;https://ai.tencent.com", + "aff_unique_abbr": "THU;BAAI;RUC;;Tencent AI Lab", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0+0;0+0;0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26828", + "title": "Enhance Robustness of Machine Learning with Improved Efficiency", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Robustness of machine learning, often referring to securing performance on different data, is always an active field due to the ubiquitous variety and diversity of data in practice. 
Many studies have been investigated to enhance the learning process robust in recent years. To this end, there is usually a trade-off that results in somewhat extra cost, e.g., more data samples, more complicated objective functions, more iterations to converge in optimization, etc. Then this problem boils down to finding a better trade-off under some conditions. My recent research focuses on robust machine learning with improved efficiency. Particularly, the efficiency here represents learning speed to find a model, and the number of data required to secure the robustness. In the talk, I will survey three pieces of my recent research by elaborating the algorithmic idea and theoretical analysis as technical contributions --- (i) epoch stochastic gradient descent ascent for min-max problems, (ii) stochastic optimization algorithm for non-convex inf-projection problems, and (iii) neighborhood conformal prediction. In the first two pieces of work, the proposed optimization algorithms are general and cover objective functions for robust machine learning. In the third one, I will elaborate an efficient conformal prediction algorithm that guarantee the robustness of prediction after model is trained. Particularly, the efficiency of conformal prediction is measured by its bandwidth.", + "primary_area": "", + "author": "Yan Yan", + "authorids": "", + "aff": "EECS, Washington State University", + "bibtex": "@article{Yan_2024, title={Enhance Robustness of Machine Learning with Improved Efficiency}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26828}, DOI={10.1609/aaai.v37i13.26828}, abstractNote={Robustness of machine learning, often referring to securing performance on different data, is always an active field due to the ubiquitous variety and diversity of data in practice. Many studies have been investigated to enhance the learning process robust in recent years. 
To this end, there is usually a trade-off that results in somewhat extra cost, e.g., more data samples, more complicated objective functions, more iterations to converge in optimization, etc. Then this problem boils down to finding a better trade-off under some conditions. My recent research focuses on robust machine learning with improved efficiency. Particularly, the efficiency here represents learning speed to find a model, and the number of data required to secure the robustness. In the talk, I will survey three pieces of my recent research by elaborating the algorithmic idea and theoretical analysis as technical contributions --- (i) epoch stochastic gradient descent ascent for min-max problems, (ii) stochastic optimization algorithm for non-convex inf-projection problems, and (iii) neighborhood conformal prediction. In the first two pieces of work, the proposed optimization algorithms are general and cover objective functions for robust machine learning. In the third one, I will elaborate an efficient conformal prediction algorithm that guarantee the robustness of prediction after model is trained. 
Particularly, the efficiency of conformal prediction is measured by its bandwidth.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Yan}, year={2024}, month={Jul.}, pages={15461-15461} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26828/26600", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26828", + "pdf_size": 51257, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3713686778298393621&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "wsu.edu", + "email": "wsu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Washington State University", + "aff_unique_dep": "EECS", + "aff_unique_url": "https://wsu.edu", + "aff_unique_abbr": "WSU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25532", + "title": "Enhanced Multi-Relationships Integration Graph Convolutional Network for Inferring Substitutable and Complementary Items", + "track": "main", + "status": "Technical", + "abstract": "Understanding the relationships between items can improve the accuracy and interpretability of recommender systems. Among these relationships, the substitute and complement relationships attract the most attention in e-commerce platforms. The substitutable items are interchangeable and might be compared with each other before purchasing, while the complementary items are used in conjunction and are usually bought together with the query item. In this paper, we focus on two issues of inferring the substitutable and complementary items: 1) how to model their mutual influence to improve the performance of downstream tasks, 2) how to further discriminate them by considering the strength of relationship for different item pairs. 
We propose a novel multi-task learning framework named Enhanced Multi-Relationships Integration Graph Convolutional Network (EMRIGCN). We regard the relationship inference task as a link prediction task in heterogeneous graph with different types of edges between nodes (items). To model the mutual influence between substitute and complement, EMRIGCN adopts a two-level integration module, i.e., feature and structure integration, based on experts sharing mechanism during message passing. To obtain the strength of relationship for item pairs, we build an auxiliary loss function to further increase or decrease the distances between embeddings of items with weak or strong relation in latent space. Extensive experiments on both public and industrial datasets prove that EMRIGCN significantly outperforms the state-of-the-art solutions. We also conducted A/B tests on real world recommender systems of Meituan Maicai, an online supermarket platform in China, and obtained 15.3% improvement on VBR and 15.34% improvement on RPM.", + "primary_area": "data mining and knowledge management", + "author": "Huajie Chen; Jiyuan He; Weisheng Xu; Tao Feng; Ming Liu; Tianyu Song; Runfeng Yao; Yuanyuan Qiao", + "authorids": "", + "aff": "Meituan Group, Beijing, China; Meituan Group, Beijing, China; Meituan Group, Beijing, China + Beijing University of Posts and Telecommunications, Beijing, China; Meituan Group, Beijing, China; Meituan Group, Beijing, China; Beijing University of Posts and Telecommunications, Beijing, China; Beijing University of Posts and Telecommunications, Beijing, China; Beijing University of Posts and Telecommunications, Beijing, China", + "bibtex": "@article{Chen_He_Xu_Feng_Liu_Song_Yao_Qiao_2023, title={Enhanced Multi-Relationships Integration Graph Convolutional Network for Inferring Substitutable and Complementary Items}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25532}, DOI={10.1609/aaai.v37i4.25532}, abstractNote={Understanding the 
relationships between items can improve the accuracy and interpretability of recommender systems. Among these relationships, the substitute and complement relationships attract the most attention in e-commerce platforms. The substitutable items are interchangeable and might be compared with each other before purchasing, while the complementary items are used in conjunction and are usually bought together with the query item. In this paper, we focus on two issues of inferring the substitutable and complementary items: 1) how to model their mutual influence to improve the performance of downstream tasks, 2) how to further discriminate them by considering the strength of relationship for different item pairs. We propose a novel multi-task learning framework named Enhanced Multi-Relationships Integration Graph Convolutional Network (EMRIGCN). We regard the relationship inference task as a link prediction task in heterogeneous graph with different types of edges between nodes (items). To model the mutual influence between substitute and complement, EMRIGCN adopts a two-level integration module, i.e., feature and structure integration, based on experts sharing mechanism during message passing. To obtain the strength of relationship for item pairs, we build an auxiliary loss function to further increase or decrease the distances between embeddings of items with weak or strong relation in latent space. Extensive experiments on both public and industrial datasets prove that EMRIGCN significantly outperforms the state-of-the-art solutions. 
We also conducted A/B tests on real world recommender systems of Meituan Maicai, an online supermarket platform in China, and obtained 15.3% improvement on VBR and 15.34% improvement on RPM.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Huajie and He, Jiyuan and Xu, Weisheng and Feng, Tao and Liu, Ming and Song, Tianyu and Yao, Runfeng and Qiao, Yuanyuan}, year={2023}, month={Jun.}, pages={4157-4165} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25532/25304", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25532", + "pdf_size": 960768, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9322343525341779853&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "meituan.com;meituan.com;bupt.edu.cn;meituan.com;meituan.com;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "meituan.com;meituan.com;bupt.edu.cn;meituan.com;meituan.com;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0+1;0;0;1;1;1", + "aff_unique_norm": "Meituan Group;Beijing University of Posts and Telecommunications", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.meituan.com;http://www.bupt.edu.cn/", + "aff_unique_abbr": "Meituan;BUPT", + "aff_campus_unique_index": "0;0;0+0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26323", + "title": "Enhanced Tensor Low-Rank and Sparse Representation Recovery for Incomplete Multi-View Clustering", + "track": "main", + "status": "Technical", + "abstract": "Incomplete multi-view clustering (IMVC) has attracted remarkable attention due to the emergence of multi-view data with missing views in real applications. Recent methods attempt to recover the missing information to address the IMVC problem. 
However, they generally cannot fully explore the underlying properties and correlations of data similarities across views. This paper proposes a novel Enhanced Tensor Low-rank and Sparse Representation Recovery (ETLSRR) method, which reformulates the IMVC problem as a joint incomplete similarity graphs learning and complete tensor representation recovery problem. Specifically, ETLSRR learns the intra-view similarity graphs and constructs a 3-way tensor by stacking the graphs to explore the inter-view correlations. To alleviate the negative influence of missing views and data noise, ETLSRR decomposes the tensor into two parts: a sparse tensor and an intrinsic tensor, which models the noise and underlying true data similarities, respectively. Both global low-rank and local structured sparse characteristics of the intrinsic tensor are considered, which enhances the discrimination of similarity matrix. Moreover, instead of using the convex tensor nuclear norm, ETLSRR introduces a generalized non-convex tensor low-rank regularization to alleviate the biased approximation. 
Experiments on several datasets demonstrate the effectiveness of our method compared with the state-of-the-art methods.", + "primary_area": "machine learning iv", + "author": "Chao Zhang; Huaxiong Li; Wei Lv; Zizheng Huang; Yang Gao; Chunlin Chen", + "authorids": "", + "aff": "Department of Control Science and Intelligence Engineering, Nanjing University; Department of Control Science and Intelligence Engineering, Nanjing University; Department of Control Science and Intelligence Engineering, Nanjing University; Department of Control Science and Intelligence Engineering, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; Department of Control Science and Intelligence Engineering, Nanjing University", + "bibtex": "@article{Zhang_Li_Lv_Huang_Gao_Chen_2023, title={Enhanced Tensor Low-Rank and Sparse Representation Recovery for Incomplete Multi-View Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26323}, DOI={10.1609/aaai.v37i9.26323}, abstractNote={Incomplete multi-view clustering (IMVC) has attracted remarkable attention due to the emergence of multi-view data with missing views in real applications. Recent methods attempt to recover the missing information to address the IMVC problem. However, they generally cannot fully explore the underlying properties and correlations of data similarities across views. This paper proposes a novel Enhanced Tensor Low-rank and Sparse Representation Recovery (ETLSRR) method, which reformulates the IMVC problem as a joint incomplete similarity graphs learning and complete tensor representation recovery problem. Specifically, ETLSRR learns the intra-view similarity graphs and constructs a 3-way tensor by stacking the graphs to explore the inter-view correlations. 
To alleviate the negative influence of missing views and data noise, ETLSRR decomposes the tensor into two parts: a sparse tensor and an intrinsic tensor, which models the noise and underlying true data similarities, respectively. Both global low-rank and local structured sparse characteristics of the intrinsic tensor are considered, which enhances the discrimination of similarity matrix. Moreover, instead of using the convex tensor nuclear norm, ETLSRR introduces a generalized non-convex tensor low-rank regularization to alleviate the biased approximation. Experiments on several datasets demonstrate the effectiveness of our method compared with the state-of-the-art methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Chao and Li, Huaxiong and Lv, Wei and Huang, Zizheng and Gao, Yang and Chen, Chunlin}, year={2023}, month={Jun.}, pages={11174-11182} }",
    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26323/26095",
    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26323",
    "pdf_size": 1121180,
    "gs_citation": 71,
    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17250780962723695348&as_sdt=2005&sciodt=0,5&hl=en",
    "gs_version_total": 2,
    "aff_domain": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn",
    "email": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn",
    "github": "",
    "project": "",
    "author_num": 6,
    "aff_unique_index": "0;0;0;0;0;0",
    "aff_unique_norm": "Nanjing University",
    "aff_unique_dep": "Department of Control Science and Intelligence Engineering",
    "aff_unique_url": "https://www.nju.edu.cn",
    "aff_unique_abbr": "Nanjing U",
    "aff_campus_unique_index": "",
    "aff_campus_unique": "",
    "aff_country_unique_index": "0;0;0;0;0;0",
    "aff_country_unique": "China"
  },
  {
    "id": "article-27040",
    "title": "Enhancing Dynamic GCN for Node Attribute Forecasting with Meta Spatial-Temporal Learning (Student Abstract)",
    "track": "aaai student abstract and poster program",
    "status": "Technical",
    "abstract": "Node attribute forecasting has recently attracted considerable attention. Recent attempts have thus far utilize dynamic graph convolutional network (GCN) to predict future node attributes. However, few prior works have notice that the complex spatial and temporal interaction between nodes, which will hamper the performance of dynamic GCN. In this paper, we propose a new dynamic GCN model named meta-DGCN, leveraging meta spatial-temporal tasks to enhance the ability of dynamic GCN for better capturing node attributes in the future. Experiments show that meta-DGCN effectively modeling comprehensive spatio-temporal correlations between nodes and outperforms state-of-the-art baselines on various real-world datasets.",
    "primary_area": "",
    "author": "Bo Wu; Xun Liang; Xiangping Zheng; Jun Wang",
    "authorids": "",
    "aff": "School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China; Swinburne University of Technology",
    "bibtex": "@article{Wu_Liang_Zheng_Wang_2024, title={Enhancing Dynamic GCN for Node Attribute Forecasting with Meta Spatial-Temporal Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27040}, DOI={10.1609/aaai.v37i13.27040}, abstractNote={Node attribute forecasting has recently attracted considerable attention. Recent attempts have thus far utilize dynamic graph convolutional network (GCN) to predict future node attributes. However, few prior works have notice that the complex spatial and temporal interaction between nodes, which will hamper the performance of dynamic GCN. In this paper, we propose a new dynamic GCN model named meta-DGCN, leveraging meta spatial-temporal tasks to enhance the ability of dynamic GCN for better capturing node attributes in the future. 
Experiments show that meta-DGCN effectively modeling comprehensive spatio-temporal correlations between nodes and outperforms state-of-the-art baselines on various real-world datasets.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Bo and Liang, Xun and Zheng, Xiangping and Wang, Jun}, year={2024}, month={Jul.}, pages={16360-16361} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27040/26812", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27040", + "pdf_size": 69874, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:as6dIFzDZ3EJ:scholar.google.com/&scioq=Enhancing+Dynamic+GCN+for+Node+Attribute+Forecasting+with+Meta+Spatial-Temporal+Learning+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;swin.edu.au", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;swin.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Renmin University of China;Swinburne University of Technology", + "aff_unique_dep": "School of Information;", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.swinburne.edu.au", + "aff_unique_abbr": "RUC;SUT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26917", + "title": "Enhancing Smart, Sustainable Mobility with Game Theory and Multi-Agent Reinforcement Learning With Applications to Ridesharing", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "We propose the use of game-theoretic solutions and multi- agent Reinforcement Learning in the mechanism design of smart, sustainable mobility services. 
In particular, we present applications to ridesharing as an example of a cost game.", + "primary_area": "", + "author": "Lucia Cipolina-Kun", + "authorids": "", + "aff": "Electrical & Electronic Engineering. University of Bristol. UK", + "bibtex": "@article{Cipolina-Kun_2024, title={Enhancing Smart, Sustainable Mobility with Game Theory and Multi-Agent Reinforcement Learning With Applications to Ridesharing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26917}, DOI={10.1609/aaai.v37i13.26917}, abstractNote={We propose the use of game-theoretic solutions and multi- agent Reinforcement Learning in the mechanism design of smart, sustainable mobility services. In particular, we present applications to ridesharing as an example of a cost game.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cipolina-Kun, Lucia}, year={2024}, month={Jul.}, pages={16113-16114} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26917/26689", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26917", + "pdf_size": 58095, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1541748995665628119&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "bristol.ac.uk", + "email": "bristol.ac.uk", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Bristol", + "aff_unique_dep": "Electrical & Electronic Engineering", + "aff_unique_url": "https://www.bristol.ac.uk", + "aff_unique_abbr": "UoB", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26065", + "title": "Enhancing the Antidote: Improved Pointwise Certifications against Poisoning Attacks", + "track": "main", + "status": "Technical", + "abstract": "Poisoning attacks can disproportionately influence model behaviour by making small changes to the training corpus. 
While defences against specific poisoning attacks do exist, they in general do not provide any guarantees, leaving them potentially countered by novel attacks. In contrast, by examining worst-case behaviours Certified Defences make it possible to provide guarantees of the robustness of a sample against adversarial attacks modifying a finite number of training samples, known as pointwise certification. We achieve this by exploiting both Differential Privacy and the Sampled Gaussian Mechanism to ensure the invariance of prediction for each testing instance against finite numbers of poisoned examples. In doing so, our model provides guarantees of adversarial robustness that are more than twice as large as those provided by prior certifications.", + "primary_area": "machine learning ii", + "author": "Shijie Liu; Andrew C. Cullen; Paul Montague; Sarah M. Erfani; Benjamin I. P. Rubinstein", + "authorids": "", + "aff": "School of Computing and Information Systems, University of Melbourne, Melbourne, Australia; School of Computing and Information Systems, University of Melbourne, Melbourne, Australia; Defence Science and Technology Group, Adelaide, Australia; School of Computing and Information Systems, University of Melbourne, Melbourne, Australia; School of Computing and Information Systems, University of Melbourne, Melbourne, Australia", + "bibtex": "@article{Liu_Cullen_Montague_Erfani_Rubinstein_2023, title={Enhancing the Antidote: Improved Pointwise Certifications against Poisoning Attacks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26065}, DOI={10.1609/aaai.v37i7.26065}, abstractNote={Poisoning attacks can disproportionately influence model behaviour by making small changes to the training corpus. While defences against specific poisoning attacks do exist, they in general do not provide any guarantees, leaving them potentially countered by novel attacks. 
In contrast, by examining worst-case behaviours Certified Defences make it possible to provide guarantees of the robustness of a sample against adversarial attacks modifying a finite number of training samples, known as pointwise certification. We achieve this by exploiting both Differential Privacy and the Sampled Gaussian Mechanism to ensure the invariance of prediction for each testing instance against finite numbers of poisoned examples. In doing so, our model provides guarantees of adversarial robustness that are more than twice as large as those provided by prior certifications.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Shijie and Cullen, Andrew C. and Montague, Paul and Erfani, Sarah M. and Rubinstein, Benjamin I. P.}, year={2023}, month={Jun.}, pages={8861-8869} }",
    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26065/25837",
    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26065",
    "pdf_size": 490025,
    "gs_citation": 8,
    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2734035363895442580&as_sdt=5,44&sciodt=0,44&hl=en",
    "gs_version_total": 4,
    "aff_domain": "student.unimelb.edu.au;unimelb.edu.au;dst.defence.gov.au;unimelb.edu.au;unimelb.edu.au",
    "email": "student.unimelb.edu.au;unimelb.edu.au;dst.defence.gov.au;unimelb.edu.au;unimelb.edu.au",
    "github": "",
    "project": "",
    "author_num": 5,
    "aff_unique_index": "0;0;1;0;0",
    "aff_unique_norm": "University of Melbourne;Defence Science and Technology Group",
    "aff_unique_dep": "School of Computing and Information Systems;",
    "aff_unique_url": "https://www.unimelb.edu.au;",
    "aff_unique_abbr": "UniMelb;",
    "aff_campus_unique_index": "0;0;1;0;0",
    "aff_campus_unique": "Melbourne;Adelaide",
    "aff_country_unique_index": "0;0;0;0;0",
    "aff_country_unique": "Australia"
  },
  {
    "id": "article-26722",
    "title": "Ensemble-in-One: Ensemble Learning within Random Gated Networks for Enhanced Adversarial Robustness",
    "track": "aaai special track",
    "status": "Technical",
    "abstract": "Adversarial attacks have threatened modern deep learning systems by crafting adversarial examples with small perturbations to fool the convolutional neural networks (CNNs). To alleviate that, ensemble training methods are proposed to facilitate better adversarial robustness by diversifying the vulnerabilities among the sub-models, simultaneously maintaining comparable natural accuracy as standard training. Previous practices also demonstrate that enlarging the ensemble can improve the robustness. However, conventional ensemble methods are with poor scalability, owing to the rapidly increasing complexity when containing more sub-models in the ensemble. Moreover, it is usually infeasible to train or deploy an ensemble with substantial sub-models, owing to the tight hardware resource budget and latency requirement. In this work, we propose Ensemble-in-One (EIO), a simple but effective method to efficiently enlarge the ensemble with a random gated network (RGN). EIO augments a candidate model by replacing the parametrized layers with multi-path random gated blocks (RGBs) to construct an RGN. The scalability is significantly boosted because the number of paths exponentially increases with the RGN depth. Then by learning from the vulnerabilities of numerous other paths within the RGN, every path obtains better adversarial robustness. Our experiments demonstrate that EIO consistently outperforms previous ensemble training methods with smaller computational overheads, simultaneously achieving better accuracy-robustness trade-offs than adversarial training methods under black-box transfer attacks. 
Code is available at https://github.com/cai-y13/Ensemble-in-One.git", + "primary_area": "safe and robust ai", + "author": "Yi Cai; Xuefei Ning; Huazhong Yang; Yu Wang", + "authorids": "", + "aff": "Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University", + "bibtex": "@article{Cai_Ning_Yang_Wang_2023, title={Ensemble-in-One: Ensemble Learning within Random Gated Networks for Enhanced Adversarial Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26722}, DOI={10.1609/aaai.v37i12.26722}, abstractNote={Adversarial attacks have threatened modern deep learning systems by crafting adversarial examples with small perturbations to fool the convolutional neural networks (CNNs). To alleviate that, ensemble training methods are proposed to facilitate better adversarial robustness by diversifying the vulnerabilities among the sub-models, simultaneously maintaining comparable natural accuracy as standard training. Previous practices also demonstrate that enlarging the ensemble can improve the robustness. However, conventional ensemble methods are with poor scalability, owing to the rapidly increasing complexity when containing more sub-models in the ensemble. Moreover, it is usually infeasible to train or deploy an ensemble with substantial sub-models, owing to the tight hardware resource budget and latency requirement. In this work, we propose Ensemble-in-One (EIO), a simple but effective method to efficiently enlarge the ensemble with a random gated network (RGN). EIO augments a candidate model by replacing the parametrized layers with multi-path random gated blocks (RGBs) to construct an RGN. The scalability is significantly boosted because the number of paths exponentially increases with the RGN depth. 
Then by learning from the vulnerabilities of numerous other paths within the RGN, every path obtains better adversarial robustness. Our experiments demonstrate that EIO consistently outperforms previous ensemble training methods with smaller computational overheads, simultaneously achieving better accuracy-robustness trade-offs than adversarial training methods under black-box transfer attacks. Code is available at https://github.com/cai-y13/Ensemble-in-One.git}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cai, Yi and Ning, Xuefei and Yang, Huazhong and Wang, Yu}, year={2023}, month={Jun.}, pages={14738-14747} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26722/26494", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26722", + "pdf_size": 342342, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4311417811655217253&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "gmail.com;gmail.com;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/cai-y13/Ensemble-in-One.git", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Department of Electronic Engineering", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25535", + "title": "Entity-Agnostic Representation Learning for Parameter-Efficient Knowledge Graph Embedding", + "track": "main", + "status": "Technical", + "abstract": "We propose an entity-agnostic representation learning method for handling the problem of inefficient parameter storage costs brought by embedding knowledge graphs. 
Conventional knowledge graph embedding methods map elements in a knowledge graph, including entities and relations, into continuous vector spaces by assigning them one or multiple specific embeddings (i.e., vector representations). Thus the number of embedding parameters increases linearly as the growth of knowledge graphs. In our proposed model, Entity-Agnostic Representation Learning (EARL), we only learn the embeddings for a small set of entities and refer to them as reserved entities. To obtain the embeddings for the full set of entities, we encode their distinguishable information from their connected relations, k-nearest reserved entities, and multi-hop neighbors. We learn universal and entity-agnostic encoders for transforming distinguishable information into entity embeddings. This approach allows our proposed EARL to have a static, efficient, and lower parameter count than conventional knowledge graph embedding methods. Experimental results show that EARL uses fewer parameters and performs better on link prediction tasks than baselines, reflecting its parameter efficiency.",
    "primary_area": "data mining and knowledge management",
    "author": "Mingyang Chen; Wen Zhang; Zhen Yao; Yushan Zhu; Yang Gao; Jeff Z. Pan; Huajun Chen",
    "authorids": "",
    "aff": "College of Computer Science and Technology, Zhejiang University; School of Software Technology, Zhejiang University; School of Software Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; Huawei Technologies Co., Ltd.; School of Informatics, The University of Edinburgh; College of Computer Science and Technology, Zhejiang University + Donghai Laboratory + Alibaba-Zhejiang University Joint Institute of Frontier Technologies",
    "bibtex": "@article{Chen_Zhang_Yao_Zhu_Gao_Z. Pan_Chen_2023, title={Entity-Agnostic Representation Learning for Parameter-Efficient Knowledge Graph Embedding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25535}, DOI={10.1609/aaai.v37i4.25535}, abstractNote={We propose an entity-agnostic representation learning method for handling the problem of inefficient parameter storage costs brought by embedding knowledge graphs. Conventional knowledge graph embedding methods map elements in a knowledge graph, including entities and relations, into continuous vector spaces by assigning them one or multiple specific embeddings (i.e., vector representations). Thus the number of embedding parameters increases linearly as the growth of knowledge graphs. In our proposed model, Entity-Agnostic Representation Learning (EARL), we only learn the embeddings for a small set of entities and refer to them as reserved entities. To obtain the embeddings for the full set of entities, we encode their distinguishable information from their connected relations, k-nearest reserved entities, and multi-hop neighbors. We learn universal and entity-agnostic encoders for transforming distinguishable information into entity embeddings. This approach allows our proposed EARL to have a static, efficient, and lower parameter count than conventional knowledge graph embedding methods. Experimental results show that EARL uses fewer parameters and performs better on link prediction tasks than baselines, reflecting its parameter efficiency.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Mingyang and Zhang, Wen and Yao, Zhen and Zhu, Yushan and Gao, Yang and Z. 
Pan, Jeff and Chen, Huajun}, year={2023}, month={Jun.}, pages={4182-4190} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25535/25307", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25535", + "pdf_size": 1364970, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12290783764072640087&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;huawei.com;ed.ac.uk;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;huawei.com;ed.ac.uk;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;2;0+3+0", + "aff_unique_norm": "Zhejiang University;Huawei Technologies;The University of Edinburgh;Donghai Laboratory", + "aff_unique_dep": "College of Computer Science and Technology;;School of Informatics;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.huawei.com;https://www.ed.ac.uk;", + "aff_unique_abbr": "ZJU;Huawei;Edinburgh;", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Edinburgh", + "aff_country_unique_index": "0;0;0;0;0;1;0+0+0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26438", + "title": "Entropy Regularization for Population Estimation", + "track": "main", + "status": "Technical", + "abstract": "Entropy regularization is known to improve exploration in sequential decision-making problems. We show that this same mechanism can also lead to nearly unbiased and lower-variance estimates of the mean reward in the optimize-and-estimate structured bandit setting. Mean reward estimation (i.e., population estimation) tasks have recently been shown to be essential for public policy settings where legal constraints often require precise estimates of population metrics. We show that leveraging entropy and KL divergence can yield a better trade-off between reward and estimator variance than existing baselines, all while remaining nearly unbiased. 
These properties of entropy regularization illustrate an exciting potential for bringing together the optimal exploration and estimation literature.", + "primary_area": "reasoning under uncertainty", + "author": "Ben Chugg; Peter Henderson; Jacob Goldin; Daniel E. Ho", + "authorids": "", + "aff": "Carnegie Mellon University; Stanford University; University of Chicago; Stanford University", + "bibtex": "@article{Chugg_Henderson_Goldin_Ho_2023, title={Entropy Regularization for Population Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26438}, DOI={10.1609/aaai.v37i10.26438}, abstractNote={Entropy regularization is known to improve exploration in sequential decision-making problems. We show that this same mechanism can also lead to nearly unbiased and lower-variance estimates of the mean reward in the optimize-and-estimate structured bandit setting. Mean reward estimation (i.e., population estimation) tasks have recently been shown to be essential for public policy settings where legal constraints often require precise estimates of population metrics. We show that leveraging entropy and KL divergence can yield a better trade-off between reward and estimator variance than existing baselines, all while remaining nearly unbiased. 
These properties of entropy regularization illustrate an exciting potential for bringing together the optimal exploration and estimation literature.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chugg, Ben and Henderson, Peter and Goldin, Jacob and Ho, Daniel E.}, year={2023}, month={Jun.}, pages={12198-12204} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26438/26210", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26438", + "pdf_size": 864214, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9884264868404063447&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cmu.edu;stanford.edu;uchicago.edu;stanford.edu", + "email": "cmu.edu;stanford.edu;uchicago.edu;stanford.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Carnegie Mellon University;Stanford University;University of Chicago", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cmu.edu;https://www.stanford.edu;https://www.uchicago.edu", + "aff_unique_abbr": "CMU;Stanford;UChicago", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25773", + "title": "Epistemic Disjunctive Datalog for Querying Knowledge Bases", + "track": "main", + "status": "Technical", + "abstract": "The Datalog query language can express several powerful recursive properties, often crucial in real-world scenarios. While answering such queries is feasible over relational databases, the picture changes dramatically when data is enriched with intensional knowledge. It is indeed well-known that answering Datalog queries is undecidable already over lightweight knowledge bases (KBs) of the DL-Lite family. 
To overcome this issue, we propose a new query language based on Disjunctive Datalog rules combined with a modal epistemic operator. Rules in this language interact with the queried KB exclusively via the epistemic operator, thus extracting only the information true in every model of the KB. This form of interaction is crucial for not falling into undecidability. The contribution provided by this paper is threefold. First, we illustrate the syntax and the semantics of the novel query language. Second, we study the expressive power of different fragments of our new language and compare it with Disjunctive Datalog and its variants. Third, we outline the precise data complexity of answering queries in our new language over KBs expressed in various well-known formalisms.", + "primary_area": "knowledge representation and reasoning", + "author": "Gianluca Cima; Marco Console; Maurizio Lenzerini; Antonella Poggi", + "authorids": "", + "aff": "Sapienza University of Rome; Sapienza University of Rome; Sapienza University of Rome; Sapienza University of Rome", + "bibtex": "@article{Cima_Console_Lenzerini_Poggi_2023, title={Epistemic Disjunctive Datalog for Querying Knowledge Bases}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25773}, DOI={10.1609/aaai.v37i5.25773}, abstractNote={The Datalog query language can express several powerful recursive properties, often crucial in real-world scenarios. While answering such queries is feasible over relational databases, the picture changes dramatically when data is enriched with intensional knowledge. It is indeed well-known that answering Datalog queries is undecidable already over lightweight knowledge bases (KBs) of the DL-Lite family. To overcome this issue, we propose a new query language based on Disjunctive Datalog rules combined with a modal epistemic operator. 
Rules in this language interact with the queried KB exclusively via the epistemic operator, thus extracting only the information true in every model of the KB. This form of interaction is crucial for not falling into undecidability. The contribution provided by this paper is threefold. First, we illustrate the syntax and the semantics of the novel query language. Second, we study the expressive power of different fragments of our new language and compare it with Disjunctive Datalog and its variants. Third, we outline the precise data complexity of answering queries in our new language over KBs expressed in various well-known formalisms.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cima, Gianluca and Console, Marco and Lenzerini, Maurizio and Poggi, Antonella}, year={2023}, month={Jun.}, pages={6280-6288} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25773/25545", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25773", + "pdf_size": 169705, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=435619403878800495&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "diag.uniroma1.it;diag.uniroma1.it;diag.uniroma1.it;diag.uniroma1.it", + "email": "diag.uniroma1.it;diag.uniroma1.it;diag.uniroma1.it;diag.uniroma1.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Sapienza University of Rome", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uniroma1.it", + "aff_unique_abbr": "Sapienza", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Rome", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25832", + "title": "Equi-Tuning: Group Equivariant Fine-Tuning of Pretrained Models", + "track": "main", + "status": "Technical", + "abstract": "We introduce equi-tuning, a novel fine-tuning method that transforms (potentially 
non-equivariant) pretrained models into group equivariant models while incurring minimum L_2 loss between the feature representations of the pretrained and the equivariant models. Large pretrained models can be equi-tuned for different groups to satisfy the needs of various downstream tasks. Equi-tuned models benefit from both group equivariance as an inductive bias and semantic priors from pretrained models. We provide applications of equi-tuning on three different tasks: image classification, compositional generalization in language, and fairness in natural language generation (NLG). We also provide a novel group-theoretic definition for fairness in NLG. The effectiveness of this definition is shown by testing it against a standard empirical method of fairness in NLG. We provide experimental results for equi-tuning using a variety of pretrained models: Alexnet, Resnet, VGG, and Densenet for image classification; RNNs, GRUs, and LSTMs for compositional generalization; and GPT2 for fairness in NLG. We test these models on benchmark datasets across all considered tasks to show the generality and effectiveness of the proposed method.", + "primary_area": "machine learning i", + "author": "Sourya Basu; Prasanna Sattigeri; Karthikeyan Natesan Ramamurthy; Vijil Chenthamarakshan; Kush R. Varshney; Lav R. Varshney; Payel Das", + "authorids": "", + "aff": "IBM Research \u2013 Thomas J. Watson Research Center+University of Illinois at Urbana-Champaign; IBM Research \u2013 Thomas J. Watson Research Center; IBM Research \u2013 Thomas J. Watson Research Center; IBM Research \u2013 Thomas J. Watson Research Center; IBM Research \u2013 Thomas J. Watson Research Center; University of Illinois at Urbana-Champaign; IBM Research \u2013 Thomas J. 
Watson Research Center", + "bibtex": "@article{Basu_Sattigeri_Natesan Ramamurthy_Chenthamarakshan_Varshney_Varshney_Das_2023, title={Equi-Tuning: Group Equivariant Fine-Tuning of Pretrained Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25832}, DOI={10.1609/aaai.v37i6.25832}, abstractNote={We introduce equi-tuning, a novel fine-tuning method that transforms (potentially non-equivariant) pretrained models into group equivariant models while incurring minimum L_2 loss between the feature representations of the pretrained and the equivariant models. Large pretrained models can be equi-tuned for different groups to satisfy the needs of various downstream tasks. Equi-tuned models benefit from both group equivariance as an inductive bias and semantic priors from pretrained models. We provide applications of equi-tuning on three different tasks: image classification, compositional generalization in language, and fairness in natural language generation (NLG). We also provide a novel group-theoretic definition for fairness in NLG. The effectiveness of this definition is shown by testing it against a standard empirical method of fairness in NLG. We provide experimental results for equi-tuning using a variety of pretrained models: Alexnet, Resnet, VGG, and Densenet for image classification; RNNs, GRUs, and LSTMs for compositional generalization; and GPT2 for fairness in NLG. We test these models on benchmark datasets across all considered tasks to show the generality and effectiveness of the proposed method.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Basu, Sourya and Sattigeri, Prasanna and Natesan Ramamurthy, Karthikeyan and Chenthamarakshan, Vijil and Varshney, Kush R. and Varshney, Lav R. 
and Das, Payel}, year={2023}, month={Jun.}, pages={6788-6796} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25832/25604", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25832", + "pdf_size": 741431, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6376606670237066084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;0;0;0;1;0", + "aff_unique_norm": "IBM Research;University of Illinois at Urbana-Champaign", + "aff_unique_dep": "Thomas J. Watson Research Center;", + "aff_unique_url": "https://www.ibm.com/research;https://illinois.edu", + "aff_unique_abbr": "IBM;UIUC", + "aff_campus_unique_index": "0+1;0;0;0;0;1;0", + "aff_campus_unique": "Yorktown Heights;Urbana-Champaign", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26403", + "title": "Equity Promotion in Public Transportation", + "track": "main", + "status": "Technical", + "abstract": "There are many news articles reporting the obstacles confronting poverty-stricken households in access to public transits. These barriers create a great deal of inconveniences for these impoverished families and more importantly, they contribute a lot of social inequalities. A typical approach addressing the issue is to build more transport infrastructure to offer more opportunities to access the public transits especially for those deprived communities. Examples include adding more bus lines connecting needy residents to railways systems and extending existing bus lines to areas with low socioeconomic status. Recently, a new strategy is proposed, which is to harness the ubiquitous ride-hailing services to connect disadvantaged households with the nearest public transportations. 
Compared with the former infrastructure-based solution, the ride-hailing-based strategy enjoys a few exclusive benefits such as higher effectiveness and more flexibility.\n\nIn this paper, we propose an optimization model to study how to integrate the two approaches together for equity-promotion purposes. Specifically, we aim to design a strategy of allocating a given limited budget to different candidate programs such that the overall social equity is maximized, which is defined as the minimum covering ratio among all pre-specified protected groups of households (based on race, income, etc.). We have designed a linear-programming (LP) based rounding algorithm, which proves to achieve an optimal approximation ratio of 1-1/e. Additionally, we test our algorithm against a few baselines on real data assembled by outsourcing multiple public datasets collected in the city of Chicago. Experimental results confirm our theoretical predictions and demonstrate the effectiveness of our LP-based strategy in promoting social equity, especially when the budget is insufficient.", + "primary_area": "philosophy and ethics of ai", + "author": "Anik Pramanik; Pan Xu; Yifan Xu", + "authorids": "", + "aff": "Department of Computer Science, New Jersey Institute of Technology, Newark, USA; Department of Computer Science, New Jersey Institute of Technology, Newark, USA; School of Cyber Science and Engineering, Southeast University, Nanjing, China", + "bibtex": "@article{Pramanik_Xu_Xu_2023, title={Equity Promotion in Public Transportation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26403}, DOI={10.1609/aaai.v37i10.26403}, abstractNote={There are many news articles reporting the obstacles confronting poverty-stricken households in access to public transits. These barriers create a great deal of inconveniences for these impoverished families and more importantly, they contribute a lot of social inequalities. 
A typical approach addressing the issue is to build more transport infrastructure to offer more opportunities to access the public transits especially for those deprived communities. Examples include adding more bus lines connecting needy residents to railways systems and extending existing bus lines to areas with low socioeconomic status. Recently, a new strategy is proposed, which is to harness the ubiquitous ride-hailing services to connect disadvantaged households with the nearest public transportations. Compared with the former infrastructure-based solution, the ride-hailing-based strategy enjoys a few exclusive benefits such as higher effectiveness and more flexibility. In this paper, we propose an optimization model to study how to integrate the two approaches together for equity-promotion purposes. Specifically, we aim to design a strategy of allocating a given limited budget to different candidate programs such that the overall social equity is maximized, which is defined as the minimum covering ratio among all pre-specified protected groups of households (based on race, income, etc.). We have designed a linear-programming (LP) based rounding algorithm, which proves to achieve an optimal approximation ratio of 1-1/e. Additionally, we test our algorithm against a few baselines on real data assembled by outsourcing multiple public datasets collected in the city of Chicago. 
Experimental results confirm our theoretical predictions and demonstrate the effectiveness of our LP-based strategy in promoting social equity, especially when the budget is insufficient.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pramanik, Anik and Xu, Pan and Xu, Yifan}, year={2023}, month={Jun.}, pages={11890-11898} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26403/26175", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26403", + "pdf_size": 291802, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14382650158589843742&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "njit.edu;njit.edu;seu.edu.cn", + "email": "njit.edu;njit.edu;seu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "New Jersey Institute of Technology;Southeast University", + "aff_unique_dep": "Department of Computer Science;School of Cyber Science and Engineering", + "aff_unique_url": "https://www.njit.edu;https://www.seu.edu.cn/", + "aff_unique_abbr": "NJIT;SEU", + "aff_campus_unique_index": "0;0;1", + "aff_campus_unique": "Newark;Nanjing", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26673", + "title": "Equivariant Message Passing Neural Network for Crystal Material Discovery", + "track": "aaai special track", + "status": "Technical", + "abstract": "Automatic material discovery with desired properties is a fundamental challenge for material sciences. Considerable attention has recently been devoted to generating stable crystal structures. While existing work has shown impressive success on supervised tasks such as property prediction, the progress on unsupervised tasks such as material generation is still hampered by the limited extent to which the equivalent geometric representations of the same crystal are considered. 
To address this challenge, we propose EPGNN a periodic equivariant message-passing neural network that learns crystal lattice deformation in an unsupervised fashion. Our model equivalently acts on lattice according to the deformation action that must be performed, making it suitable for crystal generation, relaxation and optimisation. We present experimental evaluations that demonstrate the effectiveness of our approach.", + "primary_area": "ai for social impact", + "author": "Astrid Klipfel; Zied Bouraoui; Olivier Peltre; Ya\u00ebl Fregier; Najwa Harrati; Adlane Sayede", + "authorids": "", + "aff": "Univ. Artois, UMR 8188, Centre de Recherche en Informatique de Lens (CRIL), F-62300 Lens, France+Univ. Artois, UMR 8181, Unit\u00e9 de Catalyse et de Chimie du Solide (UCCS), F-62300 Lens, France+Univ. Artois, UR 2462, Laboratoire de Math\u00e9matiques de Lens (LML), F-62300 Lens, France; Univ. Artois, UMR 8188, Centre de Recherche en Informatique de Lens (CRIL), F-62300 Lens, France; Univ. Artois, UMR 8188, Centre de Recherche en Informatique de Lens (CRIL), F-62300 Lens, France+Univ. Artois, UR 2462, Laboratoire de Math\u00e9matiques de Lens (LML), F-62300 Lens, France; Univ. Artois, UR 2462, Laboratoire de Math\u00e9matiques de Lens (LML), F-62300 Lens, France; Univ. Artois, UMR 8181, Unit\u00e9 de Catalyse et de Chimie du Solide (UCCS), F-62300 Lens, France; Univ. Artois, UMR 8181, Unit\u00e9 de Catalyse et de Chimie du Solide (UCCS), F-62300 Lens, France", + "bibtex": "@article{Klipfel_Bouraoui_Peltre_Fregier_Harrati_Sayede_2023, title={Equivariant Message Passing Neural Network for Crystal Material Discovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26673}, DOI={10.1609/aaai.v37i12.26673}, abstractNote={Automatic material discovery with desired properties is a fundamental challenge for material sciences. Considerable attention has recently been devoted to generating stable crystal structures. 
While existing work has shown impressive success on supervised tasks such as property prediction, the progress on unsupervised tasks such as material generation is still hampered by the limited extent to which the equivalent geometric representations of the same crystal are considered. To address this challenge, we propose EPGNN a periodic equivariant message-passing neural network that learns crystal lattice deformation in an unsupervised fashion. Our model equivalently acts on lattice according to the deformation action that must be performed, making it suitable for crystal generation, relaxation and optimisation. We present experimental evaluations that demonstrate the effectiveness of our approach.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Klipfel, Astrid and Bouraoui, Zied and Peltre, Olivier and Fregier, Ya\u00ebl and Harrati, Najwa and Sayede, Adlane}, year={2023}, month={Jun.}, pages={14304-14311} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26673/26445", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26673", + "pdf_size": 209912, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5108786836011329163&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 10, + "aff_domain": "cril.univ-artois.fr;cril.univ-artois.fr;cril.univ-artois.fr;lml.univ-artois.fr;uccs.univ-artois.fr;uccs.univ-artois.fr", + "email": "cril.univ-artois.fr;cril.univ-artois.fr;cril.univ-artois.fr;lml.univ-artois.fr;uccs.univ-artois.fr;uccs.univ-artois.fr", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0+0;0;0+0;0;0;0", + "aff_unique_norm": "Universit\u00e9 d'Artois", + "aff_unique_dep": "Centre de Recherche en Informatique de Lens (CRIL)", + "aff_unique_url": "https://www.univ-artois.fr", + "aff_unique_abbr": "Univ. 
Artois", + "aff_campus_unique_index": "0+0;0;0+0;0", + "aff_campus_unique": "Lens;", + "aff_country_unique_index": "0+0+0;0;0+0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-27320", + "title": "Erratum to: 3D-TOGO: Towards Text-Guided Cross-Category 3D Object Generation", + "track": "errata", + "status": "Technical", + "abstract": "", + "primary_area": "", + "author": "Zutao Jiang; Guansong Lu; Xiaodan Liang; Jihua Zhu; Wei Zhang; Xiaojun Chang; Hang Xu", + "authorids": "", + "aff": "School of Software Engineering, Xi\u2019an Jiaotong University; Huawei Noah\u2019s Ark Lab; Sun Yat-sen University + MBZUAI; School of Software Engineering, Xi\u2019an Jiaotong University; Huawei Noah\u2019s Ark Lab; ReLER, AAII, University of Technology Sydney; Huawei Noah\u2019s Ark Lab + PengCheng Laboratory", + "bibtex": "@article{Jiang_Lu_Liang_Zhu_Zhang_Chang_Xu_2023, title={Erratum to: 3D-TOGO: Towards Text-Guided Cross-Category 3D Object Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27320}, DOI={10.1609/aaai.v37i13.27320}, abstractNote={<p>The <a href="https://doi.org/10.1609/aaai.v37i1.25186">Original Article</a> was published on 26 June 2023.</p> <div class="item published">&nbsp;</div>}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Zutao and Lu, Guansong and Liang, Xiaodan and Zhu, Jihua and Zhang, Wei and Chang, Xiaojun and Xu, Hang}, year={2023}, month={Sep.}, pages={16498} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27320/27094", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27320", + "pdf_size": 34448, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:6PoU3WYzEBgJ:scholar.google.com/&scioq=Erratum+to:+3D-TOGO:+Towards+Text-Guided+Cross-Category+3D+Object+Generation&hl=en&as_sdt=0,23", + "gs_version_total": 0, + "aff_domain": 
"gmail.com;huawei.com;gmail.com;xjtu.edu.cn;huawei.com;uts.edu.au;gmail.com", + "email": "gmail.com;huawei.com;gmail.com;xjtu.edu.cn;huawei.com;uts.edu.au;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2+3;0;1;4;1+5", + "aff_unique_norm": "Xi'an Jiaotong University;Huawei;Sun Yat-sen University;Mohamed Bin Zayed University of Artificial Intelligence;University of Technology Sydney;PengCheng Laboratory", + "aff_unique_dep": "School of Software Engineering;Noah\u2019s Ark Lab;;;;", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.huawei.com;http://www.sysu.edu.cn/;https://www.mbzuai.ac.ae;https://www.uts.edu.au;http://www.pcl.ac.cn", + "aff_unique_abbr": "XJTU;Huawei;SYSU;MBZUAI;UTS;", + "aff_campus_unique_index": "0;;0;", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": "0;0;0+1;0;0;2;0+0", + "aff_country_unique": "China;United Arab Emirates;Australia" + }, + { + "id": "article-25921", + "title": "Estimating Average Causal Effects from Patient Trajectories", + "track": "main", + "status": "Technical", + "abstract": "In medical practice, treatments are selected based on the expected causal effects on patient outcomes. Here, the gold standard for estimating causal effects are randomized controlled trials; however, such trials are costly and sometimes even unethical. Instead, medical practice is increasingly interested in estimating causal effects among patient (sub)groups from electronic health records, that is, observational data. In this paper, we aim at estimating the average causal effect (ACE) from observational data (patient trajectories) that are collected over time. For this, we propose DeepACE: an end-to-end deep learning model. DeepACE leverages the iterative G-computation formula to adjust for the bias induced by time-varying confounders. 
Moreover, we develop a novel sequential targeting procedure which ensures that DeepACE has favorable theoretical properties, i.e., is doubly robust and asymptotically efficient. To the best of our knowledge, this is the first work that proposes an end-to-end deep learning model tailored for estimating time-varying ACEs. We compare DeepACE in an extensive number of experiments, confirming that it achieves state-of-the-art performance. We further provide a case study for patients suffering from low back pain to demonstrate that DeepACE generates important and meaningful findings for clinical practice. Our work enables practitioners to develop effective treatment recommendations based on population effects.", + "primary_area": "machine learning i", + "author": "Dennis Frauen; Tobias Hatt; Valentyn Melnychuk; Stefan Feuerriegel", + "authorids": "", + "aff": "LMU Munich+Munich Center for Machine Learning; ETH Zurich; LMU Munich+Munich Center for Machine Learning; LMU Munich+Munich Center for Machine Learning", + "bibtex": "@article{Frauen_Hatt_Melnychuk_Feuerriegel_2023, title={Estimating Average Causal Effects from Patient Trajectories}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25921}, DOI={10.1609/aaai.v37i6.25921}, abstractNote={In medical practice, treatments are selected based on the expected causal effects on patient outcomes. Here, the gold standard for estimating causal effects are randomized controlled trials; however, such trials are costly and sometimes even unethical. Instead, medical practice is increasingly interested in estimating causal effects among patient (sub)groups from electronic health records, that is, observational data. In this paper, we aim at estimating the average causal effect (ACE) from observational data (patient trajectories) that are collected over time. For this, we propose DeepACE: an end-to-end deep learning model. 
DeepACE leverages the iterative G-computation formula to adjust for the bias induced by time-varying confounders. Moreover, we develop a novel sequential targeting procedure which ensures that DeepACE has favorable theoretical properties, i.e., is doubly robust and asymptotically efficient. To the best of our knowledge, this is the first work that proposes an end-to-end deep learning model tailored for estimating time-varying ACEs. We compare DeepACE in an extensive number of experiments, confirming that it achieves state-of-the-art performance. We further provide a case study for patients suffering from low back pain to demonstrate that DeepACE generates important and meaningful findings for clinical practice. Our work enables practitioners to develop effective treatment recommendations based on population effects.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Frauen, Dennis and Hatt, Tobias and Melnychuk, Valentyn and Feuerriegel, Stefan}, year={2023}, month={Jun.}, pages={7586-7594} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25921/25693", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25921", + "pdf_size": 456519, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12275817668236007987&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "lmu.de;ethz.ch;lmu.de;lmu.de", + "email": "lmu.de;ethz.ch;lmu.de;lmu.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0+1;0+1", + "aff_unique_norm": "Ludwig Maximilian University of Munich;Munich Center for Machine Learning;ETH Zurich", + "aff_unique_dep": ";Center for Machine Learning;", + "aff_unique_url": "https://www.lmu.de;https://www.munich-center-for-machine-learning.de;https://www.ethz.ch", + "aff_unique_abbr": "LMU;;ETHZ", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Munich;", + "aff_country_unique_index": "0+0;1;0+0;0+0", + 
"aff_country_unique": "Germany;Switzerland" + }, + { + "id": "article-26657", + "title": "Estimating Geographic Spillover Effects of COVID-19 Policies from Large-Scale Mobility Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Many policies in the US are determined locally, e.g., at the county-level. Local policy regimes provide flexibility between regions, but may become less effective in the presence of geographic spillovers, where populations circumvent local restrictions by traveling to less restricted regions nearby. Due to the endogenous nature of policymaking, there have been few opportunities to reliably estimate causal spillover effects or evaluate their impact on local policies. In this work, we identify a novel setting and develop a suitable methodology that allow us to make unconfounded estimates of spillover effects of local policies. Focusing on California\u2019s Blueprint for a Safer Economy, we leverage how county-level mobility restrictions were deterministically set by public COVID-19 severity statistics, enabling a regression discontinuity design framework to estimate spillovers between counties. We estimate these effects using a mobility network with billions of timestamped edges and find significant spillover movement, with larger effects in retail, eating places, and gyms. Contrasting local and global policy regimes, our spillover estimates suggest that county-level restrictions are only 54% as effective as statewide restrictions at reducing mobility. 
However, an intermediate strategy of macro-county restrictions---where we optimize county partitions by solving a minimum k-cut problem on a graph weighted by our spillover estimates---can recover over 90% of statewide mobility reductions, while maintaining substantial flexibility between counties.", + "primary_area": "ai for social impact", + "author": "Serina Chang; Damir Vrabac; Jure Leskovec; Johan Ugander", + "authorids": "", + "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Management Science & Engineering, Stanford University", + "bibtex": "@article{Chang_Vrabac_Leskovec_Ugander_2023, title={Estimating Geographic Spillover Effects of COVID-19 Policies from Large-Scale Mobility Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26657}, DOI={10.1609/aaai.v37i12.26657}, abstractNote={Many policies in the US are determined locally, e.g., at the county-level. Local policy regimes provide flexibility between regions, but may become less effective in the presence of geographic spillovers, where populations circumvent local restrictions by traveling to less restricted regions nearby. Due to the endogenous nature of policymaking, there have been few opportunities to reliably estimate causal spillover effects or evaluate their impact on local policies. In this work, we identify a novel setting and develop a suitable methodology that allow us to make unconfounded estimates of spillover effects of local policies. Focusing on California\u2019s Blueprint for a Safer Economy, we leverage how county-level mobility restrictions were deterministically set by public COVID-19 severity statistics, enabling a regression discontinuity design framework to estimate spillovers between counties. 
We estimate these effects using a mobility network with billions of timestamped edges and find significant spillover movement, with larger effects in retail, eating places, and gyms. Contrasting local and global policy regimes, our spillover estimates suggest that county-level restrictions are only 54% as effective as statewide restrictions at reducing mobility. However, an intermediate strategy of macro-county restrictions---where we optimize county partitions by solving a minimum k-cut problem on a graph weighted by our spillover estimates---can recover over 90% of statewide mobility reductions, while maintaining substantial flexibility between counties.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chang, Serina and Vrabac, Damir and Leskovec, Jure and Ugander, Johan}, year={2023}, month={Jun.}, pages={14161-14169} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26657/26429", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26657", + "pdf_size": 629310, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8322170050147047596&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "stanford.edu;stanford.edu;stanford.edu;stanford.edu", + "email": "stanford.edu;stanford.edu;stanford.edu;stanford.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25188", + "title": "Estimating Reflectance Layer from a Single Image: Integrating Reflectance Guidance and Shadow/Specular Aware Learning", + "track": "main", + "status": "Technical", + "abstract": 
"Estimating the reflectance layer from a single image is a challenging task. It becomes more challenging when the input image contains shadows or specular highlights, which often render an inaccurate estimate of the reflectance layer. Therefore, we propose a two-stage learning method, including reflectance guidance and a Shadow/Specular-Aware (S-Aware) network to tackle the problem. In the first stage, an initial reflectance layer free from shadows and specularities is obtained with the constraint of novel losses that are guided by prior-based shadow-free and specular-free images. To further enforce the reflectance layer to be independent of shadows and specularities in the second-stage refinement, we introduce an S-Aware network that distinguishes the reflectance image from the input image. Our network employs a classifier to categorize shadow/shadow-free, specular/specular-free classes, enabling the activation features to function as attention maps that focus on shadow/specular regions. Our quantitative and qualitative evaluations show that our method outperforms the state-of-the-art methods in the reflectance layer estimation that is free from shadows and specularities.", + "primary_area": "computer vision i", + "author": "Yeying Jin; Ruoteng Li; Wenhan Yang; Robby T. Tan", + "authorids": "", + "aff": "National University of Singapore, Singapore; National University of Singapore, Singapore + ByteDance, Singapore; Peng Cheng Laboratory, China; National University of Singapore, Singapore + Yale-NUS College, Singapore", + "bibtex": "@article{Jin_Li_Yang_Tan_2023, title={Estimating Reflectance Layer from a Single Image: Integrating Reflectance Guidance and Shadow/Specular Aware Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25188}, DOI={10.1609/aaai.v37i1.25188}, abstractNote={Estimating the reflectance layer from a single image is a challenging task. 
It becomes more challenging when the input image contains shadows or specular highlights, which often render an inaccurate estimate of the reflectance layer. Therefore, we propose a two-stage learning method, including reflectance guidance and a Shadow/Specular-Aware (S-Aware) network to tackle the problem. In the first stage, an initial reflectance layer free from shadows and specularities is obtained with the constraint of novel losses that are guided by prior-based shadow-free and specular-free images. To further enforce the reflectance layer to be independent of shadows and specularities in the second-stage refinement, we introduce an S-Aware network that distinguishes the reflectance image from the input image. Our network employs a classifier to categorize shadow/shadow-free, specular/specular-free classes, enabling the activation features to function as attention maps that focus on shadow/specular regions. Our quantitative and qualitative evaluations show that our method outperforms the state-of-the-art methods in the reflectance layer estimation that is free from shadows and specularities.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Yeying and Li, Ruoteng and Yang, Wenhan and Tan, Robby T.}, year={2023}, month={Jun.}, pages={1069-1077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25188/24960", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25188", + "pdf_size": 12098480, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5187362077304630678&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "u.nus.edu;u.nus.edu;pcl.ac.cn;nus.edu.sg", + "email": "u.nus.edu;u.nus.edu;pcl.ac.cn;nus.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;2;0+3", + "aff_unique_norm": "National University of Singapore;ByteDance;Peng Cheng Laboratory;Yale-NUS College", + "aff_unique_dep": ";;;", + 
"aff_unique_url": "https://www.nus.edu.sg;https://www.bytedance.com;;https://www.yale-nus.edu.sg", + "aff_unique_abbr": "NUS;;;Yale-NUS", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;1;0+0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25948", + "title": "Estimating Regression Predictive Distributions with Sample Networks", + "track": "main", + "status": "Technical", + "abstract": "Estimating the uncertainty in deep neural network predictions is crucial for many real-world applications. A common approach to model uncertainty is to choose a parametric distribution and fit the data to it using maximum likelihood estimation. The chosen parametric form can be a poor fit to the data-generating distribution, resulting in unreliable uncertainty estimates. In this work, we propose SampleNet, a flexible and scalable architecture for modeling uncertainty that avoids specifying a parametric form on the output distribution. SampleNets do so by defining an empirical distribution using samples that are learned with the Energy Score and regularized with the Sinkhorn Divergence. 
SampleNets are shown to be able to well-fit a wide range of distributions and to outperform baselines on large-scale real-world regression tasks.", + "primary_area": "machine learning i", + "author": "Ali Harakeh; Jordan Sir Kwang Hu; Naiqing Guan; Steven Waslander; Liam Paull", + "authorids": "", + "aff": "Mila - Quebec AI Institute + Universit\u00e9 de Montr\u00e9al; University of Toronto Institute for Aerospace Studies + University of Toronto; University of Toronto; University of Toronto Institute for Aerospace Studies + University of Toronto; Mila - Quebec AI Institute + Universit\u00e9 de Montr\u00e9al", + "bibtex": "@article{Harakeh_Hu_Guan_Waslander_Paull_2023, title={Estimating Regression Predictive Distributions with Sample Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25948}, DOI={10.1609/aaai.v37i6.25948}, abstractNote={Estimating the uncertainty in deep neural network predictions is crucial for many real-world applications. A common approach to model uncertainty is to choose a parametric distribution and fit the data to it using maximum likelihood estimation. The chosen parametric form can be a poor fit to the data-generating distribution, resulting in unreliable uncertainty estimates. In this work, we propose SampleNet, a flexible and scalable architecture for modeling uncertainty that avoids specifying a parametric form on the output distribution. SampleNets do so by defining an empirical distribution using samples that are learned with the Energy Score and regularized with the Sinkhorn Divergence. 
SampleNets are shown to be able to well-fit a wide range of distributions and to outperform baselines on large-scale real-world regression tasks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Harakeh, Ali and Hu, Jordan Sir Kwang and Guan, Naiqing and Waslander, Steven and Paull, Liam}, year={2023}, month={Jun.}, pages={7830-7838} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25948/25720", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25948", + "pdf_size": 8109870, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2236015373238797036&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mila.quebec; ; ; ; ", + "email": "mila.quebec; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2+2;2;2+2;0+1", + "aff_unique_norm": "Quebec AI Institute;Universit\u00e9 de Montr\u00e9al;University of Toronto", + "aff_unique_dep": "AI Institute;;Institute for Aerospace Studies", + "aff_unique_url": "https://mila.quebec;https://www.umontreal.ca;https://www.utoronto.ca", + "aff_unique_abbr": "Mila;UdeM;UTIAS", + "aff_campus_unique_index": ";1;1;", + "aff_campus_unique": ";Toronto", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25844", + "title": "Estimating Treatment Effects from Irregular Time Series Observations with Hidden Confounders", + "track": "main", + "status": "Technical", + "abstract": "Causal analysis for time series data, in particular estimating individualized treatment effect (ITE), is a key task in many real world applications, such as finance, retail, healthcare, etc. Real world time series, i.e., large-scale irregular or sparse and intermittent time series, raise significant challenges to existing work attempting to estimate treatment effects. 
Specifically, the existence of hidden confounders can lead to biased treatment estimates and complicate the causal inference process. In particular, anomaly hidden confounders which exceed the typical range can lead to high variance estimates. Moreover, in continuous time settings with irregular samples, it is challenging to directly handle the dynamics of causality. In this paper, we leverage recent advances in Lipschitz regularization and neural controlled differential equations (CDE) to develop an effective and scalable solution, namely LipCDE, to address the above challenges. LipCDE can directly model the dynamic causal relationships between historical data and outcomes with irregular samples by considering the boundary of hidden confounders given by Lipschitz constrained neural networks. Furthermore, we conduct extensive experiments on both synthetic and real world datasets to demonstrate the effectiveness and scalability of LipCDE.", + "primary_area": "machine learning i", + "author": "Defu Cao; James Enouen; Yujing Wang; Xiangchen Song; Chuizheng Meng; Hao Niu; Yan Liu", + "authorids": "", + "aff": "University of Southern California; University of Southern California; Peking University; Carnegie Mellon University; University of Southern California; KDDI Research, Inc.; University of Southern California", + "bibtex": "@article{Cao_Enouen_Wang_Song_Meng_Niu_Liu_2023, title={Estimating Treatment Effects from Irregular Time Series Observations with Hidden Confounders}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25844}, DOI={10.1609/aaai.v37i6.25844}, abstractNote={Causal analysis for time series data, in particular estimating individualized treatment effect (ITE), is a key task in many real world applications, such as finance, retail, healthcare, etc. Real world time series, i.e., large-scale irregular or sparse and intermittent time series, raise significant challenges to existing work attempting to estimate treatment effects. 
Specifically, the existence of hidden confounders can lead to biased treatment estimates and complicate the causal inference process. In particular, anomaly hidden confounders which exceed the typical range can lead to high variance estimates. Moreover, in continuous time settings with irregular samples, it is challenging to directly handle the dynamics of causality. In this paper, we leverage recent advances in Lipschitz regularization and neural controlled differential equations (CDE) to develop an effective and scalable solution, namely LipCDE, to address the above challenges. LipCDE can directly model the dynamic causal relationships between historical data and outcomes with irregular samples by considering the boundary of hidden confounders given by Lipschitz constrained neural networks. Furthermore, we conduct extensive experiments on both synthetic and real world datasets to demonstrate the effectiveness and scalability of LipCDE.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Defu and Enouen, James and Wang, Yujing and Song, Xiangchen and Meng, Chuizheng and Niu, Hao and Liu, Yan}, year={2023}, month={Jun.}, pages={6897-6905} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25844/25616", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25844", + "pdf_size": 457302, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12175513870648605717&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "usc.edu;usc.edu;pku.edu.cn;cmu.edu;usc.edu;kddi.com;usc.edu", + "email": "usc.edu;usc.edu;pku.edu.cn;cmu.edu;usc.edu;kddi.com;usc.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;0;3;0", + "aff_unique_norm": "University of Southern California;Peking University;Carnegie Mellon University;KDDI Research", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.usc.edu;http://www.pku.edu.cn;https://www.cmu.edu;https://www.kddi-research.com", + "aff_unique_abbr": "USC;Peking U;CMU;KDDI", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0;1;0;0;2;0", + "aff_country_unique": "United States;China;Japan" + }, + { + "id": "article-26697", + "title": "Evaluating Digital Agriculture Recommendations with Causal Inference", + "track": "aaai special track", + "status": "Technical", + "abstract": "In contrast to the rapid digitalization of several industries, agriculture suffers from low adoption of smart farming tools. Even though recent advancements in AI-driven digital agriculture can offer high-performing predictive functionalities, they lack tangible quantitative evidence on their benefits to the farmers. Field experiments can derive such evidence, but are often costly, time consuming and hence limited in scope and scale of application. To this end, we propose an observational causal inference framework for the empirical evaluation of the impact of digital tools on target farm performance indicators (e.g., yield in this case). This way, we can increase farmers' trust via enhancing the transparency of the digital agriculture market, and in turn accelerate the adoption of technologies that aim to secure farmer income resilience and global agricultural sustainability against a changing climate. As a case study, we designed and implemented a recommendation system for the optimal sowing time of cotton based on numerical weather predictions, which was used by a farmers' cooperative during the growing season of 2021. We then leverage agricultural knowledge, collected yield data, and environmental information to develop a causal graph of the farm system. Using the back-door criterion, we identify the impact of sowing recommendations on the yield and subsequently estimate it using linear regression, matching, inverse propensity score weighting and meta-learners. 
The results revealed that a field sown according to our recommendations exhibited a statistically significant yield increase that ranged from 12% to 17%, depending on the method. The effect estimates were robust, as indicated by the agreement among the estimation methods and four successful refutation tests. We argue that this approach can be implemented for decision support systems of other fields, extending their evaluation beyond a performance assessment of internal functionalities.", + "primary_area": "ai for social impact", + "author": "Ilias Tsoumas; Georgios Giannarakis; Vasileios Sitokonstantinou; Alkiviadis Koukos; Dimitra Loka; Nikolaos Bartsotas; Charalampos Kontoes; Ioannis Athanasiadis", + "authorids": "", + "aff": "BEYOND Centre, IAASARS, National Observatory of Athens+ Wageningen University & Research; BEYOND Centre, IAASARS, National Observatory of Athens+ Wageningen University & Research; BEYOND Centre, IAASARS, National Observatory of Athens; BEYOND Centre, IAASARS, National Observatory of Athens; Hellenic Agricultural Organization ELGO DIMITRA; BEYOND Centre, IAASARS, National Observatory of Athens; BEYOND Centre, IAASARS, National Observatory of Athens; Wageningen University & Research", + "bibtex": "@article{Tsoumas_Giannarakis_Sitokonstantinou_Koukos_Loka_Bartsotas_Kontoes_Athanasiadis_2023, title={Evaluating Digital Agriculture Recommendations with Causal Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26697}, DOI={10.1609/aaai.v37i12.26697}, abstractNote={In contrast to the rapid digitalization of several industries, agriculture suffers from low adoption of smart farming tools. Even though recent advancements in AI-driven digital agriculture can offer high-performing predictive functionalities, they lack tangible quantitative evidence on their benefits to the farmers. Field experiments can derive such evidence, but are often costly, time consuming and hence limited in scope and scale of application. 
To this end, we propose an observational causal inference framework for the empirical evaluation of the impact of digital tools on target farm performance indicators (e.g., yield in this case). This way, we can increase farmers\u2019 trust via enhancing the transparency of the digital agriculture market, and in turn accelerate the adoption of technologies that aim to secure farmer income resilience and global agricultural sustainability against a changing climate. As a case study, we designed and implemented a recommendation system for the optimal sowing time of cotton based on numerical weather predictions, which was used by a farmers\u2019 cooperative during the growing season of 2021. We then leverage agricultural knowledge, collected yield data, and environmental information to develop a causal graph of the farm system. Using the back-door criterion, we identify the impact of sowing recommendations on the yield and subsequently estimate it using linear regression, matching, inverse propensity score weighting and meta-learners. The results revealed that a field sown according to our recommendations exhibited a statistically significant yield increase that ranged from 12% to 17%, depending on the method. The effect estimates were robust, as indicated by the agreement among the estimation methods and four successful refutation tests. 
We argue that this approach can be implemented for decision support systems of other fields, extending their evaluation beyond a performance assessment of internal functionalities.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tsoumas, Ilias and Giannarakis, Georgios and Sitokonstantinou, Vasileios and Koukos, Alkiviadis and Loka, Dimitra and Bartsotas, Nikolaos and Kontoes, Charalampos and Athanasiadis, Ioannis}, year={2023}, month={Jun.}, pages={14514-14522} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26697/26469", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26697", + "pdf_size": 755475, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10482708909087198292&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "noa.gr;noa.gr;noa.gr;noa.gr;elgo.gr;noa.gr;noa.gr;wur.nl", + "email": "noa.gr;noa.gr;noa.gr;noa.gr;elgo.gr;noa.gr;noa.gr;wur.nl", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0;0;2;0;0;1", + "aff_unique_norm": "National Observatory of Athens;Wageningen University & Research;Hellenic Agricultural Organization", + "aff_unique_dep": "BEYOND Centre, IAASARS;;", + "aff_unique_url": "http://www.noa.gr/;https://www.wur.nl;https://www.elgo.gr", + "aff_unique_abbr": ";WUR;ELGO DIMITRA", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;0;0;0;0;0;1", + "aff_country_unique": "Greece;Netherlands" + }, + { + "id": "article-25778", + "title": "Evaluating Epistemic Logic Programs via Answer Set Programming with Quantifiers", + "track": "main", + "status": "Technical", + "abstract": "In this paper we introduce a simple way to evaluate epistemic logic programs by means of answer set programming with quantifiers, a recently proposed extension of answer set programming. 
The method can easily be adapted for most of the many semantics that were proposed for epistemic logic programs. We evaluate the proposed transformation on existing benchmarks using a recently proposed solver for answer set programming with quantifiers, which relies on QBF solvers.", + "primary_area": "knowledge representation and reasoning", + "author": "Wolfgang Faber; Michael Morak", + "authorids": "", + "aff": "University of Klagenfurt, Austria; University of Klagenfurt, Austria", + "bibtex": "@article{Faber_Morak_2023, title={Evaluating Epistemic Logic Programs via Answer Set Programming with Quantifiers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25778}, DOI={10.1609/aaai.v37i5.25778}, abstractNote={In this paper we introduce a simple way to evaluate epistemic logic programs by means of answer set programming with quantifiers, a recently proposed extension of answer set programming. The method can easily be adapted for most of the many semantics that were proposed for epistemic logic programs. 
We evaluate the proposed transformation on existing benchmarks using a recently proposed solver for answer set programming with quantifiers, which relies on QBF solvers.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Faber, Wolfgang and Morak, Michael}, year={2023}, month={Jun.}, pages={6322-6329} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25778/25550", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25778", + "pdf_size": 137617, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17841071383592001774&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "aau.at;aau.at", + "email": "aau.at;aau.at", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Klagenfurt", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uni-klagenfurt.at", + "aff_unique_abbr": "Uni Klagenfurt", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Austria" + }, + { + "id": "article-27011", + "title": "Evaluating Factors Influencing COVID-19 Outcomes across Countries Using Decision Trees (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "While humanity prepares for a post-pandemic world and a return to normality through worldwide vaccination campaigns, each country experienced different levels of impact based on natural, political, regulatory, and socio-economic factors. To prepare for a possible future with COVID-19 and similar outbreaks, it is imperative to understand how each of these factors impacted spread and mortality. We train and tune two decision tree regression models to predict COVID-related cases and deaths using a multitude of features. 
Our findings suggest that, at the country-level, GDP per capita and comorbidity mortality rate are best predictors for both outcomes. Furthermore, latitude and smoking prevalence are also significantly related to COVID-related spread and mortality.", + "primary_area": "", + "author": "Aniruddha Pokhrel; Nikesh Subedi; Saurav Keshari Aryal", + "authorids": "", + "aff": "Howard University; Howard University; Howard University", + "bibtex": "@article{Pokhrel_Subedi_Aryal_2024, title={Evaluating Factors Influencing COVID-19 Outcomes across Countries Using Decision Trees (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27011}, DOI={10.1609/aaai.v37i13.27011}, abstractNote={While humanity prepares for a post-pandemic world and a return to normality through worldwide vaccination campaigns, each country experienced different levels of impact based on natural, political, regulatory, and socio-economic factors. To prepare for a possible future with COVID-19 and similar outbreaks, it is imperative to understand how each of these factors impacted spread and mortality. We train and tune two decision tree regression models to predict COVID-related cases and deaths using a multitude of features. Our findings suggest that, at the country-level, GDP per capita and comorbidity mortality rate are best predictors for both outcomes. 
Furthermore, latitude and smoking prevalence are also significantly related to COVID-related spread and mortality.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pokhrel, Aniruddha and Subedi, Nikesh and Aryal, Saurav Keshari}, year={2024}, month={Jul.}, pages={16302-16303} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27011/26783", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27011", + "pdf_size": 59786, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:XbMm8Fybq2UJ:scholar.google.com/&scioq=Evaluating+Factors+Influencing+COVID-19+Outcomes+across+Countries+Using+Decision+Trees+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "bison.howard.edu;bison.howard.edu;howard.edu", + "email": "bison.howard.edu;bison.howard.edu;howard.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Howard University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.howard.edu", + "aff_unique_abbr": "HU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26786", + "title": "Evaluating Model-Free Reinforcement Learning toward Safety-Critical Tasks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Safety comes first in many real-world applications involving autonomous agents. Despite a large number of reinforcement learning (RL) methods focusing on safety-critical tasks, there is still a lack of high-quality evaluation of those algorithms that adheres to safety constraints at each decision step under complex and unknown dynamics. In this paper, we revisit prior work in this scope from the perspective of state-wise safe RL and categorize them as projection-based, recovery-based, and optimization-based approaches, respectively. 
Furthermore, we propose Unrolling Safety Layer (USL), a joint method that combines safety optimization and safety projection. This novel technique explicitly enforces hard constraints via the deep unrolling architecture and enjoys structural advantages in navigating the trade-off between reward improvement and constraint satisfaction. To facilitate further research in this area, we reproduce related algorithms in a unified pipeline and incorporate them into SafeRL-Kit, a toolkit that provides off-the-shelf interfaces and evaluation utilities for safety-critical tasks. We then perform a comparative study of the involved algorithms on six benchmarks ranging from robotic control to autonomous driving. The empirical results provide an insight into their applicability and robustness in learning zero-cost-return policies without task-dependent handcrafting. The project page is available at https://sites.google.com/view/saferlkit.", + "primary_area": "safe and robust ai", + "author": "Linrui Zhang; Qin Zhang; Li Shen; Bo Yuan; Xueqian Wang; Dacheng Tao", + "authorids": "", + "aff": ";;;;;", + "bibtex": "@article{Zhang_Zhang_Shen_Yuan_Wang_Tao_2023, title={Evaluating Model-Free Reinforcement Learning toward Safety-Critical Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26786}, DOI={10.1609/aaai.v37i12.26786}, abstractNote={Safety comes first in many real-world applications involving autonomous agents. Despite a large number of reinforcement learning (RL) methods focusing on safety-critical tasks, there is still a lack of high-quality evaluation of those algorithms that adheres to safety constraints at each decision step under complex and unknown dynamics. In this paper, we revisit prior work in this scope from the perspective of state-wise safe RL and categorize them as projection-based, recovery-based, and optimization-based approaches, respectively. 
Furthermore, we propose Unrolling Safety Layer (USL), a joint method that combines safety optimization and safety projection. This novel technique explicitly enforces hard constraints via the deep unrolling architecture and enjoys structural advantages in navigating the trade-off between reward improvement and constraint satisfaction. To facilitate further research in this area, we reproduce related algorithms in a unified pipeline and incorporate them into SafeRL-Kit, a toolkit that provides off-the-shelf interfaces and evaluation utilities for safety-critical tasks. We then perform a comparative study of the involved algorithms on six benchmarks ranging from robotic control to autonomous driving. The empirical results provide an insight into their applicability and robustness in learning zero-cost-return policies without task-dependent handcrafting. The project page is available at https://sites.google.com/view/saferlkit.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Linrui and Zhang, Qin and Shen, Li and Yuan, Bo and Wang, Xueqian and Tao, Dacheng}, year={2023}, month={Jun.}, pages={15313-15321} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26786/26558", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26786", + "pdf_size": 673881, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10895496788290167255&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "https://sites.google.com/view/saferlkit", + "author_num": 6 + }, + { + "id": "article-26986", + "title": "Evaluating Robustness of Vision Transformers on Imbalanced Datasets (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Data in the real world is commonly imbalanced across classes. 
Training neural networks on imbalanced datasets often leads to poor performance on rare classes. Existing work in this area has primarily focused on Convolution Neural Networks (CNN), which are increasingly being replaced by Self-Attention-based Vision Transformers (ViT). Fundamentally, ViTs differ from CNNs in that they offer the flexibility in learning the appropriate inductive bias conducive to improving performance. This work is among the first to evaluate the performance of ViTs under class imbalance. We find that accuracy degradation in the presence of class imbalance is much more prominent in ViTs compared to CNNs. This degradation can be partially mitigated through loss reweighting - a popular strategy that increases the loss contributed by rare classes. We investigate the impact of loss reweighting on different components of a ViT, namely, the patch embedding, self-attention backbone, and linear classifier. Our ongoing investigations reveal that loss reweighting impacts mostly the linear classifier and self-attention backbone while having a small and negligible effect on the embedding layer.", + "primary_area": "", + "author": "Kevin Li; Rahul Duggal; Duen Horng Chau", + "authorids": "", + "aff": "Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology", + "bibtex": "@article{Li_Duggal_Chau_2024, title={Evaluating Robustness of Vision Transformers on Imbalanced Datasets (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26986}, DOI={10.1609/aaai.v37i13.26986}, abstractNote={Data in the real world is commonly imbalanced across classes. Training neural networks on imbalanced datasets often leads to poor performance on rare classes. Existing work in this area has primarily focused on Convolution Neural Networks (CNN), which are increasingly being replaced by Self-Attention-based Vision Transformers (ViT). 
Fundamentally, ViTs differ from CNNs in that they offer the flexibility in learning the appropriate inductive bias conducive to improving performance. This work is among the first to evaluate the performance of ViTs under class imbalance. We find that accuracy degradation in the presence of class imbalance is much more prominent in ViTs compared to CNNs. This degradation can be partially mitigated through loss reweighting - a popular strategy that increases the loss contributed by rare classes. We investigate the impact of loss reweighting on different components of a ViT, namely, the patch embedding, self-attention backbone, and linear classifier. Our ongoing investigations reveal that loss reweighting impacts mostly the linear classifier and self-attention backbone while having a small and negligible effect on the embedding layer.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Kevin and Duggal, Rahul and Chau, Duen Horng}, year={2024}, month={Jul.}, pages={16252-16253} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26986/26758", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26986", + "pdf_size": 64935, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15915218935628862148&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gatech.edu;gatech.edu;gatech.edu", + "email": "gatech.edu;gatech.edu;gatech.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25746", + "title": "Evaluating and Improving Interactions with Hazy Oracles", + "track": "main", + "status": "Technical", 
+ "abstract": "Many AI systems integrate sensor inputs, world knowledge, and human-provided information to perform inference. While such systems often treat the human input as flawless, humans are better thought of as hazy oracles whose input may be ambiguous or outside of the AI system's understanding. In such situations it makes sense for the AI system to defer its inference while it disambiguates the human-provided information by, for example, asking the human to rephrase the query. Though this approach has been considered in the past, current work is typically limited to application-specific methods and non-standardized human experiments. We instead introduce and formalize a general notion of deferred inference. Using this formulation, we then propose a novel evaluation centered around the Deferred Error Volume (DEV) metric, which explicitly considers the tradeoff between error reduction and the additional human effort required to achieve it. We demonstrate this new formalization and an innovative deferred inference method on the disparate tasks of Single-Target Video Object Tracking and Referring Expression Comprehension, ultimately reducing error by up to 48% without any change to the underlying model or its parameters.", + "primary_area": "humans and ai", + "author": "Stephan J. Lemmer; Jason J. Corso", + "authorids": "", + "aff": "University of Michigan, Ann Arbor, Michigan, USA; University of Michigan, Ann Arbor, Michigan, USA", + "bibtex": "@article{Lemmer_Corso_2023, title={Evaluating and Improving Interactions with Hazy Oracles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25746}, DOI={10.1609/aaai.v37i5.25746}, abstractNote={Many AI systems integrate sensor inputs, world knowledge, and human-provided information to perform inference. While such systems often treat the human input as flawless, humans are better thought of as hazy oracles whose input may be ambiguous or outside of the AI system\u2019s understanding. 
In such situations it makes sense for the AI system to defer its inference while it disambiguates the human-provided information by, for example, asking the human to rephrase the query. Though this approach has been considered in the past, current work is typically limited to application-specific methods and non-standardized human experiments. We instead introduce and formalize a general notion of deferred inference. Using this formulation, we then propose a novel evaluation centered around the Deferred Error Volume (DEV) metric, which explicitly considers the tradeoff between error reduction and the additional human effort required to achieve it. We demonstrate this new formalization and an innovative deferred inference method on the disparate tasks of Single-Target Video Object Tracking and Referring Expression Comprehension, ultimately reducing error by up to 48% without any change to the underlying model or its parameters.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lemmer, Stephan J. 
and Corso, Jason J.}, year={2023}, month={Jun.}, pages={6039-6047} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25746/25518", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25746", + "pdf_size": 3357872, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18158967038226172021&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "umich.edu;umich.edu", + "email": "umich.edu;umich.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Michigan", + "aff_unique_dep": "", + "aff_unique_url": "https://www.umich.edu", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Ann Arbor", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26643", + "title": "Event Process Typing via Hierarchical Optimal Transport", + "track": "main", + "status": "Technical", + "abstract": "Understanding intention behind event processes in texts is important to many applications. One challenging task in this line is event process typing, which aims to tag the process with one action label and one object label describing the overall action of the process and object the process likely affects respectively. To tackle this task, existing methods mainly rely on the matching of the event process level and label level representation, which ignores two important characteristics: Process Hierarchy and Label Hierarchy. In this paper, we propose a Hierarchical Optimal Transport (HOT) method to address the above problem. Specifically, we first explicitly extract the process hierarchy and label hierarchy. Then the HOT optimally matches the two types of hierarchy. 
Experimental results show that our model outperforms the baseline models, illustrating the effectiveness of our model.", + "primary_area": "speech natural language processing", + "author": "Bo Zhou; Yubo Chen; Kang Liu; Jun Zhao", + "authorids": "", + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences + National Laboratory of Pattern Recognition, CASIA; School of Artificial Intelligence, University of Chinese Academy of Sciences + National Laboratory of Pattern Recognition, CASIA; School of Artificial Intelligence, University of Chinese Academy of Sciences + National Laboratory of Pattern Recognition, CASIA + Beijing Academy of Artificial Intelligence; School of Artificial Intelligence, University of Chinese Academy of Sciences + National Laboratory of Pattern Recognition, CASIA", + "bibtex": "@article{Zhou_Chen_Liu_Zhao_2023, title={Event Process Typing via Hierarchical Optimal Transport}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26643}, DOI={10.1609/aaai.v37i11.26643}, abstractNote={Understanding intention behind event processes in texts is important to many applications. One challenging task in this line is event process typing, which aims to tag the process with one action label and one object label describing the overall action of the process and object the process likely affects respectively. To tackle this task, existing methods mainly rely on the matching of the event process level and label level representation, which ignores two important characteristics: Process Hierarchy and Label Hierarchy. In this paper, we propose a Hierarchical Optimal Transport (HOT) method to address the above problem. Specifically, we first explicitly extract the process hierarchy and label hierarchy. Then the HOT optimally matches the two types of hierarchy. 
Experimental results show that our model outperforms the baseline models, illustrating the effectiveness of our model.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Bo and Chen, Yubo and Liu, Kang and Zhao, Jun}, year={2023}, month={Jun.}, pages={14038-14046} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26643/26415", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26643", + "pdf_size": 412859, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10518114898810451334&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1+2;0+1", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences, Institute of Automation;Beijing Academy of Artificial Intelligence", + "aff_unique_dep": "School of Artificial Intelligence;National Laboratory of Pattern Recognition;", + "aff_unique_url": "http://www.ucas.ac.cn;http://www.ia.cas.cn;https://www.baaic.cn", + "aff_unique_abbr": "UCAS;CASIA;BAAI", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26698", + "title": "Everyone\u2019s Voice Matters: Quantifying Annotation Disagreement Using Demographic Information", + "track": "aaai special track", + "status": "Technical", + "abstract": "In NLP annotation, it is common to have multiple annotators label the text and then obtain the ground truth labels based on major annotators\u2019 agreement. However, annotators are individuals with different backgrounds and various voices. 
When annotation tasks become subjective, such as detecting politeness, offense, and social norms, annotators\u2019 voices differ and vary. Their diverse voices may represent the true distribution of people\u2019s opinions on subjective matters. Therefore, it is crucial to study the disagreement from annotation to understand which content is controversial from the annotators. In our research, we extract disagreement labels from five subjective datasets, then fine-tune language models to predict annotators\u2019 disagreement. Our results show that knowing annotators\u2019 demographic information (e.g., gender, ethnicity, education level), in addition to the task text, helps predict the disagreement. To investigate the effect of annotators\u2019 demographics on their disagreement level, we simulate different combinations of their artificial demographics and explore the variance of the prediction to distinguish the disagreement from the inherent controversy from text content and the disagreement in the annotators\u2019 perspective. Overall, we propose an innovative disagreement prediction mechanism for better design of the annotation process that will achieve more accurate and inclusive results for NLP systems. Our code and dataset are publicly available.", + "primary_area": "ai for social impact", + "author": "Ruyuan Wan; Jaehyung Kim; Dongyeop Kang", + "authorids": "", + "aff": "University of Notre Dame; KAIST; University of Minnesota", + "bibtex": "@article{Wan_Kim_Kang_2023, title={Everyone\u2019s Voice Matters: Quantifying Annotation Disagreement Using Demographic Information}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26698}, DOI={10.1609/aaai.v37i12.26698}, abstractNote={In NLP annotation, it is common to have multiple annotators label the text and then obtain the ground truth labels based on major annotators\u2019 agreement. However, annotators are individuals with different backgrounds and various voices. 
When annotation tasks become subjective, such as detecting politeness, offense, and social norms, annotators\u2019 voices differ and vary. Their diverse voices may represent the true distribution of people\u2019s opinions on subjective matters. Therefore, it is crucial to study the disagreement from annotation to understand which content is controversial from the annotators. In our research, we extract disagreement labels from five subjective datasets, then fine-tune language models to predict annotators\u2019 disagreement. Our results show that knowing annotators\u2019 demographic information (e.g., gender, ethnicity, education level), in addition to the task text, helps predict the disagreement. To investigate the effect of annotators\u2019 demographics on their disagreement level, we simulate different combinations of their artificial demographics and explore the variance of the prediction to distinguish the disagreement from the inherent controversy from text content and the disagreement in the annotators\u2019 perspective. Overall, we propose an innovative disagreement prediction mechanism for better design of the annotation process that will achieve more accurate and inclusive results for NLP systems. 
Our code and dataset are publicly available.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wan, Ruyuan and Kim, Jaehyung and Kang, Dongyeop}, year={2023}, month={Jun.}, pages={14523-14530} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26698/26470", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26698", + "pdf_size": 364382, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3263319260569156131&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "nd.edu;kaist.ac.kr;umn.edu", + "email": "nd.edu;kaist.ac.kr;umn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "University of Notre Dame;Korea Advanced Institute of Science and Technology;University of Minnesota", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nd.edu;https://www.kaist.ac.kr;https://www.minnesota.edu", + "aff_unique_abbr": "Notre Dame;KAIST;UMN", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "article-26125", + "title": "Evidential Conditional Neural Processes", + "track": "main", + "status": "Technical", + "abstract": "The Conditional Neural Process (CNP) family of models offer a promising direction to tackle few-shot problems by achieving better scalability and competitive predictive performance. However, the current CNP models only capture the overall uncertainty for the prediction made on a target data point. They lack a systematic fine-grained quantification on the distinct sources of uncertainty that are essential for model training and decision-making under the few-shot setting. 
We propose Evidential Conditional Neural Processes (ECNP), which replace the standard Gaussian distribution used by CNP with a much richer hierarchical Bayesian structure through evidential learning to achieve epistemic-aleatoric uncertainty decomposition. The evidential hierarchical structure also leads to a theoretically justified robustness over noisy training tasks. Theoretical analysis on the proposed ECNP establishes the relationship with CNP while offering deeper insights on the roles of the evidential parameters. Extensive experiments conducted on both synthetic and real-world data demonstrate the effectiveness of our proposed model in various few-shot settings.", + "primary_area": "machine learning iii", + "author": "Deep Shankar Pandey; Qi Yu", + "authorids": "", + "aff": "Rochester Institute of Technology; Rochester Institute of Technology", + "bibtex": "@article{Pandey_Yu_2023, title={Evidential Conditional Neural Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26125}, DOI={10.1609/aaai.v37i8.26125}, abstractNote={The Conditional Neural Process (CNP) family of models offer a promising direction to tackle few-shot problems by achieving better scalability and competitive predictive performance. However, the current CNP models only capture the overall uncertainty for the prediction made on a target data point. They lack a systematic fine-grained quantification on the distinct sources of uncertainty that are essential for model training and decision-making under the few-shot setting. We propose Evidential Conditional Neural Processes (ECNP), which replace the standard Gaussian distribution used by CNP with a much richer hierarchical Bayesian structure through evidential learning to achieve epistemic-aleatoric uncertainty decomposition. The evidential hierarchical structure also leads to a theoretically justified robustness over noisy training tasks. 
Theoretical analysis on the proposed ECNP establishes the relationship with CNP while offering deeper insights on the roles of the evidential parameters. Extensive experiments conducted on both synthetic and real-world data demonstrate the effectiveness of our proposed model in various few-shot settings.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pandey, Deep Shankar and Yu, Qi}, year={2023}, month={Jun.}, pages={9389-9397} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26125/25897", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26125", + "pdf_size": 1562615, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12928496243827689429&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "rit.edu;rit.edu", + "email": "rit.edu;rit.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Rochester Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rit.edu", + "aff_unique_abbr": "RIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26134", + "title": "Experimental Observations of the Topology of Convolutional Neural Network Activations", + "track": "main", + "status": "Technical", + "abstract": "Topological data analysis (TDA) is a branch of computational mathematics, bridging algebraic topology and data science, that provides compact, noise-robust representations of complex structures. Deep neural networks (DNNs) learn millions of parameters associated with a series of transformations defined by the model architecture resulting in high-dimensional, difficult to interpret internal representations of input data. 
As DNNs become more ubiquitous across multiple sectors of our society, there is increasing recognition that mathematical methods are needed to aid analysts, researchers, and practitioners in understanding and interpreting how these models' internal representations relate to the final classification. In this paper we apply cutting edge techniques from TDA with the goal of gaining insight towards interpretability of convolutional neural networks used for image classification. We use two common TDA approaches to explore several methods for modeling hidden layer activations as high-dimensional point clouds, and provide experimental evidence that these point clouds capture valuable structural information about the model's process. First, we demonstrate that a distance metric based on persistent homology can be used to quantify meaningful differences between layers and discuss these distances in the broader context of existing representational similarity metrics for neural network interpretability. Second, we show that a mapper graph can provide semantic insight as to how these models organize hierarchical class knowledge at each layer. 
These observations demonstrate that TDA is a useful tool to help deep learning practitioners unlock the hidden structures of their models.", + "primary_area": "machine learning iii", + "author": "Emilie Purvine; Davis Brown; Brett Jefferson; Cliff Joslyn; Brenda Praggastis; Archit Rathore; Madelyn Shapiro; Bei Wang; Youjia Zhou", + "authorids": "", + "aff": "Pacific Northwest National Laboratory; Pacific Northwest National Laboratory; Pacific Northwest National Laboratory; Pacific Northwest National Laboratory; Pacific Northwest National Laboratory; Scientific Computing and Imaging (SCI) Institute and School of Computing, University of Utah; Pacific Northwest National Laboratory; Scientific Computing and Imaging (SCI) Institute and School of Computing, University of Utah; Scientific Computing and Imaging (SCI) Institute and School of Computing, University of Utah", + "bibtex": "@article{Purvine_Brown_Jefferson_Joslyn_Praggastis_Rathore_Shapiro_Wang_Zhou_2023, title={Experimental Observations of the Topology of Convolutional Neural Network Activations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26134}, DOI={10.1609/aaai.v37i8.26134}, abstractNote={Topological data analysis (TDA) is a branch of computational mathematics, bridging algebraic topology and data science, that provides compact, noise-robust representations of complex structures. Deep neural networks (DNNs) learn millions of parameters associated with a series of transformations defined by the model architecture resulting in high-dimensional, difficult to interpret internal representations of input data. As DNNs become more ubiquitous across multiple sectors of our society, there is increasing recognition that mathematical methods are needed to aid analysts, researchers, and practitioners in understanding and interpreting how these models\u2019 internal representations relate to the final classification. 
In this paper we apply cutting edge techniques from TDA with the goal of gaining insight towards interpretability of convolutional neural networks used for image classification. We use two common TDA approaches to explore several methods for modeling hidden layer activations as high-dimensional point clouds, and provide experimental evidence that these point clouds capture valuable structural information about the model\u2019s process. First, we demonstrate that a distance metric based on persistent homology can be used to quantify meaningful differences between layers and discuss these distances in the broader context of existing representational similarity metrics for neural network interpretability. Second, we show that a mapper graph can provide semantic insight as to how these models organize hierarchical class knowledge at each layer. These observations demonstrate that TDA is a useful tool to help deep learning practitioners unlock the hidden structures of their models.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Purvine, Emilie and Brown, Davis and Jefferson, Brett and Joslyn, Cliff and Praggastis, Brenda and Rathore, Archit and Shapiro, Madelyn and Wang, Bei and Zhou, Youjia}, year={2023}, month={Jun.}, pages={9470-9479} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26134/25906", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26134", + "pdf_size": 13076176, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12416522971381062821&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "pnnl.gov;pnnl.gov;pnnl.gov;pnnl.gov;pnnl.gov;gmail.com;pnnl.gov;sci.utah.edu;sci.utah.edu", + "email": "pnnl.gov;pnnl.gov;pnnl.gov;pnnl.gov;pnnl.gov;gmail.com;pnnl.gov;sci.utah.edu;sci.utah.edu", + "github": "", + "project": "https://arxiv.org/", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;1;0;1;1", + "aff_unique_norm": "Pacific Northwest 
National Laboratory;University of Utah", + "aff_unique_dep": ";Scientific Computing and Imaging (SCI) Institute, School of Computing", + "aff_unique_url": "https://www.pnnl.gov;https://www.utah.edu", + "aff_unique_abbr": "PNNL;Utah", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Salt Lake City", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26970", + "title": "Expert Data Augmentation in Imitation Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Behavioral Cloning (BC) is a simple and effective imitation learning algorithm, which suffers from compounding error due to covariate shift. One solution is to use enough data for training. However, the amount of expert demonstrations available is usually limited. So we propose an effective method to augment expert demonstrations to alleviate the problem of compounding error in BC. It operates by estimating the similarity of states and filtering out transitions that can go back to the states similar to ones in expert demonstrations during the process of sampling. The data filtered out along with original expert demonstrations are used for training. We evaluate the performance of our method on several Atari tasks and continuous MuJoCo control tasks. 
Empirically, BC trained with the augmented data significantly outperform BC trained with the original expert demonstrations.", + "primary_area": "", + "author": "Fuguang Han; Zongzhang Zhang", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Han_Zhang_2024, title={Expert Data Augmentation in Imitation Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26970}, DOI={10.1609/aaai.v37i13.26970}, abstractNote={Behavioral Cloning (BC) is a simple and effective imitation learning algorithm, which suffers from compounding error due to covariate shift. One solution is to use enough data for training. However, the amount of expert demonstrations available is usually limited. So we propose an effective method to augment expert demonstrations to alleviate the problem of compounding error in BC. It operates by estimating the similarity of states and filtering out transitions that can go back to the states similar to ones in expert demonstrations during the process of sampling. The data filtered out along with original expert demonstrations are used for training. We evaluate the performance of our method on several Atari tasks and continuous MuJoCo control tasks. 
Empirically, BC trained with the augmented data significantly outperform BC trained with the original expert demonstrations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Han, Fuguang and Zhang, Zongzhang}, year={2024}, month={Jul.}, pages={16220-16221} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26970/26742", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26970", + "pdf_size": 166506, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3374378842490414741&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26526", + "title": "Explaining (Sarcastic) Utterances to Enhance Affect Understanding in Multimodal Dialogues", + "track": "main", + "status": "Technical", + "abstract": "Conversations emerge as the primary media for exchanging ideas and conceptions. From the listener\u2019s perspective, identifying various affective qualities, such as sarcasm, humour, and emotions, is paramount for comprehending the true connotation of the emitted utterance. However, one of the major hurdles faced in learning these affect dimensions is the presence of figurative language, viz. irony, metaphor, or sarcasm. We hypothesize that any detection system constituting the exhaustive and explicit presentation of the emitted utterance would improve the overall comprehension of the dialogue. 
To this end, we explore the task of Sarcasm Explanation in Dialogues, which aims to unfold the hidden irony behind sarcastic utterances. We propose MOSES, a deep neural network which takes a multimodal (sarcastic) dialogue instance as an input and generates a natural language sentence as its explanation. Subsequently, we leverage the generated explanation for various natural language understanding tasks in a conversational dialogue setup, such as sarcasm detection, humour identification, and emotion recognition. Our evaluation shows that MOSES outperforms the state-of-the-art system for SED by an average of \u223c2% on different evaluation metrics, such as ROUGE, BLEU, and METEOR. Further, we observe that leveraging the generated explanation advances three downstream tasks for affect classification \u2013 an average improvement of ~14% F1-score in the sarcasm detection task and \u223c2% in the humour identification and emotion recognition task. We also perform extensive analyses to assess the quality of the results.", + "primary_area": "speech natural language processing", + "author": "Shivani Kumar; Ishani Mondal; Md Shad Akhtar; Tanmoy Chakraborty", + "authorids": "", + "aff": "Indraprastha Institute of Information Technology Delhi, India; University of Maryland, College Park; Indraprastha Institute of Information Technology Delhi, India; Indian Institute of Technology Delhi, India", + "bibtex": "@article{Kumar_Mondal_Akhtar_Chakraborty_2023, title={Explaining (Sarcastic) Utterances to Enhance Affect Understanding in Multimodal Dialogues}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26526}, DOI={10.1609/aaai.v37i11.26526}, abstractNote={Conversations emerge as the primary media for exchanging ideas and conceptions. From the listener\u2019s perspective, identifying various affective qualities, such as sarcasm, humour, and emotions, is paramount for comprehending the true connotation of the emitted utterance. 
However, one of the major hurdles faced in learning these affect dimensions is the presence of figurative language, viz. irony, metaphor, or sarcasm. We hypothesize that any detection system constituting the exhaustive and explicit presentation of the emitted utterance would improve the overall comprehension of the dialogue. To this end, we explore the task of Sarcasm Explanation in Dialogues, which aims to unfold the hidden irony behind sarcastic utterances. We propose MOSES, a deep neural network which takes a multimodal (sarcastic) dialogue instance as an input and generates a natural language sentence as its explanation. Subsequently, we leverage the generated explanation for various natural language understanding tasks in a conversational dialogue setup, such as sarcasm detection, humour identification, and emotion recognition. Our evaluation shows that MOSES outperforms the state-of-the-art system for SED by an average of \u223c2% on different evaluation metrics, such as ROUGE, BLEU, and METEOR. Further, we observe that leveraging the generated explanation advances three downstream tasks for affect classification \u2013 an average improvement of ~14% F1-score in the sarcasm detection task and \u223c2% in the humour identification and emotion recognition task. 
We also perform extensive analyses to assess the quality of the results.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kumar, Shivani and Mondal, Ishani and Akhtar, Md Shad and Chakraborty, Tanmoy}, year={2023}, month={Jun.}, pages={12986-12994} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26526/26298", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26526", + "pdf_size": 275781, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1704699699916487077&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 12, + "aff_domain": "iiitd.ac.in;gmail.com;iiitd.ac.in;iitd.ac.in", + "email": "iiitd.ac.in;gmail.com;iiitd.ac.in;iitd.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Indraprastha Institute of Information Technology;University of Maryland;Indian Institute of Technology Delhi", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.iiitd.ac.in;https://www.umd.edu;https://www.iitdelhi.ac.in", + "aff_unique_abbr": "IIIT Delhi;UMD;IIT Delhi", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Delhi;College Park", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "India;United States" + }, + { + "id": "article-27014", + "title": "Explaining Large Language Model-Based Neural Semantic Parsers (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "While large language models (LLMs) have demonstrated strong capability in structured prediction tasks such as semantic parsing, few amounts of research have explored the underlying mechanisms of their success. 
Our work studies different methods for explaining an LLM-based semantic parser and qualitatively discusses the explained model behaviors, hoping to inspire future research toward better understanding them.", + "primary_area": "", + "author": "Daking Rai; Yilun Zhou; Bailin Wang; Ziyu Yao", + "authorids": "", + "aff": "George Mason University; Massachusetts Institute of Technology; Massachusetts Institute of Technology; George Mason University", + "bibtex": "@article{Rai_Zhou_Wang_Yao_2024, title={Explaining Large Language Model-Based Neural Semantic Parsers (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27014}, DOI={10.1609/aaai.v37i13.27014}, abstractNote={While large language models (LLMs) have demonstrated strong capability in structured prediction tasks such as semantic parsing, few amounts of research have explored the underlying mechanisms of their success. Our work studies different methods for explaining an LLM-based semantic parser and qualitatively discusses the explained model behaviors, hoping to inspire future research toward better understanding them.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rai, Daking and Zhou, Yilun and Wang, Bailin and Yao, Ziyu}, year={2024}, month={Jul.}, pages={16308-16309} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27014/26786", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27014", + "pdf_size": 725687, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10754727383475472200&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmu.edu;mit.edu;mit.edu;gmu.edu", + "email": "gmu.edu;mit.edu;mit.edu;gmu.edu", + "github": "https://github.com/HKUNLP/UnifiedSKG", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "George Mason University;Massachusetts Institute of Technology", + "aff_unique_dep": ";", + 
"aff_unique_url": "https://www.gmu.edu;https://web.mit.edu", + "aff_unique_abbr": "GMU;MIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26399", + "title": "Explaining Model Confidence Using Counterfactuals", + "track": "main", + "status": "Technical", + "abstract": "Displaying confidence scores in human-AI interaction has been shown to help build trust between humans and AI systems. However, most existing research uses only the confidence score as a form of communication. As confidence scores are just another model output, users may want to understand why the algorithm is confident to determine whether to accept the confidence score. In this paper, we show that counterfactual explanations of confidence scores help study participants to better understand and better trust a machine learning model's prediction. We present two methods for understanding model confidence using counterfactual explanation: (1) based on counterfactual examples; and (2) based on visualisation of the counterfactual space. 
Both increase understanding and trust for study participants over a baseline of no explanation, but qualitative results show that they are used quite differently, leading to recommendations of when to use each one and directions of designing better explanations.", + "primary_area": "philosophy and ethics of ai", + "author": "Thao Le; Tim Miller; Ronal Singh; Liz Sonenberg", + "authorids": "", + "aff": "School of Computing and Information Systems, The University of Melbourne; School of Computing and Information Systems, The University of Melbourne; School of Computing and Information Systems, The University of Melbourne; School of Computing and Information Systems, The University of Melbourne", + "bibtex": "@article{Le_Miller_Singh_Sonenberg_2023, title={Explaining Model Confidence Using Counterfactuals}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26399}, DOI={10.1609/aaai.v37i10.26399}, abstractNote={Displaying confidence scores in human-AI interaction has been shown to help build trust between humans and AI systems. However, most existing research uses only the confidence score as a form of communication. As confidence scores are just another model output, users may want to understand why the algorithm is confident to determine whether to accept the confidence score. In this paper, we show that counterfactual explanations of confidence scores help study participants to better understand and better trust a machine learning model\u2019s prediction. We present two methods for understanding model confidence using counterfactual explanation: (1) based on counterfactual examples; and (2) based on visualisation of the counterfactual space. 
Both increase understanding and trust for study participants over a baseline of no explanation, but qualitative results show that they are used quite differently, leading to recommendations of when to use each one and directions of designing better explanations.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Le, Thao and Miller, Tim and Singh, Ronal and Sonenberg, Liz}, year={2023}, month={Jun.}, pages={11856-11864} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26399/26171", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26399", + "pdf_size": 289029, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9597589304006965574&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "student.unimelb.edu.au;unimelb.edu.au;unimelb.edu.au;unimelb.edu.au", + "email": "student.unimelb.edu.au;unimelb.edu.au;unimelb.edu.au;unimelb.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The University of Melbourne", + "aff_unique_dep": "School of Computing and Information Systems", + "aff_unique_url": "https://www.unimelb.edu.au", + "aff_unique_abbr": "UniMelb", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Melbourne", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26132", + "title": "Explaining Random Forests Using Bipolar Argumentation and Markov Networks", + "track": "main", + "status": "Technical", + "abstract": "Random forests are decision tree ensembles that can be used \nto solve a variety of machine learning problems. However, as\nthe number of trees and their individual size can be large,\ntheir decision making process is often incomprehensible.\nWe show that their decision process can be naturally represented \nas an argumentation problem, which allows creating global explanations \nvia argumentative reasoning. 
We generalize sufficient and necessary \nargumentative explanations using a Markov network encoding, discuss \nthe relevance of these explanations and establish relationships to\nfamilies of abductive explanations from the literature. As the complexity \nof the explanation problems is high, we present an efficient approximation algorithm with probabilistic approximation guarantees.", + "primary_area": "machine learning iii", + "author": "Nico Potyka; Xiang Yin; Francesca Toni", + "authorids": "", + "aff": "Department of Computing, Imperial College London, London, UK; Department of Computing, Imperial College London, London, UK; Department of Computing, Imperial College London, London, UK", + "bibtex": "@article{Potyka_Yin_Toni_2023, title={Explaining Random Forests Using Bipolar Argumentation and Markov Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26132}, DOI={10.1609/aaai.v37i8.26132}, abstractNote={Random forests are decision tree ensembles that can be used to solve a variety of machine learning problems. However, as\nthe number of trees and their individual size can be large,\ntheir decision making process is often incomprehensible.\nWe show that their decision process can be naturally represented as an argumentation problem, which allows creating global explanations via argumentative reasoning. We generalize sufficient and necessary argumentative explanations using a Markov network encoding, discuss the relevance of these explanations and establish relationships to\nfamilies of abductive explanations from the literature. 
As the complexity of the explanation problems is high, we present an efficient approximation algorithm with probabilistic approximation guarantees.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Potyka, Nico and Yin, Xiang and Toni, Francesca}, year={2023}, month={Jun.}, pages={9453-9460} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26132/25904", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26132", + "pdf_size": 192133, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5822289180328550959&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "imperial.ac.uk;imperial.ac.uk;imperial.ac.uk", + "email": "imperial.ac.uk;imperial.ac.uk;imperial.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Imperial College London", + "aff_unique_dep": "Department of Computing", + "aff_unique_url": "https://www.imperial.ac.uk", + "aff_unique_abbr": "Imperial", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26920", + "title": "Explaining the Uncertainty in AI-Assisted Decision Making", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "The aim of this project is to improve human decision-making using explainability; specifically, how to explain the (un)certainty of machine learning models. Prior research has used uncertainty measures to promote trust and decision-making. However, the direction of explaining why the AI prediction is confident (or not confident) in its prediction needs to be addressed. 
By explaining the model uncertainty, we can promote trust, improve understanding and improve decision-making for users.", + "primary_area": "", + "author": "Thao Le", + "authorids": "", + "aff": "School of Computing and Information Systems, The University of Melbourne", + "bibtex": "@article{Le_2024, title={Explaining the Uncertainty in AI-Assisted Decision Making}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26920}, DOI={10.1609/aaai.v37i13.26920}, abstractNote={The aim of this project is to improve human decision-making using explainability; specifically, how to explain the (un)certainty of machine learning models. Prior research has used uncertainty measures to promote trust and decision-making. However, the direction of explaining why the AI prediction is confident (or not confident) in its prediction needs to be addressed. By explaining the model uncertainty, we can promote trust, improve understanding and improve decision-making for users.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Le, Thao}, year={2024}, month={Jul.}, pages={16119-16120} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26920/26692", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26920", + "pdf_size": 55153, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8559925239588522922&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 3, + "aff_domain": "student.unimelb.edu.au", + "email": "student.unimelb.edu.au", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "The University of Melbourne", + "aff_unique_dep": "School of Computing and Information Systems", + "aff_unique_url": "https://www.unimelb.edu.au", + "aff_unique_abbr": "UniMelb", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Melbourne", + "aff_country_unique_index": "0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25098", + 
"title": "Explicit Invariant Feature Induced Cross-Domain Crowd Counting", + "track": "main", + "status": "Technical", + "abstract": "Cross-domain crowd counting has shown progressively improved performance. However, most methods fail to explicitly consider the transferability of different features between source and target domains. In this paper, we propose an innovative explicit Invariant Feature induced Cross-domain Knowledge Transformation framework to address the inconsistent domain-invariant features of different domains. The main idea is to explicitly extract domain-invariant features from both source and target domains, which builds a bridge to transfer more rich knowledge between two domains. The framework consists of three parts, global feature decoupling (GFD), relation exploration and alignment (REA), and graph-guided knowledge enhancement (GKE). In the GFD module, domain-invariant features are efficiently decoupled from domain-specific ones in two domains, which allows the model to distinguish crowds features from backgrounds in the complex scenes. In the REA module both inter-domain relation graph (Inter-RG) and intra-domain relation graph (Intra-RG) are built. Specifically, Inter-RG aggregates multi-scale domain-invariant features between two domains and further aligns local-level invariant features. Intra-RG preserves taskrelated specific information to assist the domain alignment. Furthermore, GKE strategy models the confidence of pseudolabels to further enhance the adaptability of the target domain. Various experiments show our method achieves state-of-theart performance on the standard benchmarks. 
Code is available at https://github.com/caiyiqing/IF-CKT.", + "primary_area": "computer vision i", + "author": "Yiqing Cai; Lianggangxu Chen; Haoyue Guan; Shaohui Lin; Changhong Lu; Changbo Wang; Gaoqi He", + "authorids": "", + "aff": "School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China; Johns Hopkins University, Mason Hall, USA; School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Mathematical Sciences, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China; Innovation Center for AI and Drug Discovery, East China Normal University, Shanghai, China + Chongqing Key Laboratory of Precision Optics, Chongqing Institute of East China Normal University, Chongqing, China", + "bibtex": "@article{Cai_Chen_Guan_Lin_Lu_Wang_He_2023, title={Explicit Invariant Feature Induced Cross-Domain Crowd Counting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25098}, DOI={10.1609/aaai.v37i1.25098}, abstractNote={Cross-domain crowd counting has shown progressively improved performance. However, most methods fail to explicitly consider the transferability of different features between source and target domains. In this paper, we propose an innovative explicit Invariant Feature induced Cross-domain Knowledge Transformation framework to address the inconsistent domain-invariant features of different domains. The main idea is to explicitly extract domain-invariant features from both source and target domains, which builds a bridge to transfer more rich knowledge between two domains. The framework consists of three parts, global feature decoupling (GFD), relation exploration and alignment (REA), and graph-guided knowledge enhancement (GKE). 
In the GFD module, domain-invariant features are efficiently decoupled from domain-specific ones in two domains, which allows the model to distinguish crowds features from backgrounds in the complex scenes. In the REA module both inter-domain relation graph (Inter-RG) and intra-domain relation graph (Intra-RG) are built. Specifically, Inter-RG aggregates multi-scale domain-invariant features between two domains and further aligns local-level invariant features. Intra-RG preserves taskrelated specific information to assist the domain alignment. Furthermore, GKE strategy models the confidence of pseudolabels to further enhance the adaptability of the target domain. Various experiments show our method achieves state-of-theart performance on the standard benchmarks. Code is available at https://github.com/caiyiqing/IF-CKT.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cai, Yiqing and Chen, Lianggangxu and Guan, Haoyue and Lin, Shaohui and Lu, Changhong and Wang, Changbo and He, Gaoqi}, year={2023}, month={Jun.}, pages={259-267} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25098/24870", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25098", + "pdf_size": 1647920, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12734524377974752323&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;163.com;cs.ecnu.edu.cn;math.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;163.com;cs.ecnu.edu.cn;math.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "https://github.com/caiyiqing/IF-CKT", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;0+0", + "aff_unique_norm": "East China Normal University;Johns Hopkins University", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.jhu.edu", + 
"aff_unique_abbr": "ECNU;JHU", + "aff_campus_unique_index": "0;0;1;0;0;0;0+2", + "aff_campus_unique": "Shanghai;Mason Hall;Chongqing", + "aff_country_unique_index": "0;0;1;0;0;0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25140", + "title": "Exploit Domain-Robust Optical Flow in Domain Adaptive Video Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Domain adaptive semantic segmentation aims to exploit the pixel-level annotated samples on source domain to assist the segmentation of unlabeled samples on target domain. For such a task, the key is to construct reliable supervision signals on target domain. However, existing methods can only provide unreliable supervision signals constructed by segmentation model (SegNet) that are generally domain-sensitive. In this work, we try to find a domain-robust clue to construct more reliable supervision signals. Particularly, we experimentally observe the domain-robustness of optical flow in video tasks as it mainly represents the motion characteristics of scenes. However, optical flow cannot be directly used as supervision signals of semantic segmentation since both of them essentially represent different information. To tackle this issue, we first propose a novel Segmentation-to-Flow Module (SFM) that converts semantic segmentation maps to optical flows, named the segmentation-based flow (SF), and then propose a Segmentation-based Flow Consistency (SFC) method to impose consistency between SF and optical flow, which can implicitly supervise the training of segmentation model. The extensive experiments on two challenging benchmarks demonstrate the effectiveness of our method, and it outperforms previous state-of-the-art methods with considerable performance improvement. 
Our code is available at https://github.com/EdenHazardan/SFC.", + "primary_area": "computer vision i", + "author": "Yuan Gao; Zilei Wang; Jiafan Zhuang; Yixin Zhang; Junjie Li", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China; Shantou University; University of Science and Technology of China+Institute of Artificial Intelligence, Hefei Comprehensive National Science Center; University of Science and Technology of China", + "bibtex": "@article{Gao_Wang_Zhuang_Zhang_Li_2023, title={Exploit Domain-Robust Optical Flow in Domain Adaptive Video Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25140}, DOI={10.1609/aaai.v37i1.25140}, abstractNote={Domain adaptive semantic segmentation aims to exploit the pixel-level annotated samples on source domain to assist the segmentation of unlabeled samples on target domain. For such a task, the key is to construct reliable supervision signals on target domain. However, existing methods can only provide unreliable supervision signals constructed by segmentation model (SegNet) that are generally domain-sensitive. In this work, we try to find a domain-robust clue to construct more reliable supervision signals. Particularly, we experimentally observe the domain-robustness of optical flow in video tasks as it mainly represents the motion characteristics of scenes. However, optical flow cannot be directly used as supervision signals of semantic segmentation since both of them essentially represent different information. To tackle this issue, we first propose a novel Segmentation-to-Flow Module (SFM) that converts semantic segmentation maps to optical flows, named the segmentation-based flow (SF), and then propose a Segmentation-based Flow Consistency (SFC) method to impose consistency between SF and optical flow, which can implicitly supervise the training of segmentation model. 
The extensive experiments on two challenging benchmarks demonstrate the effectiveness of our method, and it outperforms previous state-of-the-art methods with considerable performance improvement. Our code is available at https://github.com/EdenHazardan/SFC.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Yuan and Wang, Zilei and Zhuang, Jiafan and Zhang, Yixin and Li, Junjie}, year={2023}, month={Jun.}, pages={641-649} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25140/24912", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25140", + "pdf_size": 676981, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13189212537057447154&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;stu.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn", + "email": "mail.ustc.edu.cn;ustc.edu.cn;stu.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn", + "github": "https://github.com/EdenHazardan/SFC", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0+2;0", + "aff_unique_norm": "University of Science and Technology of China;Shantou University;Hefei Comprehensive National Science Center", + "aff_unique_dep": ";;Institute of Artificial Intelligence", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.stu.edu.cn;http://www.hfcn.edu.cn", + "aff_unique_abbr": "USTC;STU;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hefei", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27054", + "title": "Exploiting High-Order Interaction Relations to Explore User Intent (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "This paper studies the problem of exploring the user intent for session-based recommendations. Its challenges come from the uncertainty of user behavior and limited information. 
However, current endeavors cannot fully explore the mutual interactions among sessions and do not explicitly model the complex high-order relations among items. To circumvent these critical issues, we innovatively propose a HyperGraph Convolutional Contrastive framework (termed HGCC) that consists of two crucial tasks: 1) The session-based recommendation (SBR task) that aims to capture the beyond pair-wise relationships between items and sessions. 2) The self-supervised learning (SSL task) acted as the auxiliary task to boost the former task. By jointly optimizing the two tasks, the performance of the recommendation task achieves decent gains. Experiments on multiple real-world datasets demonstrate the superiority of the proposed approach over the state-of-the-art methods.", + "primary_area": "", + "author": "Xiangping Zheng; Xun Liang; Bo Wu", + "authorids": "", + "aff": "School of Information, Renmin University of China, Beijing, China 100872; School of Information, Renmin University of China, Beijing, China 100872; School of Information, Renmin University of China, Beijing, China 100872", + "bibtex": "@article{Zheng_Liang_Wu_2024, title={Exploiting High-Order Interaction Relations to Explore User Intent (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27054}, DOI={10.1609/aaai.v37i13.27054}, abstractNote={This paper studies the problem of exploring the user intent for session-based recommendations. Its challenges come from the uncertainty of user behavior and limited information. However, current endeavors cannot fully explore the mutual interactions among sessions and do not explicitly model the complex high-order relations among items. To circumvent these critical issues, we innovatively propose a HyperGraph Convolutional Contrastive framework (termed HGCC) that consists of two crucial tasks: 1) The session-based recommendation (SBR task) that aims to capture the beyond pair-wise relationships between items and sessions. 
2) The self-supervised learning (SSL task) acted as the auxiliary task to boost the former task. By jointly optimizing the two tasks, the performance of the recommendation task achieves decent gains. Experiments on multiple real-world datasets demonstrate the superiority of the proposed approach over the state-of-the-art methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Xiangping and Liang, Xun and Wu, Bo}, year={2024}, month={Jul.}, pages={16388-16389} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27054/26826", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27054", + "pdf_size": 71579, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14419055308005288875&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Renmin University of China", + "aff_unique_dep": "School of Information", + "aff_unique_url": "http://www.ruc.edu.cn", + "aff_unique_abbr": "RUC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25881", + "title": "Exploiting Multiple Abstractions in Episodic RL via Reward Shaping", + "track": "main", + "status": "Technical", + "abstract": "One major limitation to the applicability of Reinforcement Learning (RL) to many practical domains is the large number of samples required to learn an optimal policy. To address this problem and improve learning efficiency, we consider a linear hierarchy of abstraction layers of the Markov Decision Process (MDP) underlying the target domain. Each layer is an MDP representing a coarser model of the one immediately below in the hierarchy. 
In this work, we propose a novel form of Reward Shaping where the solution obtained at the abstract level is used to offer rewards to the more concrete MDP, in such a way that the abstract solution guides the learning in the more complex domain. In contrast with other works in Hierarchical RL, our technique has few requirements in the design of the abstract models and it is also tolerant to modeling errors, thus making the proposed approach practical. We formally analyze the relationship between the abstract models and the exploration heuristic induced in the lower-level domain. Moreover, we prove that the method guarantees optimal convergence and we demonstrate its effectiveness experimentally.", + "primary_area": "machine learning i", + "author": "Roberto Cipollone; Giuseppe De Giacomo; Marco Favorito; Luca Iocchi; Fabio Patrizi", + "authorids": "", + "aff": "DIAG, Universit\u00e0 degli Studi di Roma \u201cLa Sapienza\u201d, Italy; DIAG, Universit\u00e0 degli Studi di Roma \u201cLa Sapienza\u201d, Italy + Department of Computer Science, University of Oxford, U.K.; Banca d\u2019Italia, Italy; DIAG, Universit\u00e0 degli Studi di Roma \u201cLa Sapienza\u201d, Italy; DIAG, Universit\u00e0 degli Studi di Roma \u201cLa Sapienza\u201d, Italy", + "bibtex": "@article{Cipollone_De Giacomo_Favorito_Iocchi_Patrizi_2023, title={Exploiting Multiple Abstractions in Episodic RL via Reward Shaping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25881}, DOI={10.1609/aaai.v37i6.25881}, abstractNote={One major limitation to the applicability of Reinforcement Learning (RL) to many practical domains is the large number of samples required to learn an optimal policy. To address this problem and improve learning efficiency, we consider a linear hierarchy of abstraction layers of the Markov Decision Process (MDP) underlying the target domain. Each layer is an MDP representing a coarser model of the one immediately below in the hierarchy. 
In this work, we propose a novel form of Reward Shaping where the solution obtained at the abstract level is used to offer rewards to the more concrete MDP, in such a way that the abstract solution guides the learning in the more complex domain. In contrast with other works in Hierarchical RL, our technique has few requirements in the design of the abstract models and it is also tolerant to modeling errors, thus making the proposed approach practical. We formally analyze the relationship between the abstract models and the exploration heuristic induced in the lower-level domain. Moreover, we prove that the method guarantees optimal convergence and we demonstrate its effectiveness experimentally.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cipollone, Roberto and De Giacomo, Giuseppe and Favorito, Marco and Iocchi, Luca and Patrizi, Fabio}, year={2023}, month={Jun.}, pages={7227-7234} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25881/25653", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25881", + "pdf_size": 381874, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4698040965254617865&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "diag.uniroma1.it;diag.uniroma1.it;bancaditalia.it;diag.uniroma1.it;diag.uniroma1.it", + "email": "diag.uniroma1.it;diag.uniroma1.it;bancaditalia.it;diag.uniroma1.it;diag.uniroma1.it", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;0;0", + "aff_unique_norm": "Universit\u00e0 degli Studi di Roma \u201cLa Sapienza\u201d;University of Oxford;Banca d'Italia", + "aff_unique_dep": "DIAG;Department of Computer Science;", + "aff_unique_url": "https://www.uniroma1.it;https://www.ox.ac.uk;https://www.bancaditalia.it", + "aff_unique_abbr": ";Oxford;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0;0;0", + 
"aff_country_unique": "Italy;United Kingdom" + }, + { + "id": "article-27032", + "title": "Exploration on Physics-Informed Neural Networks on Partial Differential Equations (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Data-driven related solutions are dominating various scientific fields with the assistance of machine learning and data analytics. Finding effective solutions has long been discussed in the area of machine learning. The recent decade has witnessed the promising performance of the Physics-Informed Neural Networks (PINN) in bridging the gap between real-world scientific problems and machine learning models. In this paper, we explore the behavior of PINN in a particular range of different diffusion coefficients under specific boundary conditions. In addition, different initial conditions of partial differential equations are solved by applying the proposed PINN. Our paper illustrates how the effectiveness of the PINN can change under various scenarios. As a result, we demonstrate a better insight into the behaviors of the PINN and how to make the proposed method more robust while encountering different scientific and engineering problems.", + "primary_area": "", + "author": "Hoa Ta; Shi Wen Wong; Nathan McClanahan; Jung-Han Kimn; Kaiqun Fu", + "authorids": "", + "aff": "University of California of Irvine; South Dakota State University; South Dakota State University; South Dakota State University; South Dakota State University", + "bibtex": "@article{Ta_Wong_McClanahan_Kimn_Fu_2024, title={Exploration on Physics-Informed Neural Networks on Partial Differential Equations (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27032}, DOI={10.1609/aaai.v37i13.27032}, abstractNote={Data-driven related solutions are dominating various scientific fields with the assistance of machine learning and data analytics. 
Finding effective solutions has long been discussed in the area of machine learning. The recent decade has witnessed the promising performance of the Physics-Informed Neural Networks (PINN) in bridging the gap between real-world scientific problems and machine learning models. In this paper, we explore the behavior of PINN in a particular range of different diffusion coefficients under specific boundary conditions. In addition, different initial conditions of partial differential equations are solved by applying the proposed PINN. Our paper illustrates how the effectiveness of the PINN can change under various scenarios. As a result, we demonstrate a better insight into the behaviors of the PINN and how to make the proposed method more robust while encountering different scientific and engineering problems.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ta, Hoa and Wong, Shi Wen and McClanahan, Nathan and Kimn, Jung-Han and Fu, Kaiqun}, year={2024}, month={Jul.}, pages={16344-16345} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27032/26804", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27032", + "pdf_size": 713947, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6877541943894720408&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "uci.edu;sdstate.edu;sdstate.edu;sdstate.edu;sdstate.edu", + "email": "uci.edu;sdstate.edu;sdstate.edu;sdstate.edu;sdstate.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "University of California, Irvine;South Dakota State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uci.edu;https://www.sdstate.edu", + "aff_unique_abbr": "UCI;SDSU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Irvine;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26164", 
+ "title": "Exploration via Epistemic Value Estimation", + "track": "main", + "status": "Technical", + "abstract": "How to efficiently explore in reinforcement learning is an open problem. Many exploration algorithms employ the epistemic uncertainty of their own value predictions -- for instance to compute an exploration bonus or upper confidence bound. Unfortunately the required uncertainty is difficult to estimate in general with function approximation.\n\nWe propose epistemic value estimation (EVE): a recipe that is compatible with sequential decision making and with neural network function approximators. It equips agents with a tractable posterior over all their parameters from which epistemic value uncertainty can be computed efficiently.\n\nWe use the recipe to derive an epistemic Q-Learning agent and observe competitive performance on a series of benchmarks. Experiments confirm that the EVE recipe facilitates efficient exploration in hard exploration tasks.", + "primary_area": "machine learning iii", + "author": "Simon Schmitt; John Shawe-Taylor; Hado van Hasselt", + "authorids": "", + "aff": "DeepMind+University College London, UK; University College London, UK; DeepMind", + "bibtex": "@article{Schmitt_Shawe-Taylor_van Hasselt_2023, title={Exploration via Epistemic Value Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26164}, DOI={10.1609/aaai.v37i8.26164}, abstractNote={How to efficiently explore in reinforcement learning is an open problem. Many exploration algorithms employ the epistemic uncertainty of their own value predictions -- for instance to compute an exploration bonus or upper confidence bound. Unfortunately the required uncertainty is difficult to estimate in general with function approximation. We propose epistemic value estimation (EVE): a recipe that is compatible with sequential decision making and with neural network function approximators. 
It equips agents with a tractable posterior over all their parameters from which epistemic value uncertainty can be computed efficiently. We use the recipe to derive an epistemic Q-Learning agent and observe competitive performance on a series of benchmarks. Experiments confirm that the EVE recipe facilitates efficient exploration in hard exploration tasks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Schmitt, Simon and Shawe-Taylor, John and van Hasselt, Hado}, year={2023}, month={Jun.}, pages={9742-9751} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26164/25936", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26164", + "pdf_size": 620410, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8783063999405268540&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "google.com; ; ", + "email": "google.com; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0", + "aff_unique_norm": "DeepMind;University College London", + "aff_unique_dep": ";", + "aff_unique_url": "https://deepmind.com;https://www.ucl.ac.uk", + "aff_unique_abbr": "DeepMind;UCL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25488", + "title": "Exploratory Inference Learning for Scribble Supervised Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Scribble supervised semantic segmentation has achieved great advances in pseudo label exploitation, yet suffers insufficient label exploration for the mass of unannotated regions. In this work, we propose a novel exploratory inference learning (EIL) framework, which facilitates efficient probing on unlabeled pixels and promotes selecting confident candidates for boosting the evolved segmentation. 
The exploration of unannotated regions is formulated as an iterative decision-making process, where a policy searcher learns to infer in the unknown space and the reward to the exploratory policy is based on a contrastive measurement of candidates. In particular, we devise the contrastive reward with the intra-class attraction and the inter-class repulsion in the feature space w.r.t the pseudo labels. The unlabeled exploration and the labeled exploitation are jointly balanced to improve the segmentation, and framed in a close-looping end-to-end network. Comprehensive evaluations on the benchmark datasets (PASCAL VOC 2012 and PASCAL Context) demonstrate the superiority of our proposed EIL when compared with other state-of-the-art methods for the scribble-supervised semantic segmentation problem.", + "primary_area": "computer vision iii", + "author": "Chuanwei Zhou; Zhen Cui; Chunyan Xu; Cao Han; Jian Yang", + "authorids": "", + "aff": "PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social 
Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", + "bibtex": "@article{Zhou_Cui_Xu_Han_Yang_2023, title={Exploratory Inference Learning for Scribble Supervised Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25488}, DOI={10.1609/aaai.v37i3.25488}, abstractNote={Scribble supervised semantic segmentation has achieved great advances in pseudo label exploitation, yet suffers insufficient label exploration for the mass of unannotated regions. In this work, we propose a novel exploratory inference learning (EIL) framework, which facilitates efficient probing on unlabeled pixels and promotes selecting confident candidates for boosting the evolved segmentation. The exploration of unannotated regions is formulated as an iterative decision-making process, where a policy searcher learns to infer in the unknown space and the reward to the exploratory policy is based on a contrastive measurement of candidates. In particular, we devise the contrastive reward with the intra-class attraction and the inter-class repulsion in the feature space w.r.t the pseudo labels. The unlabeled exploration and the labeled exploitation are jointly balanced to improve the segmentation, and framed in a close-looping end-to-end network. 
Comprehensive evaluations on the benchmark datasets (PASCAL VOC 2012 and PASCAL Context) demonstrate the superiority of our proposed EIL when compared with other state-of-the-art methods for the scribble-supervised semantic segmentation problem.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Chuanwei and Cui, Zhen and Xu, Chunyan and Han, Cao and Yang, Jian}, year={2023}, month={Jun.}, pages={3760-3768} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25488/25260", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25488", + "pdf_size": 473540, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1537660506440079224&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Nanjing University of Science and Technology", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.nust.edu.cn", + "aff_unique_abbr": "NJUST", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26899", + "title": "Exploring Artificial Intelligence in English Language Arts with StoryQ", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "Exploring Artificial Intelligence (AI) in English Language Arts (ELA) with StoryQ is a 10-hour curriculum module designed for high school ELA classes. The module introduces students to fundamental AI concepts and essential machine learning workflow using StoryQ, a web-based GUI environment for Grades 6-12 learners. 
In this module, students work with unstructured text data and learn to train, test, and improve text classification models such as intent recognition, clickbait filter, and sentiment analysis. As they interact with machine-learning language models deeply, students also gain a nuanced understanding of language and how to wield it, not just as a data structure, but as a tool in our human-human encounters as well. The current version contains eight lessons, all delivered through a full-featured online learning and teaching platform. Computers and Internet access are required to implement the module. The module was piloted in an ELA class in the Spring of 2022, and the student learning outcomes were positive. The module is currently undergoing revision and will be further tested and improved in Fall 2022.", + "primary_area": "", + "author": "Jie Chao; Rebecca Ellis; Shiyan Jiang; Carolyn Ros\u00e9; William Finzer; Cansu Tatar; James Fiacco; Kenia Wiedemann", + "authorids": "", + "aff": "Concord Consortium; Concord Consortium; North Carolina State University; Carnegie Mellon University; Concord Consortium; North Carolina State University; Carnegie Mellon University; Concord Consortium", + "bibtex": "@article{Chao_Ellis_Jiang_Ros\u00e9_Finzer_Tatar_Fiacco_Wiedemann_2024, title={Exploring Artificial Intelligence in English Language Arts with StoryQ}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26899}, DOI={10.1609/aaai.v37i13.26899}, abstractNote={Exploring Artificial Intelligence (AI) in English Language Arts (ELA) with StoryQ is a 10-hour curriculum module designed for high school ELA classes. The module introduces students to fundamental AI concepts and essential machine learning workflow using StoryQ, a web-based GUI environment for Grades 6-12 learners. In this module, students work with unstructured text data and learn to train, test, and improve text classification models such as intent recognition, clickbait filter, and sentiment analysis. 
As they interact with machine-learning language models deeply, students also gain a nuanced understanding of language and how to wield it, not just as a data structure, but as a tool in our human-human encounters as well. The current version contains eight lessons, all delivered through a full-featured online learning and teaching platform. Computers and Internet access are required to implement the module. The module was piloted in an ELA class in the Spring of 2022, and the student learning outcomes were positive. The module is currently undergoing revision and will be further tested and improved in Fall 2022.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chao, Jie and Ellis, Rebecca and Jiang, Shiyan and Ros\u00e9, Carolyn and Finzer, William and Tatar, Cansu and Fiacco, James and Wiedemann, Kenia}, year={2024}, month={Jul.}, pages={15999-16003} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26899/26671", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26899", + "pdf_size": 486073, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6048745347663554433&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "concord.org;concord.org;ncsu.edu;andrew.cmu.edu;concord.org;ncsu.edu;cs.cmu.edu;concord.org", + "email": "concord.org;concord.org;ncsu.edu;andrew.cmu.edu;concord.org;ncsu.edu;cs.cmu.edu;concord.org", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;2;0;1;2;0", + "aff_unique_norm": "Concord Consortium;North Carolina State University;Carnegie Mellon University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://concordconsortium.org;https://www.ncsu.edu;https://www.cmu.edu", + "aff_unique_abbr": ";NCSU;CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25353", + "title": 
"Exploring CLIP for Assessing the Look and Feel of Images", + "track": "main", + "status": "Technical", + "abstract": "Measuring the perception of visual content is a long-standing problem in computer vision. Many mathematical models have been developed to evaluate the look or quality of an image. Despite the effectiveness of such tools in quantifying degradations such as noise and blurriness levels, such quantification is loosely coupled with human language. When it comes to more abstract perception about the feel of visual content, existing methods can only rely on supervised models that are explicitly trained with labeled data collected via laborious user study. In this paper, we go beyond the conventional paradigms by exploring the rich visual language prior encapsulated in Contrastive Language-Image Pre-training (CLIP) models for assessing both the quality perception (look) and abstract perception (feel) of images without explicit task-specific training. In particular, we discuss effective prompt designs and show an effective prompt pairing strategy to harness the prior. We also provide extensive experiments on controlled datasets and Image Quality Assessment (IQA) benchmarks. Our results show that CLIP captures meaningful priors that generalize well to different perceptual assessments.", + "primary_area": "computer vision ii", + "author": "Jianyi Wang; Kelvin C.K. Chan; Chen Change Loy", + "authorids": "", + "aff": "S-Lab, Nanyang Technological University; S-Lab, Nanyang Technological University; S-Lab, Nanyang Technological University", + "bibtex": "@article{Wang_Chan_Loy_2023, title={Exploring CLIP for Assessing the Look and Feel of Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25353}, DOI={10.1609/aaai.v37i2.25353}, abstractNote={Measuring the perception of visual content is a long-standing problem in computer vision. Many mathematical models have been developed to evaluate the look or quality of an image. 
Despite the effectiveness of such tools in quantifying degradations such as noise and blurriness levels, such quantification is loosely coupled with human language. When it comes to more abstract perception about the feel of visual content, existing methods can only rely on supervised models that are explicitly trained with labeled data collected via laborious user study. In this paper, we go beyond the conventional paradigms by exploring the rich visual language prior encapsulated in Contrastive Language-Image Pre-training (CLIP) models for assessing both the quality perception (look) and abstract perception (feel) of images without explicit task-specific training. In particular, we discuss effective prompt designs and show an effective prompt pairing strategy to harness the prior. We also provide extensive experiments on controlled datasets and Image Quality Assessment (IQA) benchmarks. Our results show that CLIP captures meaningful priors that generalize well to different perceptual assessments.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Jianyi and Chan, Kelvin C.K. 
and Loy, Chen Change}, year={2023}, month={Jun.}, pages={2555-2563} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25353/25125", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25353", + "pdf_size": 10501975, + "gs_citation": 538, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10602047150179389907&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "email": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanyang Technological University", + "aff_unique_dep": "S-Lab", + "aff_unique_url": "https://www.ntu.edu.sg", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26591", + "title": "Exploring Faithful Rationale for Multi-Hop Fact Verification via Salience-Aware Graph Learning", + "track": "main", + "status": "Technical", + "abstract": "The opaqueness of the multi-hop fact verification model imposes imperative requirements for explainability. One feasible way is to extract rationales, a subset of inputs, where the performance of prediction drops dramatically when being removed. Though being explainable, most rationale extraction methods for multi-hop fact verification explore the semantic information within each piece of evidence individually, while ignoring the topological information interaction among different pieces of evidence. Intuitively, a faithful rationale bears complementary information being able to extract other rationales through the multi-hop reasoning process. To tackle such disadvantages, we cast explainable multi-hop fact verification as subgraph extraction, which can be solved based on graph convolutional network (GCN) with salience-aware graph learning. 
In specific, GCN is utilized to incorporate the topological interaction information among multiple pieces of evidence for learning evidence representation. Meanwhile, to alleviate the influence of noisy evidence, the salience-aware graph perturbation is induced into the message passing of GCN. Moreover, the multi-task model with three diagnostic properties of rationale is elaborately designed to improve the quality of an explanation without any explicit annotations. Experimental results on the FEVEROUS benchmark show significant gains over previous state-of-the-art methods for both rationale extraction and fact verification.", + "primary_area": "speech natural language processing", + "author": "Jiasheng Si; Yingjie Zhu; Deyu Zhou", + "authorids": "", + "aff": "School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China", + "bibtex": "@article{Si_Zhu_Zhou_2023, title={Exploring Faithful Rationale for Multi-Hop Fact Verification via Salience-Aware Graph Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26591}, DOI={10.1609/aaai.v37i11.26591}, abstractNote={The opaqueness of the multi-hop fact verification model imposes imperative requirements for explainability. One feasible way is to extract rationales, a subset of inputs, where the performance of prediction drops dramatically when being removed. 
Though being explainable, most rationale extraction methods for multi-hop fact verification explore the semantic information within each piece of evidence individually, while ignoring the topological information interaction among different pieces of evidence. Intuitively, a faithful rationale bears complementary information being able to extract other rationales through the multi-hop reasoning process. To tackle such disadvantages, we cast explainable multi-hop fact verification as subgraph extraction, which can be solved based on graph convolutional network (GCN) with salience-aware graph learning. In specific, GCN is utilized to incorporate the topological interaction information among multiple pieces of evidence for learning evidence representation. Meanwhile, to alleviate the influence of noisy evidence, the salience-aware graph perturbation is induced into the message passing of GCN. Moreover, the multi-task model with three diagnostic properties of rationale is elaborately designed to improve the quality of an explanation without any explicit annotations. 
Experimental results on the FEVEROUS benchmark show significant gains over previous state-of-the-art methods for both rationale extraction and fact verification.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Si, Jiasheng and Zhu, Yingjie and Zhou, Deyu}, year={2023}, month={Jun.}, pages={13573-13581} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26591/26363", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26591", + "pdf_size": 463385, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10740185126001377133&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Southeast University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.seu.edu.cn/", + "aff_unique_abbr": "SEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26973", + "title": "Exploring Hypergraph of Earnings Call for Risk Prediction (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In financial economics, studies have shown that the textual content in the earnings conference call transcript has predictive power for a firm's future risk. However, the conference call transcript is very long and contains diverse non-relevant content, which poses challenges for the text-based risk forecast. This study investigates the structural dependency within a conference call transcript by explicitly modeling the dialogue between managers and analysts. 
Specifically, we utilize TextRank to extract information and exploit the semantic correlation within a discussion using hypergraph learning. This novel design can improve the transcript representation performance and reduce the risk of forecast errors. Experimental results on a large-scale dataset show that our approach can significantly improve prediction performance compared to state-of-the-art text-based models.", + "primary_area": "", + "author": "Yi He; Wenxin Tai; Fan Zhou; Yi Yang", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China+Kashi Institute of Electronics and Information Industry; The Hong Kong University of Science and Technology", + "bibtex": "@article{He_Tai_Zhou_Yang_2024, title={Exploring Hypergraph of Earnings Call for Risk Prediction (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26973}, DOI={10.1609/aaai.v37i13.26973}, abstractNote={In financial economics, studies have shown that the textual content in the earnings conference call transcript has predictive power for a firm\u2019s future risk. However, the conference call transcript is very long and contains diverse non-relevant content, which poses challenges for the text-based risk forecast. This study investigates the structural dependency within a conference call transcript by explicitly modeling the dialogue between managers and analysts. Specifically, we utilize TextRank to extract information and exploit the semantic correlation within a discussion using hypergraph learning. This novel design can improve the transcript representation performance and reduce the risk of forecast errors. 
Experimental results on a large-scale dataset show that our approach can significantly improve prediction performance compared to state-of-the-art text-based models.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Yi and Tai, Wenxin and Zhou, Fan and Yang, Yi}, year={2024}, month={Jul.}, pages={16226-16227} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26973/26745", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26973", + "pdf_size": 168127, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1382973155399298572&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "foxmail.com;gmail.com;uestc.edu.cn;ust.hk", + "email": "foxmail.com;gmail.com;uestc.edu.cn;ust.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;2", + "aff_unique_norm": "University of Electronic Science and Technology of China;Kashi Institute of Electronics and Information Industry;Hong Kong University of Science and Technology", + "aff_unique_dep": ";Electronics and Information Industry;", + "aff_unique_url": "https://www.uestc.edu.cn;;https://www.ust.hk", + "aff_unique_abbr": "UESTC;;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25377", + "title": "Exploring Non-target Knowledge for Improving Ensemble Universal Adversarial Attacks", + "track": "main", + "status": "Technical", + "abstract": "The ensemble attack with average weights can be leveraged for increasing the transferability of universal adversarial perturbation (UAP) by training with multiple Convolutional Neural Networks (CNNs). 
However, after analyzing the Pearson Correlation Coefficients (PCCs) between the ensemble logits and individual logits of the crafted UAP trained by the ensemble attack, we find that one CNN plays a dominant role during the optimization. Consequently, this average weighted strategy will weaken the contributions of other CNNs and thus limit the transferability for other black-box CNNs. To deal with this bias issue, the primary attempt is to leverage the Kullback\u2013Leibler (KL) divergence loss to encourage the joint contribution from different CNNs, which is still insufficient. After decoupling the KL loss into a target-class part and a non-target-class part, the main issue lies in that the non-target knowledge will be significantly suppressed due to the increasing logit of the target class. \nIn this study, we simply adopt a KL loss that only considers the non-target classes for addressing the dominant bias issue. Besides, to further boost the transferability, we incorporate the min-max learning framework to self-adjust the ensemble weights for each CNN. 
Experiments results validate that considering the non-target KL loss can achieve superior transferability than the original KL loss by a large margin, and the min-max training can provide a mutual benefit in adversarial ensemble attacks.\nThe source code is available at: https://github.com/WJJLL/ND-MM.", + "primary_area": "computer vision iii", + "author": "Juanjuan Weng; Zhiming Luo; Zhun Zhong; Dazhen Lin; Shaozi Li", + "authorids": "", + "aff": "Department of Artificial Intelligence, Xiamen University, China + Fujian Key Laboratory of Big Data Application and Intellectualization for Tea Industry, Wuyi University, China; Department of Artificial Intelligence, Xiamen University, China + Fujian Key Laboratory of Big Data Application and Intellectualization for Tea Industry, Wuyi University, China; Department of Information Engineering and Computer Science, University of Trento, Italy; Department of Artificial Intelligence, Xiamen University, China; Department of Artificial Intelligence, Xiamen University, China", + "bibtex": "@article{Weng_Luo_Zhong_Lin_Li_2023, title={Exploring Non-target Knowledge for Improving Ensemble Universal Adversarial Attacks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25377}, DOI={10.1609/aaai.v37i3.25377}, abstractNote={The ensemble attack with average weights can be leveraged for increasing the transferability of universal adversarial perturbation (UAP) by training with multiple Convolutional Neural Networks (CNNs). However, after analyzing the Pearson Correlation Coefficients (PCCs) between the ensemble logits and individual logits of the crafted UAP trained by the ensemble attack, we find that one CNN plays a dominant role during the optimization. Consequently, this average weighted strategy will weaken the contributions of other CNNs and thus limit the transferability for other black-box CNNs. 
To deal with this bias issue, the primary attempt is to leverage the Kullback\u2013Leibler (KL) divergence loss to encourage the joint contribution from different CNNs, which is still insufficient. After decoupling the KL loss into a target-class part and a non-target-class part, the main issue lies in that the non-target knowledge will be significantly suppressed due to the increasing logit of the target class. In this study, we simply adopt a KL loss that only considers the non-target classes for addressing the dominant bias issue. Besides, to further boost the transferability, we incorporate the min-max learning framework to self-adjust the ensemble weights for each CNN. Experiments results validate that considering the non-target KL loss can achieve superior transferability than the original KL loss by a large margin, and the min-max training can provide a mutual benefit in adversarial ensemble attacks.\nThe source code is available at: https://github.com/WJJLL/ND-MM.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Weng, Juanjuan and Luo, Zhiming and Zhong, Zhun and Lin, Dazhen and Li, Shaozi}, year={2023}, month={Jun.}, pages={2768-2775} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25377/25149", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25377", + "pdf_size": 418054, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6194449421617989255&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn;gmail.com;xmu.edu.cn;xmu.edu.cn", + "email": "stu.xmu.edu.cn;xmu.edu.cn;gmail.com;xmu.edu.cn;xmu.edu.cn", + "github": "https://github.com/WJJLL/ND-MM", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;0;0", + "aff_unique_norm": "Xiamen University;Wuyi University;University of Trento", + "aff_unique_dep": "Department of Artificial Intelligence;Fujian Key Laboratory of Big Data Application and 
Intellectualization for Tea Industry;Department of Information Engineering and Computer Science", + "aff_unique_url": "https://www.xmu.edu.cn;;https://www.unitn.it", + "aff_unique_abbr": "XMU;;UniTN", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;0;0", + "aff_country_unique": "China;Italy" + }, + { + "id": "article-26635", + "title": "Exploring Self-Distillation Based Relational Reasoning Training for Document-Level Relation Extraction", + "track": "main", + "status": "Technical", + "abstract": "Document-level relation extraction (RE) aims to extract relational triples from a document. One of its primary challenges is to predict implicit relations between entities, which are not explicitly expressed in the document but can usually be extracted through relational reasoning. Previous methods mainly implicitly model relational reasoning through the interaction among entities or entity pairs. However, they suffer from two deficiencies: 1) they often consider only one reasoning pattern, of which coverage on relational triples is limited; 2) they do not explicitly model the process of relational reasoning. In this paper, to deal with the first problem, we propose a document-level RE model with a reasoning module that contains a core unit, the reasoning multi-head self-attention unit. This unit is a variant of the conventional multi-head self-attention and utilizes four attention heads to model four common reasoning patterns, respectively, which can cover more relational triples than previous methods. Then, to address the second issue, we propose a self-distillation training framework, which contains two branches sharing parameters. In the first branch, we first randomly mask some entity pair feature vectors in the document, and then train our reasoning module to infer their relations by exploiting the feature information of other related entity pairs. 
By doing so, we can explicitly model the process of relational reasoning. However, because the additional masking operation is not used during testing, it causes an input gap between training and testing scenarios, which would hurt the model performance. To reduce this gap, we perform conventional supervised training without masking operation in the second branch and utilize Kullback-Leibler divergence loss to minimize the difference between the predictions of the two branches. Finally, we conduct comprehensive experiments on three benchmark datasets, of which experimental results demonstrate that our model consistently outperforms all competitive baselines. Our source code is available at https://github.com/DeepLearnXMU/DocRE-SD", + "primary_area": "speech natural language processing", + "author": "Liang Zhang; Jinsong Su; Zijun Min; Zhongjian Miao; Qingguo Hu; Biao Fu; Xiaodong Shi; Yidong Chen", + "authorids": "", + "aff": "1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage 
of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China", + "bibtex": "@article{Zhang_Su_Min_Miao_Hu_Fu_Shi_Chen_2023, title={Exploring Self-Distillation Based Relational Reasoning Training for Document-Level Relation Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26635}, DOI={10.1609/aaai.v37i11.26635}, abstractNote={Document-level relation extraction (RE) aims to extract relational triples from a document. One of its primary challenges is to predict implicit relations between entities, which are not explicitly expressed in the document but can usually be extracted through relational reasoning. Previous methods mainly implicitly model relational reasoning through the interaction among entities or entity pairs. However, they suffer from two deficiencies: 1) they often consider only one reasoning pattern, of which coverage on relational triples is limited; 2) they do not explicitly model the process of relational reasoning. In this paper, to deal with the first problem, we propose a document-level RE model with a reasoning module that contains a core unit, the reasoning multi-head self-attention unit. 
This unit is a variant of the conventional multi-head self-attention and utilizes four attention heads to model four common reasoning patterns, respectively, which can cover more relational triples than previous methods. Then, to address the second issue, we propose a self-distillation training framework, which contains two branches sharing parameters. In the first branch, we first randomly mask some entity pair feature vectors in the document, and then train our reasoning module to infer their relations by exploiting the feature information of other related entity pairs. By doing so, we can explicitly model the process of relational reasoning. However, because the additional masking operation is not used during testing, it causes an input gap between training and testing scenarios, which would hurt the model performance. To reduce this gap, we perform conventional supervised training without masking operation in the second branch and utilize Kullback-Leibler divergence loss to minimize the difference between the predictions of the two branches. Finally, we conduct comprehensive experiments on three benchmark datasets, of which experimental results demonstrate that our model consistently outperforms all competitive baselines. 
Our source code is available at https://github.com/DeepLearnXMU/DocRE-SD}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Liang and Su, Jinsong and Min, Zijun and Miao, Zhongjian and Hu, Qingguo and Fu, Biao and Shi, Xiaodong and Chen, Yidong}, year={2023}, month={Jun.}, pages={13967-13975} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26635/26407", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26635", + "pdf_size": 600621, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15166389819961308364&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn; ; ; ; ; ;xmu.edu.cn", + "email": "stu.xmu.edu.cn;xmu.edu.cn; ; ; ; ; ;xmu.edu.cn", + "github": "https://github.com/DeepLearnXMU/DocRE-SD", + "project": "", + "author_num": 8, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Xiamen University", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.xmu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": ";;;;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26879", + "title": "Exploring Social Biases of Large Language Models in a College Artificial Intelligence Course", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "Large neural network-based language models play an increasingly important role in contemporary AI. Although these models demonstrate sophisticated text generation capabilities, they have also been shown to reproduce harmful social biases contained in their training data. 
This paper presents a project that guides students through an exploration of social biases in large language models.\n\nAs a final project for an intermediate college course in Artificial Intelligence, students developed a bias probe task for a previously-unstudied aspect of sociolinguistic or sociocultural bias they were interested in exploring. Through the process of constructing a dataset and evaluation metric to measure bias, students mastered key technical concepts, including how to run contemporary neural networks for natural language processing tasks; construct datasets and evaluation metrics; and analyze experimental results. Students reported their findings in an in-class presentation and a final report, recounting patterns of predictions that surprised, unsettled, and sparked interest in advocating for technology that reflects a more diverse set of backgrounds and experiences.\n\nThrough this project, students engage with and even contribute to a growing body of scholarly work on social biases in large language models.", + "primary_area": "", + "author": "Skylar Kolisko; Carolyn Jane Anderson", + "authorids": "", + "aff": "Wellesley College; Wellesley College", + "bibtex": "@article{Kolisko_Anderson_2024, title={Exploring Social Biases of Large Language Models in a College Artificial Intelligence Course}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26879}, DOI={10.1609/aaai.v37i13.26879}, abstractNote={Large neural network-based language models play an increasingly important role in contemporary AI. Although these models demonstrate sophisticated text generation capabilities, they have also been shown to reproduce harmful social biases contained in their training data. This paper presents a project that guides students through an exploration of social biases in large language models. 
As a final project for an intermediate college course in Artificial Intelligence, students developed a bias probe task for a previously-unstudied aspect of sociolinguistic or sociocultural bias they were interested in exploring. Through the process of constructing a dataset and evaluation metric to measure bias, students mastered key technical concepts, including how to run contemporary neural networks for natural language processing tasks; construct datasets and evaluation metrics; and analyze experimental results. Students reported their findings in an in-class presentation and a final report, recounting patterns of predictions that surprised, unsettled, and sparked interest in advocating for technology that reflects a more diverse set of backgrounds and experiences. Through this project, students engage with and even contribute to a growing body of scholarly work on social biases in large language models.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kolisko, Skylar and Anderson, Carolyn Jane}, year={2024}, month={Jul.}, pages={15825-15833} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26879/26651", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26879", + "pdf_size": 124908, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15372339930627381380&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff_domain": "wellesley.edu;wellesley.edu", + "email": "wellesley.edu;wellesley.edu", + "github": "https://github.com/Wellesley-EASEL-lab/Exploring-Social-Biases-of-Large-Language-Models", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Wellesley College", + "aff_unique_dep": "", + "aff_unique_url": "https://www.wellesley.edu", + "aff_unique_abbr": "Wellesley", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + 
"id": "article-25300", + "title": "Exploring Stochastic Autoregressive Image Modeling for Visual Representation", + "track": "main", + "status": "Technical", + "abstract": "Autoregressive language modeling (ALM) has been successfully used in self-supervised pre-training in Natural language processing (NLP). However, this paradigm has not achieved comparable results with other self-supervised approaches in computer vision (e.g., contrastive learning, masked image modeling). In this paper, we try to find the reason why autoregressive modeling does not work well on vision tasks. To tackle this problem, we fully analyze the limitation of visual autoregressive methods and proposed a novel stochastic autoregressive image modeling (named SAIM) by the two simple designs. First, we serialize the image into patches. Second, we employ the stochastic permutation strategy to generate an effective and robust image context which is critical for vision tasks. To realize this task, we create a parallel encoder-decoder training process in which the encoder serves a similar role to the standard vision transformer focusing on learning the whole contextual information, and meanwhile the decoder predicts the content of the current position so that the encoder and decoder can reinforce each other. Our method significantly improves the performance of autoregressive image modeling and achieves the best accuracy (83.9%) on the vanilla ViT-Base model among methods using only ImageNet-1K data. Transfer performance in downstream tasks also shows that our model achieves competitive performance. 
Code is available at https://github.com/qiy20/SAIM.", + "primary_area": "computer vision ii", + "author": "Yu Qi; Fan Yang; Yousong Zhu; Yufei Liu; Liwei Wu; Rui Zhao; Wei Li", + "authorids": "", + "aff": "Tsinghua University; SenseTime Research; Institute of Automation, Chinese Academy of Sciences; Tsinghua University; SenseTime Research; SenseTime Research + Qing Yuan Research Institute, Shanghai Jiao Tong University; SenseTime Research", + "bibtex": "@article{Qi_Yang_Zhu_Liu_Wu_Zhao_Li_2023, title={Exploring Stochastic Autoregressive Image Modeling for Visual Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25300}, DOI={10.1609/aaai.v37i2.25300}, abstractNote={Autoregressive language modeling (ALM) has been successfully used in self-supervised pre-training in Natural language processing (NLP). However, this paradigm has not achieved comparable results with other self-supervised approaches in computer vision (e.g., contrastive learning, masked image modeling). In this paper, we try to find the reason why autoregressive modeling does not work well on vision tasks. To tackle this problem, we fully analyze the limitation of visual autoregressive methods and proposed a novel stochastic autoregressive image modeling (named SAIM) by the two simple designs. First, we serialize the image into patches. Second, we employ the stochastic permutation strategy to generate an effective and robust image context which is critical for vision tasks. To realize this task, we create a parallel encoder-decoder training process in which the encoder serves a similar role to the standard vision transformer focusing on learning the whole contextual information, and meanwhile the decoder predicts the content of the current position so that the encoder and decoder can reinforce each other. 
Our method significantly improves the performance of autoregressive image modeling and achieves the best accuracy (83.9%) on the vanilla ViT-Base model among methods using only ImageNet-1K data. Transfer performance in downstream tasks also shows that our model achieves competitive performance. Code is available at https://github.com/qiy20/SAIM.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qi, Yu and Yang, Fan and Zhu, Yousong and Liu, Yufei and Wu, Liwei and Zhao, Rui and Li, Wei}, year={2023}, month={Jun.}, pages={2074-2081} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25300/25072", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25300", + "pdf_size": 858898, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13611696646013319973&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;sensetime.com;nlpr.ia.ac.cn;tsinghua.edu.cn;sensetime.com;sensetime.com;sensetime.com", + "email": "mails.tsinghua.edu.cn;sensetime.com;nlpr.ia.ac.cn;tsinghua.edu.cn;sensetime.com;sensetime.com;sensetime.com", + "github": "https://github.com/qiy20/SAIM", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;0;1;1+3;1", + "aff_unique_norm": "Tsinghua University;SenseTime;Chinese Academy of Sciences;Shanghai Jiao Tong University", + "aff_unique_dep": ";SenseTime Research;Institute of Automation;Qing Yuan Research Institute", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.sensetime.com;http://www.ia.cas.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "THU;SenseTime;CAS;SJTU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25305", + "title": "Exploring Stroke-Level Modifications for Scene Text Editing", + "track": "main", + "status": "Technical", + "abstract": "Scene text editing 
(STE) aims to replace text with the desired one while preserving background and styles of the original text. However, due to the complicated background textures and various text styles, existing methods fall short in generating clear and legible edited text images. In this study, we attribute the poor editing performance to two problems: 1) Implicit decoupling structure. Previous methods of editing the whole image have to learn different translation rules of background and text regions simultaneously. 2) Domain gap. Due to the lack of edited real scene text images, the network can only be well trained on synthetic pairs and performs poorly on real-world images. To handle the above problems, we propose a novel network by MOdifying Scene Text image at strokE Level (MOSTEL). Firstly, we generate stroke guidance maps to explicitly indicate regions to be edited. Different from the implicit one by directly modifying all the pixels at image level, such explicit instructions filter out the distractions from background and guide the network to focus on editing rules of text regions. Secondly, we propose a Semi-supervised Hybrid Learning to train the network with both labeled synthetic images and unpaired real scene text images. Thus, the STE model is adapted to real-world datasets distributions. Moreover, two new datasets (Tamper-Syn2k and Tamper-Scene) are proposed to fill the blank of public evaluation datasets. Extensive experiments demonstrate that our MOSTEL outperforms previous methods both qualitatively and quantitatively. 
Datasets and code will be available at https://github.com/qqqyd/MOSTEL.", + "primary_area": "computer vision ii", + "author": "Yadong Qu; Qingfeng Tan; Hongtao Xie; Jianjun Xu; YuXin Wang; Yongdong Zhang", + "authorids": "", + "aff": "University of Science and Technology of China; Cyberspace Institute of Advanced Technology, GuangZhou University; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Qu_Tan_Xie_Xu_Wang_Zhang_2023, title={Exploring Stroke-Level Modifications for Scene Text Editing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25305}, DOI={10.1609/aaai.v37i2.25305}, abstractNote={Scene text editing (STE) aims to replace text with the desired one while preserving background and styles of the original text. However, due to the complicated background textures and various text styles, existing methods fall short in generating clear and legible edited text images. In this study, we attribute the poor editing performance to two problems: 1) Implicit decoupling structure. Previous methods of editing the whole image have to learn different translation rules of background and text regions simultaneously. 2) Domain gap. Due to the lack of edited real scene text images, the network can only be well trained on synthetic pairs and performs poorly on real-world images. To handle the above problems, we propose a novel network by MOdifying Scene Text image at strokE Level (MOSTEL). Firstly, we generate stroke guidance maps to explicitly indicate regions to be edited. Different from the implicit one by directly modifying all the pixels at image level, such explicit instructions filter out the distractions from background and guide the network to focus on editing rules of text regions. 
Secondly, we propose a Semi-supervised Hybrid Learning to train the network with both labeled synthetic images and unpaired real scene text images. Thus, the STE model is adapted to real-world datasets distributions. Moreover, two new datasets (Tamper-Syn2k and Tamper-Scene) are proposed to fill the blank of public evaluation datasets. Extensive experiments demonstrate that our MOSTEL outperforms previous methods both qualitatively and quantitatively. Datasets and code will be available at https://github.com/qqqyd/MOSTEL.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qu, Yadong and Tan, Qingfeng and Xie, Hongtao and Xu, Jianjun and Wang, YuXin and Zhang, Yongdong}, year={2023}, month={Jun.}, pages={2119-2127} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25305/25077", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25305", + "pdf_size": 3643567, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8062799303450781472&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;gzhu.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;gzhu.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/qqqyd/MOSTEL", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0", + "aff_unique_norm": "University of Science and Technology of China;Guangzhou University", + "aff_unique_dep": ";Cyberspace Institute of Advanced Technology", + "aff_unique_url": "http://www.ustc.edu.cn;http://www.gzhu.edu.cn", + "aff_unique_abbr": "USTC;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26002", + "title": "Exploring Temporal Information Dynamics in Spiking Neural Networks", + "track": "main", + "status": "Technical", + 
"abstract": "Most existing Spiking Neural Network (SNN) works state that SNNs may utilize temporal information dynamics of spikes. However, an explicit analysis of temporal information dynamics is still missing. In this paper, we ask several important questions for providing a fundamental understanding of SNNs: What are temporal information dynamics inside SNNs? How can we measure the temporal information dynamics? How do the temporal information dynamics affect the overall learning performance? To answer these questions, we estimate the Fisher Information of the weights to measure the distribution of temporal information during training in an empirical manner. Surprisingly, as training goes on, Fisher information starts to concentrate in the early timesteps. After training, we observe that information becomes highly concentrated in earlier few timesteps, a phenomenon we refer to as temporal information concentration. We observe that the temporal information concentration phenomenon is a common learning feature of SNNs by conducting extensive experiments on various configurations such as architecture, dataset, optimization strategy, time constant, and timesteps. Furthermore, to reveal how temporal information concentration affects the performance of SNNs, we design a loss function to change the trend of temporal information. We find that temporal information concentration is crucial to building a robust SNN but has little effect on classification accuracy. Finally, we propose an efficient iterative pruning method based on our observation on temporal information concentration. 
\nCode is available at https://github.com/Intelligent-Computing-Lab-Yale/Exploring-Temporal-Information-Dynamics-in-Spiking-Neural-Networks.", + "primary_area": "machine learning ii", + "author": "Youngeun Kim; Yuhang Li; Hyoungseob Park; Yeshwanth Venkatesha; Anna Hambitzer; Priyadarshini Panda", + "authorids": "", + "aff": "Department of Electrical Engineering, Yale University; Department of Electrical Engineering, Yale University; Department of Electrical Engineering, Yale University; Department of Electrical Engineering, Yale University; Technology Innovation Institute; Department of Electrical Engineering, Yale University", + "bibtex": "@article{Kim_Li_Park_Venkatesha_Hambitzer_Panda_2023, title={Exploring Temporal Information Dynamics in Spiking Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26002}, DOI={10.1609/aaai.v37i7.26002}, abstractNote={Most existing Spiking Neural Network (SNN) works state that SNNs may utilize temporal information dynamics of spikes. However, an explicit analysis of temporal information dynamics is still missing. In this paper, we ask several important questions for providing a fundamental understanding of SNNs: What are temporal information dynamics inside SNNs? How can we measure the temporal information dynamics? How do the temporal information dynamics affect the overall learning performance? To answer these questions, we estimate the Fisher Information of the weights to measure the distribution of temporal information during training in an empirical manner. Surprisingly, as training goes on, Fisher information starts to concentrate in the early timesteps. After training, we observe that information becomes highly concentrated in earlier few timesteps, a phenomenon we refer to as temporal information concentration. 
We observe that the temporal information concentration phenomenon is a common learning feature of SNNs by conducting extensive experiments on various configurations such as architecture, dataset, optimization strategy, time constant, and timesteps. Furthermore, to reveal how temporal information concentration affects the performance of SNNs, we design a loss function to change the trend of temporal information. We find that temporal information concentration is crucial to building a robust SNN but has little effect on classification accuracy. Finally, we propose an efficient iterative pruning method based on our observation on temporal information concentration. Code is available at https://github.com/Intelligent-Computing-Lab-Yale/Exploring-Temporal-Information-Dynamics-in-Spiking-Neural-Networks.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Youngeun and Li, Yuhang and Park, Hyoungseob and Venkatesha, Yeshwanth and Hambitzer, Anna and Panda, Priyadarshini}, year={2023}, month={Jun.}, pages={8308-8316} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26002/25774", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26002", + "pdf_size": 445001, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14768980414352687515&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "yale.edu;yale.edu;yale.edu;yale.edu;tii.ae;yale.edu", + "email": "yale.edu;yale.edu;yale.edu;yale.edu;tii.ae;yale.edu", + "github": "https://github.com/Intelligent-Computing-Lab-Yale/Exploring-Temporal-Information-Dynamics-in-Spiking-Neural-Networks", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Yale University;Technology Innovation Institute", + "aff_unique_dep": "Department of Electrical Engineering;", + "aff_unique_url": "https://www.yale.edu;", + "aff_unique_abbr": "Yale;", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States;" + }, + { + "id": "article-26889", + "title": "Exploring Tradeoffs in Automated School Redistricting: Computational and Ethical Perspectives", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "The US public school system is administered by local school districts. Each district comprises a set of schools mapped to attendance zones which are annually assessed to meet enrollment objectives. To support school officials in redrawing attendance boundaries, existing approaches have proven promising but still suffer from several challenges, including: 1) inability to scale to large school districts, 2) high computational cost of obtaining compact school attendance zones, and 3) lack of discussion on quantifying ethical considerations underlying the redrawing of school boundaries. Motivated by these challenges, this paper approaches the school redistricting problem from both computational and ethical standpoints. First, we introduce a practical framework based on sampling methods to solve school redistricting as a graph partitioning problem. Next, the advantages of adopting a modified objective function for optimizing discrete geometry to obtain compact boundaries are examined. Lastly, alternative metrics to address ethical considerations in real-world scenarios are formally defined and thoroughly discussed. 
Our findings highlight the inclusiveness and efficiency advantages of the designed framework and depict how tradeoffs need to be made to obtain qualitatively different school redistricting plans.", + "primary_area": "", + "author": "Fanglan Chen; Subhodip Biswas; Zhiqian Chen; Shuo Lei; Naren Ramakrishnan; Chang-Tien Lu", + "authorids": "", + "aff": "Department of Computer Science, Virginia Tech; Department of Computer Science, Virginia Tech; Department of Computer Science and Engineering, Mississippi State University; Department of Computer Science, Virginia Tech; Department of Computer Science, Virginia Tech; Department of Computer Science, Virginia Tech", + "bibtex": "@article{Chen_Biswas_Chen_Lei_Ramakrishnan_Lu_2024, title={Exploring Tradeoffs in Automated School Redistricting: Computational and Ethical Perspectives}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26889}, DOI={10.1609/aaai.v37i13.26889}, abstractNote={The US public school system is administered by local school districts. Each district comprises a set of schools mapped to attendance zones which are annually assessed to meet enrollment objectives. To support school officials in redrawing attendance boundaries, existing approaches have proven promising but still suffer from several challenges, including: 1) inability to scale to large school districts, 2) high computational cost of obtaining compact school attendance zones, and 3) lack of discussion on quantifying ethical considerations underlying the redrawing of school boundaries. Motivated by these challenges, this paper approaches the school redistricting problem from both computational and ethical standpoints. First, we introduce a practical framework based on sampling methods to solve school redistricting as a graph partitioning problem. Next, the advantages of adopting a modified objective function for optimizing discrete geometry to obtain compact boundaries are examined. 
Lastly, alternative metrics to address ethical considerations in real-world scenarios are formally defined and thoroughly discussed. Our findings highlight the inclusiveness and efficiency advantages of the designed framework and depict how tradeoffs need to be made to obtain qualitatively different school redistricting plans.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Fanglan and Biswas, Subhodip and Chen, Zhiqian and Lei, Shuo and Ramakrishnan, Naren and Lu, Chang-Tien}, year={2024}, month={Jul.}, pages={15912-15920} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26889/26661", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26889", + "pdf_size": 3589235, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7660595551216786621&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "vt.edu;vt.edu;cse.msstate.edu;vt.edu;vt.edu;vt.edu", + "email": "vt.edu;vt.edu;cse.msstate.edu;vt.edu;vt.edu;vt.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Virginia Tech;Mississippi State University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.vt.edu;https://www.msstate.edu", + "aff_unique_abbr": "VT;MSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25128", + "title": "Exploring Tuning Characteristics of Ventral Stream\u2019s Neurons for Few-Shot Image Classification", + "track": "main", + "status": "Technical", + "abstract": "Human has the remarkable ability of learning novel objects by browsing extremely few examples, which may be attributed to the generic and robust feature extracted in the ventral stream of our brain for representing visual objects. 
In this sense, the tuning characteristics of ventral stream's neurons can be useful prior knowledge to improve few-shot classification. Specifically, we computationally model two groups of neurons found in ventral stream which are respectively sensitive to shape cues and color cues. Then we propose the hierarchical feature regularization method with these neuron models to regularize the backbone of a few-shot model, thus making it produce more generic and robust features for few-shot classification. In addition, to simulate the tuning characteristic that neuron firing at a higher rate in response to foreground stimulus elements compared to background elements, which we call belongingness, we design a foreground segmentation algorithm based on the observation that the foreground object usually does not appear at the edge of the picture, then multiply the foreground mask with the backbone of few-shot model. Our method is model-agnostic and can be applied to few-shot models with different backbones, training paradigms and classifiers.", + "primary_area": "computer vision i", + "author": "Lintao Dong; Wei Zhai; Zheng-Jun Zha", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Dong_Zhai_Zha_2023, title={Exploring Tuning Characteristics of Ventral Stream\u2019s Neurons for Few-Shot Image Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25128}, DOI={10.1609/aaai.v37i1.25128}, abstractNote={Human has the remarkable ability of learning novel objects by browsing extremely few examples, which may be attributed to the generic and robust feature extracted in the ventral stream of our brain for representing visual objects. In this sense, the tuning characteristics of ventral stream\u2019s neurons can be useful prior knowledge to improve few-shot classification. 
Specifically, we computationally model two groups of neurons found in ventral stream which are respectively sensitive to shape cues and color cues. Then we propose the hierarchical feature regularization method with these neuron models to regularize the backbone of a few-shot model, thus making it produce more generic and robust features for few-shot classification. In addition, to simulate the tuning characteristic that neuron firing at a higher rate in response to foreground stimulus elements compared to background elements, which we call belongingness, we design a foreground segmentation algorithm based on the observation that the foreground object usually does not appear at the edge of the picture, then multiply the foreground mask with the backbone of few-shot model. Our method is model-agnostic and can be applied to few-shot models with different backbones, training paradigms and classifiers.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Lintao and Zhai, Wei and Zha, Zheng-Jun}, year={2023}, month={Jun.}, pages={534-542} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25128/24900", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25128", + "pdf_size": 1150552, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10214848960362806240&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27035", + "title": "Exploring the 
Effectiveness of Mask-Guided Feature Modulation as a Mechanism for Localized Style Editing of Real Images (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The success of Deep Generative Models at high-resolution image generation has led to their extensive utilization for style editing of real images. Most existing methods work on the principle of inverting real images onto their latent space, followed by determining controllable directions. Both inversion of real images and determination of controllable latent directions are computationally expensive operations. Moreover, the determination of controllable latent directions requires additional human supervision. This work aims to explore the efficacy of mask-guided feature modulation in the latent space of a Deep Generative Model as a solution to these bottlenecks. To this end, we present the SemanticStyle Autoencoder (SSAE), a deep Generative Autoencoder model that leverages semantic mask-guided latent space manipulation for highly localized photorealistic style editing of real images. We present qualitative and quantitative results for the same and their analysis. This work shall serve as a guiding primer for future work.", + "primary_area": "", + "author": "Snehal Singh Tomar; Maitreya Suin; A. N. Rajagopalan", + "authorids": "", + "aff": "Indian Institute of Technology Madras; Indian Institute of Technology Madras; Indian Institute of Technology Madras", + "bibtex": "@article{Tomar_Suin_Rajagopalan_2024, title={Exploring the Effectiveness of Mask-Guided Feature Modulation as a Mechanism for Localized Style Editing of Real Images (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27035}, DOI={10.1609/aaai.v37i13.27035}, abstractNote={The success of Deep Generative Models at high-resolution image generation has led to their extensive utilization for style editing of real images. 
Most existing methods work on the principle of inverting real images onto their latent space, followed by determining controllable directions. Both inversion of real images and determination of controllable latent directions are computationally expensive operations. Moreover, the determination of controllable latent directions requires additional human supervision. This work aims to explore the efficacy of mask-guided feature modulation in the latent space of a Deep Generative Model as a solution to these bottlenecks. To this end, we present the SemanticStyle Autoencoder (SSAE), a deep Generative Autoencoder model that leverages semantic mask-guided latent space manipulation for highly localized photorealistic style editing of real images. We present qualitative and quantitative results for the same and their analysis. This work shall serve as a guiding primer for future work.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tomar, Snehal Singh and Suin, Maitreya and Rajagopalan, A. 
N.}, year={2024}, month={Jul.}, pages={16350-16351} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27035/26807", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27035", + "pdf_size": 1364397, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:cCIRiybQzuMJ:scholar.google.com/&scioq=Exploring+the+Effectiveness+of+Mask-Guided+Feature+Modulation+as+a+Mechanism+for+Localized+Style+Editing+of+Real+Images+(Student+Abstract)&hl=en&as_sdt=0,44", + "gs_version_total": 4, + "aff_domain": "smail.iitm.ac.in;gmail.com;ee.iitm.ac.in", + "email": "smail.iitm.ac.in;gmail.com;ee.iitm.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Madras", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitm.ac.in", + "aff_unique_abbr": "IIT Madras", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Madras", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26107", + "title": "Exploring the Interaction between Local and Global Latent Configurations for Clustering Single-Cell RNA-Seq: A Unified Perspective", + "track": "main", + "status": "Technical", + "abstract": "The most recent approaches for clustering single-cell RNA-sequencing data rely on deep auto-encoders. However, three major challenges remain unaddressed. First, current models overlook the impact of the cumulative errors induced by the pseudo-supervised embedding clustering task (Feature Randomness). Second, existing methods neglect the effect of the strong competition between embedding clustering and reconstruction (Feature Drift). Third, the previous deep clustering models regularly fail to consider the topological information of the latent data, even though the local and global latent configurations can bring complementary views to the clustering task. 
To address these challenges, we propose a novel approach that explores the interaction between local and global latent configurations to progressively adjust the reconstruction and embedding clustering tasks. We elaborate a topological and probabilistic filter to mitigate Feature Randomness and a cell-cell graph structure and content correction mechanism to counteract Feature Drift. The Zero-Inflated Negative Binomial model is also integrated to capture the characteristics of gene expression profiles. We conduct detailed experiments on real-world datasets from multiple representative genome sequencing platforms. Our approach outperforms the state-of-the-art clustering methods in various evaluation metrics.", + "primary_area": "machine learning iii", + "author": "Nairouz Mrabah; Mohamed Mahmoud Amar; Mohamed Bouguessa; Abdoulaye Banire Diallo", + "authorids": "", + "aff": "University of Quebec at Montreal, Montreal, Quebec, Canada; University of Quebec at Montreal, Montreal, Quebec, Canada; University of Quebec at Montreal, Montreal, Quebec, Canada; University of Quebec at Montreal, Montreal, Quebec, Canada", + "bibtex": "@article{Mrabah_Amar_Bouguessa_Diallo_2023, title={Exploring the Interaction between Local and Global Latent Configurations for Clustering Single-Cell RNA-Seq: A Unified Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26107}, DOI={10.1609/aaai.v37i8.26107}, abstractNote={The most recent approaches for clustering single-cell RNA-sequencing data rely on deep auto-encoders. However, three major challenges remain unaddressed. First, current models overlook the impact of the cumulative errors induced by the pseudo-supervised embedding clustering task (Feature Randomness). Second, existing methods neglect the effect of the strong competition between embedding clustering and reconstruction (Feature Drift). 
Third, the previous deep clustering models regularly fail to consider the topological information of the latent data, even though the local and global latent configurations can bring complementary views to the clustering task. To address these challenges, we propose a novel approach that explores the interaction between local and global latent configurations to progressively adjust the reconstruction and embedding clustering tasks. We elaborate a topological and probabilistic filter to mitigate Feature Randomness and a cell-cell graph structure and content correction mechanism to counteract Feature Drift. The Zero-Inflated Negative Binomial model is also integrated to capture the characteristics of gene expression profiles. We conduct detailed experiments on real-world datasets from multiple representative genome sequencing platforms. Our approach outperforms the state-of-the-art clustering methods in various evaluation metrics.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mrabah, Nairouz and Amar, Mohamed Mahmoud and Bouguessa, Mohamed and Diallo, Abdoulaye Banire}, year={2023}, month={Jun.}, pages={9235-9242} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26107/25879", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26107", + "pdf_size": 6095436, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14331647720046283884&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "courrier.uqam.ca;courrier.uqam.ca;uqam.ca;uqam.ca", + "email": "courrier.uqam.ca;courrier.uqam.ca;uqam.ca;uqam.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Quebec at Montreal", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uqam.ca", + "aff_unique_abbr": "UQAM", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Montreal", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "Canada" + }, + { + "id": "article-27028", + "title": "Exploring the Relative Value of Collaborative Optimisation Pathways (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Compression techniques in machine learning (ML) independently improve a model\u2019s inference efficiency by reducing its memory footprint while aiming to maintain its quality. This paper lays groundwork in questioning the merit of a compression pipeline involving all techniques as opposed to skipping a few by considering a case study on a keyword spotting model: DS-CNN-S. In addition, it documents improvements to the model\u2019s training and dataset infrastructure. For this model, preliminary findings suggest that a full-scale pipeline isn\u2019t required to achieve a competent memory footprint and accuracy, but a more comprehensive study is required.", + "primary_area": "", + "author": "Sudarshan Sreeram", + "authorids": "", + "aff": "Department of Computing, Imperial College London", + "bibtex": "@article{Sreeram_2024, title={Exploring the Relative Value of Collaborative Optimisation Pathways (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27028}, DOI={10.1609/aaai.v37i13.27028}, abstractNote={Compression techniques in machine learning (ML) independently improve a model\u2019s inference efficiency by reducing its memory footprint while aiming to maintain its quality. This paper lays groundwork in questioning the merit of a compression pipeline involving all techniques as opposed to skipping a few by considering a case study on a keyword spotting model: DS-CNN-S. In addition, it documents improvements to the model\u2019s training and dataset infrastructure. 
For this model, preliminary findings suggest that a full-scale pipeline isn\u2019t required to achieve a competent memory footprint and accuracy, but a more comprehensive study is required.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sreeram, Sudarshan}, year={2024}, month={Jul.}, pages={16336-16337} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27028/26800", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27028", + "pdf_size": 174123, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:7119EFeqncsJ:scholar.google.com/&scioq=Exploring+the+Relative+Value+of+Collaborative+Optimisation+Pathways+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "imperial.ac.uk", + "email": "imperial.ac.uk", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Imperial College London", + "aff_unique_dep": "Department of Computing", + "aff_unique_url": "https://www.imperial.ac.uk", + "aff_unique_abbr": "Imperial", + "aff_campus_unique_index": "0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25304", + "title": "Exposing the Self-Supervised Space-Time Correspondence Learning via Graph Kernels", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised space-time correspondence learning is emerging as a promising way of leveraging unlabeled video. Currently, most methods adapt contrastive learning with mining negative samples or reconstruction adapted from the image domain, which requires dense affinity across multiple frames or optical flow constraints. Moreover, video correspondence predictive models require mining more inherent properties in videos, such as structural information. 
In this work, we propose the VideoHiGraph, a space-time correspondence framework based on a learnable graph kernel. Concerning the video as the spatial-temporal graph, the learning objectives of VideoHiGraph are emanated in a self-supervised manner for predicting unobserved hidden graphs via graph kernel manner. We learn a representation of the temporal coherence across frames in which pairwise similarity defines the structured hidden graph, such that a biased random walk graph kernel along the sub-graph can predict long-range correspondence. Then, we learn a refined representation across frames on the node-level via a dense graph kernel. The self-supervision of the model training is formed by the structural and temporal consistency of the graph. VideoHiGraph achieves superior performance and demonstrates its robustness across the benchmark of label propagation tasks involving objects, semantic parts, keypoints, and instances. Our algorithm implementations have been made publicly available at https://github.com/zyqin19/VideoHiGraph.", + "primary_area": "computer vision ii", + "author": "Zheyun Qin; Xiankai Lu; Xiushan Nie; Yilong Yin; Jianbing Shen", + "authorids": "", + "aff": "School of Software, Shandong University; School of Software, Shandong University; School of Computer Science and Technology, Shandong Jianzhu University; School of Software, Shandong University; SKL-IOTSC, CIS, University of Macau", + "bibtex": "@article{Qin_Lu_Nie_Yin_Shen_2023, title={Exposing the Self-Supervised Space-Time Correspondence Learning via Graph Kernels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25304}, DOI={10.1609/aaai.v37i2.25304}, abstractNote={Self-supervised space-time correspondence learning is emerging as a promising way of leveraging unlabeled video. 
Currently, most methods adapt contrastive learning with mining negative samples or reconstruction adapted from the image domain, which requires dense affinity across multiple frames or optical flow constraints. Moreover, video correspondence predictive models require mining more inherent properties in videos, such as structural information. In this work, we propose the VideoHiGraph, a space-time correspondence framework based on a learnable graph kernel. Concerning the video as the spatial-temporal graph, the learning objectives of VideoHiGraph are emanated in a self-supervised manner for predicting unobserved hidden graphs via graph kernel manner. We learn a representation of the temporal coherence across frames in which pairwise similarity defines the structured hidden graph, such that a biased random walk graph kernel along the sub-graph can predict long-range correspondence. Then, we learn a refined representation across frames on the node-level via a dense graph kernel. The self-supervision of the model training is formed by the structural and temporal consistency of the graph. VideoHiGraph achieves superior performance and demonstrates its robustness across the benchmark of label propagation tasks involving objects, semantic parts, keypoints, and instances. 
Our algorithm implementations have been made publicly available at https://github.com/zyqin19/VideoHiGraph.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Zheyun and Lu, Xiankai and Nie, Xiushan and Yin, Yilong and Shen, Jianbing}, year={2023}, month={Jun.}, pages={2110-2118} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25304/25076", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25304", + "pdf_size": 5278984, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16410716981969793067&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com; ; ; ", + "email": "gmail.com;gmail.com; ; ; ", + "github": "https://github.com/zyqin19/VideoHiGraph", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Shandong University;Shandong Jianzhu University;University of Macau", + "aff_unique_dep": "School of Software;School of Computer Science and Technology;Department of Computer and Information Science", + "aff_unique_url": "http://www.sdu.edu.cn;http://www.sdjzu.edu.cn;https://www.um.edu.mo", + "aff_unique_abbr": ";;UM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;Macau" + }, + { + "id": "article-26426", + "title": "Expressive Optimal Temporal Planning via Optimization Modulo Theory", + "track": "main", + "status": "Technical", + "abstract": "Temporal Planning is the problem of synthesizing a course of actions given a predictive model of a system subject to temporal constraints. This kind of planning finds natural applications in the automation of industrial processes and in robotics when the timing and deadlines are important. 
Finding any plan in temporal planning is often not enough as it is sometimes needed to optimize a certain objective function: particularly interesting are the minimization of the makespan and the optimization of the costs of actions. Despite the importance of the problem, only few works in the literature tackled the problem of optimal temporal planning because of the complicated intermix of planning and scheduling.\nIn this paper, we address the problem of optimal temporal planning for a very expressive class of problems using a reduction of the bounded planning problem to Optimization Modulo Theory (OMT) a powerful discrete/continuous optimization framework. We theoretically and empirically show the expressive power of this approach and we set a baseline for future research in this area.", + "primary_area": "planning routing and scheduling", + "author": "Stefan Panjkovic; Andrea Micheli", + "authorids": "", + "aff": "Fondazione Bruno Kessler, Trento, Italy + University of Trento, Trento, Italy; Fondazione Bruno Kessler, Trento, Italy", + "bibtex": "@article{Panjkovic_Micheli_2023, title={Expressive Optimal Temporal Planning via Optimization Modulo Theory}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26426}, DOI={10.1609/aaai.v37i10.26426}, abstractNote={Temporal Planning is the problem of synthesizing a course of actions given a predictive model of a system subject to temporal constraints. This kind of planning finds natural applications in the automation of industrial processes and in robotics when the timing and deadlines are important. Finding any plan in temporal planning is often not enough as it is sometimes needed to optimize a certain objective function: particularly interesting are the minimization of the makespan and the optimization of the costs of actions. 
Despite the importance of the problem, only few works in the literature tackled the problem of optimal temporal planning because of the complicated intermix of planning and scheduling.\nIn this paper, we address the problem of optimal temporal planning for a very expressive class of problems using a reduction of the bounded planning problem to Optimization Modulo Theory (OMT) a powerful discrete/continuous optimization framework. We theoretically and empirically show the expressive power of this approach and we set a baseline for future research in this area.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Panjkovic, Stefan and Micheli, Andrea}, year={2023}, month={Jun.}, pages={12095-12102} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26426/26198", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26426", + "pdf_size": 173936, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5012504578580233676&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "fbk.eu;fbk.eu", + "email": "fbk.eu;fbk.eu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0", + "aff_unique_norm": "Fondazione Bruno Kessler;University of Trento", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.fbk.eu;https://www.unitn.it", + "aff_unique_abbr": "FBK;UniTN", + "aff_campus_unique_index": "0+0;0", + "aff_campus_unique": "Trento", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26232", + "title": "Extracting Low-/High- Frequency Knowledge from Graph Neural Networks and Injecting It into MLPs: An Effective GNN-to-MLP Distillation Framework", + "track": "main", + "status": "Technical", + "abstract": "Recent years have witnessed the great success of Graph Neural Networks (GNNs) in handling graph-related tasks. 
However, MLPs remain the primary workhorse for practical industrial applications due to their desirable inference efficiency and scalability. To reduce their gaps, one can directly distill knowledge from a well-designed teacher GNN to a student MLP, which is termed as GNN-to-MLP distillation. However, the process of distillation usually entails a loss of information, and ``which knowledge patterns of GNNs are more likely to be left and distilled into MLPs?\" becomes an important question. In this paper, we first factorize the knowledge learned by GNNs into low- and high-frequency components in the spectral domain and then derive their correspondence in the spatial domain. Furthermore, we identified a potential information drowning problem for existing GNN-to-MLP distillation, i.e., the high-frequency knowledge of the pre-trained GNNs may be overwhelmed by the low-frequency knowledge during distillation; we have described in detail what it represents, how it arises, what impact it has, and how to deal with it. In this paper, we propose an efficient Full-Frequency GNN-to-MLP (FF-G2M) distillation framework, which extracts both low-frequency and high-frequency knowledge from GNNs and injects it into MLPs. Extensive experiments show that FF-G2M improves over the vanilla MLPs by 12.6% and outperforms its corresponding teacher GNNs by 2.6% averaged over six graph datasets and three common GNN architectures.", + "primary_area": "machine learning iv", + "author": "Lirong Wu; Haitao Lin; Yufei Huang; Tianyu Fan; Stan Z. 
Li", + "authorids": "", + "aff": "AI Division, School of Engineering, Westlake University, Hangzhou, 310030 + Zhejiang University, Hangzhou, 310058; AI Division, School of Engineering, Westlake University, Hangzhou, 310030 + Zhejiang University, Hangzhou, 310058; AI Division, School of Engineering, Westlake University, Hangzhou, 310030 + Zhejiang University, Hangzhou, 310058; Zhejiang University, Hangzhou, 310058; AI Division, School of Engineering, Westlake University, Hangzhou, 310030", + "bibtex": "@article{Wu_Lin_Huang_Fan_Li_2023, title={Extracting Low-/High- Frequency Knowledge from Graph Neural Networks and Injecting It into MLPs: An Effective GNN-to-MLP Distillation Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26232}, DOI={10.1609/aaai.v37i9.26232}, abstractNote={Recent years have witnessed the great success of Graph Neural Networks (GNNs) in handling graph-related tasks. However, MLPs remain the primary workhorse for practical industrial applications due to their desirable inference efficiency and scalability. To reduce their gaps, one can directly distill knowledge from a well-designed teacher GNN to a student MLP, which is termed as GNN-to-MLP distillation. However, the process of distillation usually entails a loss of information, and ``which knowledge patterns of GNNs are more likely to be left and distilled into MLPs?" becomes an important question. In this paper, we first factorize the knowledge learned by GNNs into low- and high-frequency components in the spectral domain and then derive their correspondence in the spatial domain. Furthermore, we identified a potential information drowning problem for existing GNN-to-MLP distillation, i.e., the high-frequency knowledge of the pre-trained GNNs may be overwhelmed by the low-frequency knowledge during distillation; we have described in detail what it represents, how it arises, what impact it has, and how to deal with it. 
In this paper, we propose an efficient Full-Frequency GNN-to-MLP (FF-G2M) distillation framework, which extracts both low-frequency and high-frequency knowledge from GNNs and injects it into MLPs. Extensive experiments show that FF-G2M improves over the vanilla MLPs by 12.6% and outperforms its corresponding teacher GNNs by 2.6% averaged over six graph datasets and three common GNN architectures.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Lirong and Lin, Haitao and Huang, Yufei and Fan, Tianyu and Li, Stan Z.}, year={2023}, month={Jun.}, pages={10351-10360} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26232/26004", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26232", + "pdf_size": 647589, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2781667228844538127&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "westlake.edu.cn;westlake.edu.cn;westlake.edu.cn;westlake.edu.cn;westlake.edu.cn", + "email": "westlake.edu.cn;westlake.edu.cn;westlake.edu.cn;westlake.edu.cn;westlake.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;1;0", + "aff_unique_norm": "Westlake University;Zhejiang University", + "aff_unique_dep": "School of Engineering;", + "aff_unique_url": "https://www.westlake.edu.cn;http://www.zju.edu.cn", + "aff_unique_abbr": ";ZJU", + "aff_campus_unique_index": "0+0;0+0;0+0;0;0", + "aff_campus_unique": "Hangzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25738", + "title": "Extracting Semantic-Dynamic Features for Long-Term Stable Brain Computer Interface", + "track": "main", + "status": "Technical", + "abstract": "Brain-computer Interface (BCI) builds a neural signal to the motor command pathway, which is a prerequisite for the realization of neural prosthetics. 
However, a long-term stable BCI suffers from the neural data drift across days while retraining the BCI decoder is expensive and restricts its application scenarios. Recent solutions of neural signal recalibration treat the continuous neural signals as discrete, which is less effective in temporal feature extraction. Inspired by the observation from biologists that low-dimensional dynamics could describe high-dimensional neural signals, we model the underlying neural dynamics and propose a semantic-dynamic feature that represents the semantics and dynamics in a shared feature space facilitating the BCI recalibration. Besides, we present the joint distribution alignment instead of the common used marginal alignment strategy, dealing with the various complex changes in neural data distribution. Our recalibration approach achieves state-of-the-art performance on the real neural data of two monkeys in both classification and regression tasks. Our approach is also evaluated on a simulated dataset, which indicates its robustness in dealing with various common causes of neural signal instability.", + "primary_area": "humans and ai", + "author": "Tao Fang; Qian Zheng; Yu Qi; Gang Pan", + "authorids": "", + "aff": "The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China; MOE Frontier Science Center for Brain Science and Brain-machine Integration, Zhejiang University, Hangzhou, China+The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, 
Hangzhou, China", + "bibtex": "@article{Fang_Zheng_Qi_Pan_2023, title={Extracting Semantic-Dynamic Features for Long-Term Stable Brain Computer Interface}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25738}, DOI={10.1609/aaai.v37i5.25738}, abstractNote={Brain-computer Interface (BCI) builds a neural signal to the motor command pathway, which is a prerequisite for the realization of neural prosthetics. However, a long-term stable BCI suffers from the neural data drift across days while retraining the BCI decoder is expensive and restricts its application scenarios. Recent solutions of neural signal recalibration treat the continuous neural signals as discrete, which is less effective in temporal feature extraction. Inspired by the observation from biologists that low-dimensional dynamics could describe high-dimensional neural signals, we model the underlying neural dynamics and propose a semantic-dynamic feature that represents the semantics and dynamics in a shared feature space facilitating the BCI recalibration. Besides, we present the joint distribution alignment instead of the common used marginal alignment strategy, dealing with the various complex changes in neural data distribution. Our recalibration approach achieves state-of-the-art performance on the real neural data of two monkeys in both classification and regression tasks. 
Our approach is also evaluated on a simulated dataset, which indicates its robustness in dealing with various common causes of neural signal instability.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Tao and Zheng, Qian and Qi, Yu and Pan, Gang}, year={2023}, month={Jun.}, pages={5965-5973} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25738/25510", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25738", + "pdf_size": 801136, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12675772369981267352&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0+0;0+0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "State Key Lab of Brain-Machine Intelligence", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "0+0;0+0;0+0+0;0+0", + "aff_campus_unique": "Hangzhou", + "aff_country_unique_index": "0+0;0+0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25792", + "title": "FASTDIAGP: An Algorithm for Parallelized Direct Diagnosis", + "track": "main", + "status": "Technical", + "abstract": "Constraint-based applications attempt to identify a solution that meets all defined user requirements. If the requirements are inconsistent with the underlying constraint set, algorithms that compute diagnoses for inconsistent constraints should be implemented to help users resolve the \u201cno solution could be found\u201d dilemma. FastDiag is a typical direct diagnosis algorithm that supports diagnosis calculation without pre-determining conflicts. However, this approach faces runtime performance issues, especially when analyzing complex and large-scale knowledge bases. 
In this paper, we propose a novel algorithm, so-called FastDiagP, which is based on the idea of speculative programming. This algorithm extends FastDiag by integrating a parallelization mechanism that anticipates and pre-calculates consistency checks requested by FastDiag. This mechanism helps to provide consistency checks with fast answers and boosts the algorithm\u2019s runtime performance. The performance improvements of our proposed algorithm have been shown through empirical results using the Linux-2.6.3.33 configuration knowledge base.", + "primary_area": "knowledge representation and reasoning", + "author": "Viet-Man Le; Cristian Vidal Silva; Alexander Felfernig; David Benavides; Jos\u00e9 Galindo; Thi Ngoc Trang Tran", + "authorids": "", + "aff": "Graz University of Technology, Graz, Austria; Universidad de Talca, Talca, Chile; Graz University of Technology, Graz, Austria; University of Sevilla, Seville, Spain; University of Sevilla, Seville, Spain; Graz University of Technology, Graz, Austria", + "bibtex": "@article{Le_Vidal Silva_Felfernig_Benavides_Galindo_Tran_2023, title={FASTDIAGP: An Algorithm for Parallelized Direct Diagnosis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25792}, DOI={10.1609/aaai.v37i5.25792}, abstractNote={Constraint-based applications attempt to identify a solution that meets all defined user requirements. If the requirements are inconsistent with the underlying constraint set, algorithms that compute diagnoses for inconsistent constraints should be implemented to help users resolve the \u201cno solution could be found\u201d dilemma. FastDiag is a typical direct diagnosis algorithm that supports diagnosis calculation without pre-determining conflicts. However, this approach faces runtime performance issues, especially when analyzing complex and large-scale knowledge bases. In this paper, we propose a novel algorithm, so-called FastDiagP, which is based on the idea of speculative programming. 
This algorithm extends FastDiag by integrating a parallelization mechanism that anticipates and pre-calculates consistency checks requested by FastDiag. This mechanism helps to provide consistency checks with fast answers and boosts the algorithm\u2019s runtime performance. The performance improvements of our proposed algorithm have been shown through empirical results using the Linux-2.6.3.33 configuration knowledge base.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Le, Viet-Man and Vidal Silva, Cristian and Felfernig, Alexander and Benavides, David and Galindo, Jos\u00e9 and Tran, Thi Ngoc Trang}, year={2023}, month={Jun.}, pages={6442-6449} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25792/25564", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25792", + "pdf_size": 219368, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17480735359275077980&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "ist.tugraz.at;utalca.cl;ist.tugraz.at;us.es;us.es;ist.tugraz.at", + "email": "ist.tugraz.at;utalca.cl;ist.tugraz.at;us.es;us.es;ist.tugraz.at", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2;2;0", + "aff_unique_norm": "Graz University of Technology;Universidad de Talca;University of Sevilla", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.tugraz.at;https://www.uttalca.cl;https://www.us.es", + "aff_unique_abbr": "TUGraz;;US", + "aff_campus_unique_index": "0;1;0;2;2;0", + "aff_campus_unique": "Graz;Talca;Seville", + "aff_country_unique_index": "0;1;0;2;2;0", + "aff_country_unique": "Austria;Chile;Spain" + }, + { + "id": "article-27077", + "title": "FC-TrackNet: Fast Convergence Net for 6D Pose Tracking in Synthetic Domains", + "track": "demonstrations", + "status": "Technical", + "abstract": "In this work, we propose a fast convergence track net, or FC-TrackNet, based on a synthetic 
data-driven approach to maintaining long-term 6D pose tracking. Comparison experiments are performed on two different datasets, The results demonstrate that our approach can achieve a consistent tracking frequency of 90.9 Hz as well as higher accuracy than the state-of-the art approaches.", + "primary_area": "", + "author": "Di Jia; Qian Wang; Jun Cao; Peng Cai; Zhiyang Jin", + "authorids": "", + "aff": "Liaoning Technical University; Liaoning Technical University + Intel Corporation; Intel Corporation; Liaoning Technical University; Liaoning Technical University", + "bibtex": "@article{Jia_Wang_Cao_Cai_Jin_2024, title={FC-TrackNet: Fast Convergence Net for 6D Pose Tracking in Synthetic Domains}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27077}, DOI={10.1609/aaai.v37i13.27077}, abstractNote={In this work, we propose a fast convergence track net, or FC-TrackNet, based on a synthetic data-driven approach to maintaining long-term 6D pose tracking. Comparison experiments are performed on two different datasets, The results demonstrate that our approach can achieve a consistent tracking frequency of 90.9 Hz as well as higher accuracy than the state-of-the art approaches.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Di and Wang, Qian and Cao, Jun and Cai, Peng and Jin, Zhiyang}, year={2024}, month={Jul.}, pages={16455-16457} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27077/26849", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27077", + "pdf_size": 933368, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8507769353784843116&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "lntu.edu.cn;163.com;intel.com;163.com;163.com", + "email": "lntu.edu.cn;163.com;intel.com;163.com;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;1;0;0", + "aff_unique_norm": "Liaoning 
Technical University;Intel Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.lntu.edu.cn/;https://www.intel.com", + "aff_unique_abbr": ";Intel", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;1;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25394", + "title": "FEditNet: Few-Shot Editing of Latent Semantics in GAN Spaces", + "track": "main", + "status": "Technical", + "abstract": "Generative Adversarial networks (GANs) have demonstrated their powerful capability of synthesizing high-resolution images, and great efforts have been made to interpret the semantics in the latent spaces of GANs. However, existing works still have the following limitations: (1) the majority of works rely on either pretrained attribute predictors or large-scale labeled datasets, which are difficult to collect in most cases, and (2) some other methods are only suitable for restricted cases, such as focusing on interpretation of human facial images using prior facial semantics. In this paper, we propose a GAN-based method called FEditNet, aiming to discover latent semantics using very few labeled data without any pretrained predictors or prior knowledge. Specifically, we reuse the knowledge from the pretrained GANs, and by doing so, avoid overfitting during the few-shot training of FEditNet. Moreover, our layer-wise objectives which take content consistency into account also ensure the disentanglement between attributes. Qualitative and quantitative results demonstrate that our method outperforms the state-of-the-art methods on various datasets. 
The code is available at https://github.com/THU-LYJ-Lab/FEditNet.", + "primary_area": "computer vision iii", + "author": "Mengfei Xia; Yezhi Shu; Yuji Wang; Yu-Kun Lai; Qiang Li; Pengfei Wan; Zhongyuan Wang; Yong-Jin Liu", + "authorids": "", + "aff": "Department of Computer Science, BNRist, Tsinghua University; Department of Computer Science, BNRist, Tsinghua University; Department of Computer Science, BNRist, Tsinghua University; School of Computer Science and Informatics, Cardiff University; Kuaishou Technology; Kuaishou Technology; Kuaishou Technology; Department of Computer Science, BNRist, Tsinghua University", + "bibtex": "@article{Xia_Shu_Wang_Lai_Li_Wan_Wang_Liu_2023, title={FEditNet: Few-Shot Editing of Latent Semantics in GAN Spaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25394}, DOI={10.1609/aaai.v37i3.25394}, abstractNote={Generative Adversarial networks (GANs) have demonstrated their powerful capability of synthesizing high-resolution images, and great efforts have been made to interpret the semantics in the latent spaces of GANs. However, existing works still have the following limitations: (1) the majority of works rely on either pretrained attribute predictors or large-scale labeled datasets, which are difficult to collect in most cases, and (2) some other methods are only suitable for restricted cases, such as focusing on interpretation of human facial images using prior facial semantics. In this paper, we propose a GAN-based method called FEditNet, aiming to discover latent semantics using very few labeled data without any pretrained predictors or prior knowledge. Specifically, we reuse the knowledge from the pretrained GANs, and by doing so, avoid overfitting during the few-shot training of FEditNet. Moreover, our layer-wise objectives which take content consistency into account also ensure the disentanglement between attributes. 
Qualitative and quantitative results demonstrate that our method outperforms the state-of-the-art methods on various datasets. The code is available at https://github.com/THU-LYJ-Lab/FEditNet.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xia, Mengfei and Shu, Yezhi and Wang, Yuji and Lai, Yu-Kun and Li, Qiang and Wan, Pengfei and Wang, Zhongyuan and Liu, Yong-Jin}, year={2023}, month={Jun.}, pages={2919-2927} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25394/25166", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25394", + "pdf_size": 6114137, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10853513937893999821&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;cardiff.ac.uk;kuaishou.com;kuaishou.com;kuaishou.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;cardiff.ac.uk;kuaishou.com;kuaishou.com;kuaishou.com;tsinghua.edu.cn", + "github": "https://github.com/THU-LYJ-Lab/FEditNet", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;2;2;2;0", + "aff_unique_norm": "Tsinghua University;Cardiff University;Kuaishou Technology", + "aff_unique_dep": "Department of Computer Science;School of Computer Science and Informatics;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.cardiff.ac.uk;https://www.kuaishou.com", + "aff_unique_abbr": "THU;Cardiff;Kuaishou", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cardiff", + "aff_country_unique_index": "0;0;0;1;0;0;0;0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25996", + "title": "FLAME: Free-Form Language-Based Motion Synthesis & Editing", + "track": "main", + "status": "Technical", + "abstract": "Text-based motion generation models are drawing a surge of interest for their potential for automating the 
motion-making process in the game, animation, or robot industries. In this paper, we propose a diffusion-based motion synthesis and editing model named FLAME. Inspired by the recent successes in diffusion models, we integrate diffusion-based generative models into the motion domain. FLAME can generate high-fidelity motions well aligned with the given text. Also, it can edit the parts of the motion, both frame-wise and joint-wise, without any fine-tuning. FLAME involves a new transformer-based architecture we devise to better handle motion data, which is found to be crucial to manage variable-length motions and well attend to free-form text. In experiments, we show that FLAME achieves state-of-the-art generation performances on three text-motion datasets: HumanML3D, BABEL, and KIT. We also demonstrate that FLAME\u2019s editing capability can be extended to other tasks such as motion prediction or motion in-betweening, which have been previously covered by dedicated models.", + "primary_area": "machine learning ii", + "author": "Jihoon Kim; Jiseob Kim; Sungjoon Choi", + "authorids": "", + "aff": "Korea University+Kakao Brain; Kakao Brain; Korea University", + "bibtex": "@article{Kim_Kim_Choi_2023, title={FLAME: Free-Form Language-Based Motion Synthesis & Editing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25996}, DOI={10.1609/aaai.v37i7.25996}, abstractNote={Text-based motion generation models are drawing a surge of interest for their potential for automating the motion-making process in the game, animation, or robot industries. In this paper, we propose a diffusion-based motion synthesis and editing model named FLAME. Inspired by the recent successes in diffusion models, we integrate diffusion-based generative models into the motion domain. FLAME can generate high-fidelity motions well aligned with the given text. Also, it can edit the parts of the motion, both frame-wise and joint-wise, without any fine-tuning. 
FLAME involves a new transformer-based architecture we devise to better handle motion data, which is found to be crucial to manage variable-length motions and well attend to free-form text. In experiments, we show that FLAME achieves state-of-the-art generation performances on three text-motion datasets: HumanML3D, BABEL, and KIT. We also demonstrate that FLAME\u2019s editing capability can be extended to other tasks such as motion prediction or motion in-betweening, which have been previously covered by dedicated models.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Jihoon and Kim, Jiseob and Choi, Sungjoon}, year={2023}, month={Jun.}, pages={8255-8263} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25996/25768", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25996", + "pdf_size": 3581781, + "gs_citation": 214, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7810529807268977&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "korea.ac.kr;kakaobrain.com;korea.ac.kr", + "email": "korea.ac.kr;kakaobrain.com;korea.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0", + "aff_unique_norm": "Korea University;Kakao Brain", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.korea.ac.kr;https://brain.kakao.com", + "aff_unique_abbr": "KU;Kakao Brain", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26884", + "title": "FOLL-E: Teaching First Order Logic to Children", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "First-order logic (FO) is an important foundation of many domains, including computer science and artificial intelligence. In recent efforts to teach basic CS and AI concepts to children, FO has so far remained absent. 
In this paper, we examine whether it is possible to design a learning environment that both motivates and enables children to learn the basics of FO. The key components of the learning environment are a syntax-free blocks-based notation for FO, graphics-based puzzles to solve, and a tactile environment which uses computer vision to allow the children to work with wooden blocks. The resulting FOLL-E system is intended to sharpen childrens' reasoning skills, encourage critical thinking and make them aware of the ambiguities of natural language. During preliminary testing with children, they reported that they found the notation intuitive and inviting, and that they enjoyed interacting with the application.", + "primary_area": "", + "author": "Simon Vandevelde; Joost Vennekens", + "authorids": "", + "aff": "KU Leuven, De Nayer Campus, Dept. of Computer Science, Belgium+Leuven.AI \u2013 KU Leuven Institute for AI, B-3000 Leuven, Belgium+Flanders Make \u2013 DTAI-FET; KU Leuven, De Nayer Campus, Dept. of Computer Science, Belgium+Leuven.AI \u2013 KU Leuven Institute for AI, B-3000 Leuven, Belgium+Flanders Make \u2013 DTAI-FET", + "bibtex": "@article{Vandevelde_Vennekens_2024, title={FOLL-E: Teaching First Order Logic to Children}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26884}, DOI={10.1609/aaai.v37i13.26884}, abstractNote={First-order logic (FO) is an important foundation of many domains, including computer science and artificial intelligence. In recent efforts to teach basic CS and AI concepts to children, FO has so far remained absent. In this paper, we examine whether it is possible to design a learning environment that both motivates and enables children to learn the basics of FO. The key components of the learning environment are a syntax-free blocks-based notation for FO, graphics-based puzzles to solve, and a tactile environment which uses computer vision to allow the children to work with wooden blocks. 
The resulting FOLL-E system is intended to sharpen childrens\u2019 reasoning skills, encourage critical thinking and make them aware of the ambiguities of natural language. During preliminary testing with children, they reported that they found the notation intuitive and inviting, and that they enjoyed interacting with the application.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vandevelde, Simon and Vennekens, Joost}, year={2024}, month={Jul.}, pages={15869-15876} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26884/26656", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26884", + "pdf_size": 9081441, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1326865374808278699&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kuleuven.be;kuleuven.be", + "email": "kuleuven.be;kuleuven.be", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1+2;0+1+2", + "aff_unique_norm": "KU Leuven;KU Leuven Institute for AI;Flanders Make", + "aff_unique_dep": "Dept. of Computer Science;Leuven.AI;DTAI-FET", + "aff_unique_url": "https://www.kuleuven.be;https://www.kuleuven.be/english;https://www.flandersmake.be", + "aff_unique_abbr": "KU Leuven;KU Leuven;", + "aff_campus_unique_index": "0+1;0+1", + "aff_campus_unique": "De Nayer;Leuven;", + "aff_country_unique_index": "0+0+0;0+0+0", + "aff_country_unique": "Belgium" + }, + { + "id": "article-25218", + "title": "FSR: A General Frequency-Oriented Framework to Accelerate Image Super-resolution Networks", + "track": "main", + "status": "Technical", + "abstract": "Deep neural networks (DNNs) have witnessed remarkable achievement in image super-resolution (SR), and plenty of DNN-based SR models with elaborated network designs have recently been proposed. However, existing methods usually require substantial computations by operating in spatial domain. 
To address this issue, we propose a general frequency-oriented framework (FSR) to accelerate SR networks by considering data characteristics in frequency domain. Our FSR mainly contains dual feature aggregation module (DFAM) to extract informative features in both spatial and transform domains, followed by a four-path SR-Module with different capacities to super-resolve in the frequency domain. Specifically, DFAM further consists of a transform attention block (TABlock) and a spatial context block (SCBlock) to extract global spectral information and local spatial information, respectively, while SR-Module is a parallel network container that contains four to-be-accelerated branches. Furthermore, we propose an adaptive weight strategy for a trade-off between image details recovery and visual quality. Extensive experiments show that our FSR can save FLOPs by almost 40% while reducing inference time by 50% for other SR methods (e.g., FSRCNN, CARN, SRResNet and RCAN). Code is available at https://github.com/THU-Kingmin/FSR.", + "primary_area": "computer vision i", + "author": "Jinmin Li; Tao Dai; Mingyan Zhu; Bin Chen; Zhi Wang; Shu-Tao Xia", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China; College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, China; Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China+Research Center of Artificial Intelligence, Peng Cheng Laboratory; Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China; Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China; Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China+Research Center of Artificial Intelligence, Peng Cheng Laboratory", + "bibtex": "@article{Li_Dai_Zhu_Chen_Wang_Xia_2023, title={FSR: A General Frequency-Oriented Framework to Accelerate Image Super-resolution 
Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25218}, DOI={10.1609/aaai.v37i1.25218}, abstractNote={Deep neural networks (DNNs) have witnessed remarkable achievement in image super-resolution (SR), and plenty of DNN-based SR models with elaborated network designs have recently been proposed. However, existing methods usually require substantial computations by operating in spatial domain. To address this issue, we propose a general frequency-oriented framework (FSR) to accelerate SR networks by considering data characteristics in frequency domain. Our FSR mainly contains dual feature aggregation module (DFAM) to extract informative features in both spatial and transform domains, followed by a four-path SR-Module with different capacities to super-resolve in the frequency domain. Specifically, DFAM further consists of a transform attention block (TABlock) and a spatial context block (SCBlock) to extract global spectral information and local spatial information, respectively, while SR-Module is a parallel network container that contains four to-be-accelerated branches. Furthermore, we propose an adaptive weight strategy for a trade-off between image details recovery and visual quality. Extensive experiments show that our FSR can save FLOPs by almost 40% while reducing inference time by 50% for other SR methods (e.g., FSRCNN, CARN, SRResNet and RCAN). 
Code is available at https://github.com/THU-Kingmin/FSR.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jinmin and Dai, Tao and Zhu, Mingyan and Chen, Bin and Wang, Zhi and Xia, Shu-Tao}, year={2023}, month={Jun.}, pages={1343-1350} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25218/24990", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25218", + "pdf_size": 1044782, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1409184409299389162&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;hit.edu.cn;sz.tsinghua.edu.cn;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;hit.edu.cn;sz.tsinghua.edu.cn;sz.tsinghua.edu.cn", + "github": "https://github.com/THU-Kingmin/FSR", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0+2;3;0;0+2", + "aff_unique_norm": "Tsinghua University;Shenzhen University;Peng Cheng Laboratory;Harbin Institute of Technology", + "aff_unique_dep": "International Graduate School;College of Computer Science and Software Engineering;Research Center of Artificial Intelligence;Department of Computer Science and Technology", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.szu.edu.cn;http://www.pcl.ac.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "THU;SZU;;HIT", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25843", + "title": "FTM: A Frame-Level Timeline Modeling Method for Temporal Graph Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Learning representations for graph-structured data is essential for graph analytical tasks. 
While remarkable progress has been made on static graphs, researches on temporal graphs are still in its beginning stage. The bottleneck of the temporal graph representation learning approach is the neighborhood aggregation strategy, based on which graph attributes share and gather information explicitly. Existing neighborhood aggregation strategies fail to capture either the short-term features or the long-term features of temporal graph attributes, leading to unsatisfactory model performance and even poor robustness and domain generality of the representation learning method. To address this problem, we propose a Frame-level Timeline Modeling (FTM) method that helps to capture both short-term and long-term features and thus learns more informative representations on temporal graphs. In particular, we present a novel link-based framing technique to preserve the short-term features and then incorporate a timeline aggregator module to capture the intrinsic dynamics of graph evolution as long-term features. Our method can be easily assembled with most temporal GNNs. Extensive experiments on common datasets show that our method brings great improvements to the capability, robustness, and domain generality of backbone methods in downstream tasks. 
Our code can be found at https://github.com/yeeeqichen/FTM.", + "primary_area": "machine learning i", + "author": "Bowen Cao; Qichen Ye; Weiyuan Xu; Yuexian Zou", + "authorids": "", + "aff": "ADSPLAB, School of ECE, Peking University, Shenzhen, China; ADSPLAB, School of ECE, Peking University, Shenzhen, China; ADSPLAB, School of ECE, Peking University, Shenzhen, China; ADSPLAB, School of ECE, Peking University, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China", + "bibtex": "@article{Cao_Ye_Xu_Zou_2023, title={FTM: A Frame-Level Timeline Modeling Method for Temporal Graph Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25843}, DOI={10.1609/aaai.v37i6.25843}, abstractNote={Learning representations for graph-structured data is essential for graph analytical tasks. While remarkable progress has been made on static graphs, researches on temporal graphs are still in its beginning stage. The bottleneck of the temporal graph representation learning approach is the neighborhood aggregation strategy, based on which graph attributes share and gather information explicitly. Existing neighborhood aggregation strategies fail to capture either the short-term features or the long-term features of temporal graph attributes, leading to unsatisfactory model performance and even poor robustness and domain generality of the representation learning method. To address this problem, we propose a Frame-level Timeline Modeling (FTM) method that helps to capture both short-term and long-term features and thus learns more informative representations on temporal graphs. In particular, we present a novel link-based framing technique to preserve the short-term features and then incorporate a timeline aggregator module to capture the intrinsic dynamics of graph evolution as long-term features. Our method can be easily assembled with most temporal GNNs. 
Extensive experiments on common datasets show that our method brings great improvements to the capability, robustness, and domain generality of backbone methods in downstream tasks. Our code can be found at https://github.com/yeeeqichen/FTM.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Bowen and Ye, Qichen and Xu, Weiyuan and Zou, Yuexian}, year={2023}, month={Jun.}, pages={6888-6896} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25843/25615", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25843", + "pdf_size": 750205, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5890357212898975949&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 8, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "https://github.com/yeeeqichen/FTM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "Peking University;Peng Cheng Laboratory", + "aff_unique_dep": "School of ECE;", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "PKU;", + "aff_campus_unique_index": "0;0;0;0+0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26938", + "title": "FV-Train: Quantum Convolutional Neural Network Training with a Finite Number of Qubits by Extracting Diverse Features (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Quantum convolutional neural network (QCNN) has just become as an emerging research topic as we experience the noisy intermediate-scale quantum (NISQ) era and beyond. 
As convolutional filters in QCNN extract intrinsic feature using quantum-based ansatz, it should use only finite number of qubits to prevent barren plateaus, and it introduces the lack of the feature information. In this paper, we propose a novel QCNN training algorithm to optimize feature extraction while using only a finite number of qubits, which is called fidelity-variation training (FV-Training).", + "primary_area": "", + "author": "Hankyul Baek; Won Joon Yun; Joongheon Kim", + "authorids": "", + "aff": "Korea University, Seoul, Republic of Korea; Korea University, Seoul, Republic of Korea; Korea University, Seoul, Republic of Korea", + "bibtex": "@article{Baek_Yun_Kim_2024, title={FV-Train: Quantum Convolutional Neural Network Training with a Finite Number of Qubits by Extracting Diverse Features (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26938}, DOI={10.1609/aaai.v37i13.26938}, abstractNote={Quantum convolutional neural network (QCNN) has just become as an emerging research topic as we experience the noisy intermediate-scale quantum (NISQ) era and beyond. As convolutional filters in QCNN extract intrinsic feature using quantum-based ansatz, it should use only finite number of qubits to prevent barren plateaus, and it introduces the lack of the feature information. 
In this paper, we propose a novel QCNN training algorithm to optimize feature extraction while using only a finite number of qubits, which is called fidelity-variation training (FV-Training).}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Baek, Hankyul and Yun, Won Joon and Kim, Joongheon}, year={2024}, month={Jul.}, pages={16156-16157} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26938/26710", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26938", + "pdf_size": 146169, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17064840889001738812&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Korea University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.korea.ac.kr", + "aff_unique_abbr": "KU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-25187", + "title": "FacT: Factor-Tuning for Lightweight Adaptation on Vision Transformer", + "track": "main", + "status": "Technical", + "abstract": "Recent work has explored the potential to adapt a pre-trained vision transformer (ViT) by updating only a few parameters so as to improve storage efficiency, called parameter-efficient transfer learning (PETL). Current PETL methods have shown that by tuning only 0.5% of the parameters, ViT can be adapted to downstream tasks with even better performance than full fine-tuning. In this paper, we aim to further promote the efficiency of PETL to meet the extreme storage constraint in real-world applications. 
To this end, we propose a tensorization-decomposition framework to store the weight increments, in which the weights of each ViT are tensorized into a single 3D tensor, and their increments are then decomposed into lightweight factors. In the fine-tuning process, only the factors need to be updated and stored, termed Factor-Tuning (FacT). On VTAB-1K benchmark, our method performs on par with NOAH, the state-of-the-art PETL method, while being 5x more parameter-efficient. We also present a tiny version that only uses 8K (0.01% of ViT's parameters) trainable parameters but outperforms full fine-tuning and many other PETL methods such as VPT and BitFit. In few-shot settings, FacT also beats all PETL baselines using the fewest parameters, demonstrating its strong capability in the low-data regime.", + "primary_area": "computer vision i", + "author": "Shibo Jie; Zhi-Hong Deng", + "authorids": "", + "aff": "School of Intelligence Science and Technology, Peking University; School of Intelligence Science and Technology, Peking University", + "bibtex": "@article{Jie_Deng_2023, title={FacT: Factor-Tuning for Lightweight Adaptation on Vision Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25187}, DOI={10.1609/aaai.v37i1.25187}, abstractNote={Recent work has explored the potential to adapt a pre-trained vision transformer (ViT) by updating only a few parameters so as to improve storage efficiency, called parameter-efficient transfer learning (PETL). Current PETL methods have shown that by tuning only 0.5% of the parameters, ViT can be adapted to downstream tasks with even better performance than full fine-tuning. In this paper, we aim to further promote the efficiency of PETL to meet the extreme storage constraint in real-world applications. 
To this end, we propose a tensorization-decomposition framework to store the weight increments, in which the weights of each ViT are tensorized into a single 3D tensor, and their increments are then decomposed into lightweight factors. In the fine-tuning process, only the factors need to be updated and stored, termed Factor-Tuning (FacT). On VTAB-1K benchmark, our method performs on par with NOAH, the state-of-the-art PETL method, while being 5x more parameter-efficient. We also present a tiny version that only uses 8K (0.01% of ViT\u2019s parameters) trainable parameters but outperforms full fine-tuning and many other PETL methods such as VPT and BitFit. In few-shot settings, FacT also beats all PETL baselines using the fewest parameters, demonstrating its strong capability in the low-data regime.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jie, Shibo and Deng, Zhi-Hong}, year={2023}, month={Jun.}, pages={1060-1068} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25187/24959", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25187", + "pdf_size": 273003, + "gs_citation": 128, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12395761476874163383&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Intelligence Science and Technology", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25719", + "title": "Facility Location Games with Entrance Fees", + "track": "main", + "status": "Technical", + "abstract": "The facility location game is an extensively studied problem in 
mechanism design. In the classical model, the cost of each agent is her distance to the nearest facility. In this paper, we consider a novel model where each facility charges an entrance fee, which is a function of the facility's location. Thus, in our model, the cost of each agent is the sum of the distance to the facility and the entrance fee of the facility. The generalized model captures more real-life scenarios. In our model, the entrance fee function can be an arbitrary function, and the corresponding preferences of agents may not be single-peaked anymore: this makes the problem complex and requires new techniques in the analysis. We systematically study the model and design strategyproof mechanisms with nice approximation ratios and also complement these with nearly-tight impossibility results. Specifically, for one-facility and two-facility games, we provide upper and lower bounds for the approximation ratios given by deterministic and randomized mechanisms, with respect to the utilitarian and egalitarian objectives. Most of our bounds are tight, and these bounds are independent of the entrance fee functions. 
Our results also match the results of the classical model.", + "primary_area": "game theory and economic paradigms", + "author": "Mengfan Ma; Mingyu Xiao; Tian Bai; Bakh Khoussainov", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of Electronic Science and Technology of China; School of Computer Science and Engineering, University of Electronic Science and Technology of China; School of Computer Science and Engineering, University of Electronic Science and Technology of China; School of Computer Science and Engineering, University of Electronic Science and Technology of China", + "bibtex": "@article{Ma_Xiao_Bai_Khoussainov_2023, title={Facility Location Games with Entrance Fees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25719}, DOI={10.1609/aaai.v37i5.25719}, abstractNote={The facility location game is an extensively studied problem in mechanism design. In the classical model, the cost of each agent is her distance to the nearest facility. In this paper, we consider a novel model where each facility charges an entrance fee, which is a function of the facility\u2019s location. Thus, in our model, the cost of each agent is the sum of the distance to the facility and the entrance fee of the facility. The generalized model captures more real-life scenarios. In our model, the entrance fee function can be an arbitrary function, and the corresponding preferences of agents may not be single-peaked anymore: this makes the problem complex and requires new techniques in the analysis. We systematically study the model and design strategyproof mechanisms with nice approximation ratios and also complement these with nearly-tight impossibility results. Specifically, for one-facility and two-facility games, we provide upper and lower bounds for the approximation ratios given by deterministic and randomized mechanisms, with respect to the utilitarian and egalitarian objectives. 
Most of our bounds are tight, and these bounds are independent of the entrance fee functions. Our results also match the results of the classical model.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Mengfan and Xiao, Mingyu and Bai, Tian and Khoussainov, Bakh}, year={2023}, month={Jun.}, pages={5797-5804} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25719/25491", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25719", + "pdf_size": 310988, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10552628739202125085&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;uestc.edu.cn;outlook.com;uestc.edu.cn", + "email": "gmail.com;uestc.edu.cn;outlook.com;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.uestc.edu.cn", + "aff_unique_abbr": "UESTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26618", + "title": "Factual and Informative Review Generation for Explainable Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Recent models can generate fluent and grammatical synthetic reviews while accurately predicting user ratings. The generated reviews, expressing users' estimated opinions towards related products, are often viewed as natural language \u2018rationales\u2019 for the jointly predicted rating. However, previous studies found that existing models often generate repetitive, universally applicable, and generic explanations, resulting in uninformative rationales. 
Further, our analysis shows that previous models' generated content often contain factual hallucinations. These issues call for novel solutions that could generate both informative and factually grounded explanations. Inspired by recent success in using retrieved content in addition to parametric knowledge for generation, we propose to augment the generator with a personalized retriever, where the retriever's output serves as external knowledge for enhancing the generator. Experiments on Yelp, TripAdvisor, and Amazon Movie Reviews dataset show our model could generate explanations that more reliably entail existing reviews, are more diverse, and are rated more informative by human evaluators.", + "primary_area": "speech natural language processing", + "author": "Zhouhang Xie; Sameer Singh; Julian McAuley; Bodhisattwa Prasad Majumder", + "authorids": "", + "aff": "University of California, San Diego; University of California, Irvine; University of California, San Diego; University of California, San Diego", + "bibtex": "@article{Xie_Singh_McAuley_Majumder_2023, title={Factual and Informative Review Generation for Explainable Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26618}, DOI={10.1609/aaai.v37i11.26618}, abstractNote={Recent models can generate fluent and grammatical synthetic reviews while accurately predicting user ratings. The generated reviews, expressing users\u2019 estimated opinions towards related products, are often viewed as natural language \u2018rationales\u2019 for the jointly predicted rating. However, previous studies found that existing models often generate repetitive, universally applicable, and generic explanations, resulting in uninformative rationales. Further, our analysis shows that previous models\u2019 generated content often contain factual hallucinations. These issues call for novel solutions that could generate both informative and factually grounded explanations. 
Inspired by recent success in using retrieved content in addition to parametric knowledge for generation, we propose to augment the generator with a personalized retriever, where the retriever\u2019s output serves as external knowledge for enhancing the generator. Experiments on Yelp, TripAdvisor, and Amazon Movie Reviews dataset show our model could generate explanations that more reliably entail existing reviews, are more diverse, and are rated more informative by human evaluators.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Zhouhang and Singh, Sameer and McAuley, Julian and Majumder, Bodhisattwa Prasad}, year={2023}, month={Jun.}, pages={13816-13824} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26618/26390", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26618", + "pdf_size": 834728, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18045340172885483359&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ucsd.edu;uci.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;uci.edu;ucsd.edu;ucsd.edu", + "github": "https://github.com/zhouhanxie/PRAG", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of California, San Diego;University of California, Irvine", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucsd.edu;https://www.uci.edu", + "aff_unique_abbr": "UCSD;UCI", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "San Diego;Irvine", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26931", + "title": "Failure-Resistant Intelligent Interaction for Reliable Human-AI Collaboration", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "My thesis is focusing on how we can overcome the gap people have against machine learning techniques that require a well-defined 
application scheme and can produce wrong results. I am planning to discuss the principle of the interaction design that fills such a gap based on my past projects that have explored better interactions for applying machine learning in various fields, such as malware analysis, executive coaching, photo editing, and so on. To this aim, my thesis also shed a light on the limitations of machine learning techniques, like adversarial examples, to highlight the importance of \"failure-resistant intelligent interaction.\"", + "primary_area": "", + "author": "Hiromu Yakura", + "authorids": "", + "aff": "Graduate School of Science and Technology, University of Tsukuba / Tsukuba, Japan", + "bibtex": "@article{Yakura_2024, title={Failure-Resistant Intelligent Interaction for Reliable Human-AI Collaboration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26931}, DOI={10.1609/aaai.v37i13.26931}, abstractNote={My thesis is focusing on how we can overcome the gap people have against machine learning techniques that require a well-defined application scheme and can produce wrong results. I am planning to discuss the principle of the interaction design that fills such a gap based on my past projects that have explored better interactions for applying machine learning in various fields, such as malware analysis, executive coaching, photo editing, and so on. 
To this aim, my thesis also shed a light on the limitations of machine learning techniques, like adversarial examples, to highlight the importance of \"failure-resistant intelligent interaction.\"}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yakura, Hiromu}, year={2024}, month={Jul.}, pages={16141-16142} }",
    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26931/26703",
    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26931",
    "pdf_size": 1627699,
    "gs_citation": 0,
    "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:IelaI4IVGEoJ:scholar.google.com/&scioq=Failure-Resistant+Intelligent+Interaction+for+Reliable+Human-AI+Collaboration&hl=en&as_sdt=0,33",
    "gs_version_total": 4,
    "aff_domain": "aist.go.jp",
    "email": "aist.go.jp",
    "github": "",
    "project": "",
    "author_num": 1,
    "aff_unique_index": "0",
    "aff_unique_norm": "University of Tsukuba",
    "aff_unique_dep": "Graduate School of Science and Technology",
    "aff_unique_url": "https://www.tsukuba.ac.jp",
    "aff_unique_abbr": "UT",
    "aff_campus_unique_index": "0",
    "aff_campus_unique": "Tsukuba",
    "aff_country_unique_index": "0",
    "aff_country_unique": "Japan"
  },
  {
    "id": "article-25688",
    "title": "Fair Division with Prioritized Agents",
    "track": "main",
    "status": "Technical",
    "abstract": "We consider the fair division problem of indivisible items. It is well-known that an envy-free allocation may not exist, and a relaxed version of envy-freeness, envy-freeness up to one item (EF1), has been widely considered. In an EF1 allocation, an agent may envy others' allocated shares, but only up to one item. In many applications, we may wish to specify a subset of prioritized agents where strict envy-freeness needs to be guaranteed from these agents to the remaining agents, while ensuring the whole allocation is still EF1. 
Prioritized agents may be those agents who are envious in a previous EF1 allocation, those agents who belong to underrepresented groups, etc. Motivated by this, we propose a new fairness notion named envy-freeness with prioritized agents EFprior, and study the existence and the algorithmic aspects for the problem of computing an EFprior allocation. With additive valuations, the simple round-robin algorithm is able to compute an EFprior allocation. In this paper, we mainly focus on general valuations. In particular, we present a polynomial-time algorithm that outputs an EFprior allocation with most of the items allocated. When all the items need to be allocated, we also present polynomial-time algorithms for some well-motivated special cases.", + "primary_area": "game theory and economic paradigms", + "author": "Xiaolin Bu; Zihao Li; Shengxin Liu; Jiaxin Song; Biaoshuai Tao", + "authorids": "", + "aff": "School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; School of Physical and Mathematical Sciences, Nanyang Technological University; School of Computer Science and Technology, Harbin Institute of Technology, Shenzhen; School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University", + "bibtex": "@article{Bu_Li_Liu_Song_Tao_2023, title={Fair Division with Prioritized Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25688}, DOI={10.1609/aaai.v37i5.25688}, abstractNote={We consider the fair division problem of indivisible items. It is well-known that an envy-free allocation may not exist, and a relaxed version of envy-freeness, envy-freeness up to one item (EF1), has been widely considered. In an EF1 allocation, an agent may envy others\u2019 allocated shares, but only up to one item. 
In many applications, we may wish to specify a subset of prioritized agents where strict envy-freeness needs to be guaranteed from these agents to the remaining agents, while ensuring the whole allocation is still EF1. Prioritized agents may be those agents who are envious in a previous EF1 allocation, those agents who belong to underrepresented groups, etc. Motivated by this, we propose a new fairness notion named envy-freeness with prioritized agents EFprior, and study the existence and the algorithmic aspects for the problem of computing an EFprior allocation. With additive valuations, the simple round-robin algorithm is able to compute an EFprior allocation. In this paper, we mainly focus on general valuations. In particular, we present a polynomial-time algorithm that outputs an EFprior allocation with most of the items allocated. When all the items need to be allocated, we also present polynomial-time algorithms for some well-motivated special cases.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bu, Xiaolin and Li, Zihao and Liu, Shengxin and Song, Jiaxin and Tao, Biaoshuai}, year={2023}, month={Jun.}, pages={5540-5548} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25688/25460", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25688", + "pdf_size": 154359, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3828277251716825342&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;e.ntu.edu.sg;hit.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;e.ntu.edu.sg;hit.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Nanyang Technological University;Harbin Institute of Technology", + "aff_unique_dep": "School of Electronic Information and Electrical Engineering;School of Physical and Mathematical 
Sciences;School of Computer Science and Technology", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.ntu.edu.sg;http://www.hit.edu.cn/", + "aff_unique_abbr": "SJTU;NTU;HIT", + "aff_campus_unique_index": "0;2;0;0", + "aff_campus_unique": "Shanghai;;Shenzhen", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25339", + "title": "Fair Generative Models via Transfer Learning", + "track": "main", + "status": "Technical", + "abstract": "This work addresses fair generative models. Dataset biases have been a major cause of unfairness in deep generative models. Previous work had proposed to augment large, biased datasets with small, unbiased reference datasets. Under this setup, a weakly-supervised approach has been proposed, which achieves state-of-the-art quality and fairness in generated samples. In our work, based on this setup, we propose a simple yet effective approach. Specifically, first, we propose fairTL, a transfer learning approach to learn fair generative models. Under fairTL, we pre-train the generative model with the available large, biased datasets and subsequently adapt the model using the small, unbiased reference dataset. We find that our fairTL can learn expressive sample generation during pre-training, thanks to the large (biased) dataset. This knowledge is then transferred to the target model during adaptation, which also learns to capture the underlying fair distribution of the small reference dataset. Second, we propose fairTL++, where we introduce two additional innovations to improve upon fairTL: (i) multiple feedback and (ii) Linear-Probing followed by Fine-Tuning (LP-FT). Taking one step further, we consider an alternative, challenging setup when only a pre-trained (potentially biased) model is available but the dataset that was used to pre-train the model is inaccessible. We demonstrate that our proposed fairTL and fairTL++ remain very effective under this setup. 
We note that previous work requires access to the large, biased datasets and is incapable of handling this more challenging setup. Extensive experiments show that fairTL and fairTL++ achieve state-of-the-art in both quality and fairness of generated samples. The code and additional resources can be found at bearwithchris.github.io/fairTL/.", + "primary_area": "computer vision ii", + "author": "Christopher T.H. Teo; Milad Abdollahzadeh; Ngai-Man Cheung", + "authorids": "", + "aff": "Singapore University of Technology and Design (SUTD); Singapore University of Technology and Design (SUTD); Singapore University of Technology and Design (SUTD)", + "bibtex": "@article{Teo_Abdollahzadeh_Cheung_2023, title={Fair Generative Models via Transfer Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25339}, DOI={10.1609/aaai.v37i2.25339}, abstractNote={This work addresses fair generative models. Dataset biases have been a major cause of unfairness in deep generative models. Previous work had proposed to augment large, biased datasets with small, unbiased reference datasets. Under this setup, a weakly-supervised approach has been proposed, which achieves state-of-the-art quality and fairness in generated samples. In our work, based on this setup, we propose a simple yet effective approach. Specifically, first, we propose fairTL, a transfer learning approach to learn fair generative models. Under fairTL, we pre-train the generative model with the available large, biased datasets and subsequently adapt the model using the small, unbiased reference dataset. We find that our fairTL can learn expressive sample generation during pre-training, thanks to the large (biased) dataset. This knowledge is then transferred to the target model during adaptation, which also learns to capture the underlying fair distribution of the small reference dataset. 
Second, we propose fairTL++, where we introduce two additional innovations to improve upon fairTL: (i) multiple feedback and (ii) Linear-Probing followed by Fine-Tuning (LP-FT). Taking one step further, we consider an alternative, challenging setup when only a pre-trained (potentially biased) model is available but the dataset that was used to pre-train the model is inaccessible. We demonstrate that our proposed fairTL and fairTL++ remain very effective under this setup. We note that previous work requires access to the large, biased datasets and is incapable of handling this more challenging setup. Extensive experiments show that fairTL and fairTL++ achieve state-of-the-art in both quality and fairness of generated samples. The code and additional resources can be found at bearwithchris.github.io/fairTL/.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Teo, Christopher T.H. and Abdollahzadeh, Milad and Cheung, Ngai-Man}, year={2023}, month={Jun.}, pages={2429-2437} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25339/25111", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25339", + "pdf_size": 1599573, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6945338225584411382&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mymail.sutd.edu.sg;sutd.edu.sg;sutd.edu.sg", + "email": "mymail.sutd.edu.sg;sutd.edu.sg;sutd.edu.sg", + "github": "", + "project": "bearwithchris.github.io/fairTL/", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Singapore University of Technology and Design", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sutd.edu.sg", + "aff_unique_abbr": "SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25617", + "title": "Fair Representation Learning for Recommendation: A Mutual 
Information Perspective", + "track": "main", + "status": "Technical", + "abstract": "Recommender systems have been widely used in recent years. By exploiting historical user-item interactions, recommender systems can model personalized potential interests of users and have been widely applied to a wide range of scenarios. Despite their impressive performance, most of them may be subject to unwanted biases related to sensitive attributes (e.g., race and gender), leading to unfairness. An intuitive idea to alleviate this problem is to ensure that there is no mutual information between recommendation results and sensitive attributes. However, keeping independence conditions solely achieves fairness improvement while causing an obvious degradation of recommendation accuracy, which is not a desired result. To this end, in this paper, we re-define recommendation fairness with a novel two-fold mutual information objective. In concerned details, we define fairness as mutual information minimization between embeddings and sensitive information, and mutual information maximization between embeddings and non-sensitive information. Then, a flexible Fair Mutual Information (FairMI) framework is designed to achieve this goal. FairMI first employs a sensitive attribute encoder to capture sensitive information in the data. Then, based on results from the sensitive attribute encoder, an interest encoder is developed to generate sensitive-free embeddings, which are expected to contain rich non-sensitive information of input data. Moreover, we propose novel mutual information (upper/lower) bounds with contrastive information estimation for model optimization. 
Extensive experiments over two real-world datasets demonstrate the effectiveness of our proposed FairMI in reducing unfairness and improving recommendation accuracy simultaneously.", + "primary_area": "data mining and knowledge management", + "author": "Chen Zhao; Le Wu; Pengyang Shao; Kun Zhang; Richang Hong; Meng Wang", + "authorids": "", + "aff": "School of Computer Science and Information Engineering, Hefei University of Technology+Hefei Comprehensive National Science Center; School of Computer Science and Information Engineering, Hefei University of Technology+Hefei Comprehensive National Science Center; School of Computer Science and Information Engineering, Hefei University of Technology; School of Computer Science and Information Engineering, Hefei University of Technology; School of Computer Science and Information Engineering, Hefei University of Technology+Hefei Comprehensive National Science Center; School of Computer Science and Information Engineering, Hefei University of Technology+Hefei Comprehensive National Science Center", + "bibtex": "@article{Zhao_Wu_Shao_Zhang_Hong_Wang_2023, title={Fair Representation Learning for Recommendation: A Mutual Information Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25617}, DOI={10.1609/aaai.v37i4.25617}, abstractNote={Recommender systems have been widely used in recent years. By exploiting historical user-item interactions, recommender systems can model personalized potential interests of users and have been widely applied to a wide range of scenarios. Despite their impressive performance, most of them may be subject to unwanted biases related to sensitive attributes (e.g., race and gender), leading to unfairness. An intuitive idea to alleviate this problem is to ensure that there is no mutual information between recommendation results and sensitive attributes. 
However, keeping independence conditions solely achieves fairness improvement while causing an obvious degradation of recommendation accuracy, which is not a desired result. To this end, in this paper, we re-define recommendation fairness with a novel two-fold mutual information objective. In concerned details, we define fairness as mutual information minimization between embeddings and sensitive information, and mutual information maximization between embeddings and non-sensitive information. Then, a flexible Fair Mutual Information (FairMI) framework is designed to achieve this goal. FairMI first employs a sensitive attribute encoder to capture sensitive information in the data. Then, based on results from the sensitive attribute encoder, an interest encoder is developed to generate sensitive-free embeddings, which are expected to contain rich non-sensitive information of input data. Moreover, we propose novel mutual information (upper/lower) bounds with contrastive information estimation for model optimization. 
Extensive experiments over two real-world datasets demonstrate the effectiveness of our proposed FairMI in reducing unfairness and improving recommendation accuracy simultaneously.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Chen and Wu, Le and Shao, Pengyang and Zhang, Kun and Hong, Richang and Wang, Meng}, year={2023}, month={Jun.}, pages={4911-4919} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25617/25389", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25617", + "pdf_size": 372892, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4254424311923553578&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0;0;0+1;0+1", + "aff_unique_norm": "Hefei University of Technology;Hefei Comprehensive National Science Center", + "aff_unique_dep": "School of Computer Science and Information Engineering;", + "aff_unique_url": "http://www.hfut.edu.cn/;http://www.hfnsc.ac.cn", + "aff_unique_abbr": "Hefei UTech;", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Hefei;", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26455", + "title": "Fair Short Paths in Vertex-Colored Graphs", + "track": "main", + "status": "Technical", + "abstract": "The computation of short paths in graphs with arc lengths is a pillar of graph algorithmics and network science. In a more diverse world, however, not every short path is equally valuable. For the setting where each vertex is assigned to a group (color), we provide a framework to model multiple natural fairness aspects. 
We seek to find short paths in which the number of occurrences of each color is within some given lower and upper bounds. Among other results, we prove the introduced problems to be computationally intractable (NP-hard and parameterized hard with respect to the number of colors) even in very restricted settings (such as each color should appear with exactly the same frequency), while also presenting an encouraging algorithmic result (\"fixed-parameter tractability\") related to the length of the sought solution path for the general problem.", + "primary_area": "search and optimization", + "author": "Matthias Bentert; Leon Kellerhals; Rolf Niedermeier", + "authorids": "", + "aff": "Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity; Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity; Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity", + "bibtex": "@article{Bentert_Kellerhals_Niedermeier_2023, title={Fair Short Paths in Vertex-Colored Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26455}, DOI={10.1609/aaai.v37i10.26455}, abstractNote={The computation of short paths in graphs with arc lengths is a pillar of graph algorithmics and network science. In a more diverse world, however, not every short path is equally valuable. For the setting where each vertex is assigned to a group (color), we provide a framework to model multiple natural fairness aspects. We seek to find short paths in which the number of occurrences of each color is within some given lower and upper bounds. 
Among other results, we prove the introduced problems to be computationally intractable (NP-hard and parameterized hard with respect to the number of colors) even in very restricted settings (such as each color should appear with exactly the same frequency), while also presenting an encouraging algorithmic result (\"fixed-parameter tractability\") related to the length of the sought solution path for the general problem.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bentert, Matthias and Kellerhals, Leon and Niedermeier, Rolf}, year={2023}, month={Jun.}, pages={12346-12354} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26455/26227", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26455", + "pdf_size": 243594, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15397119000816219374&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "tu-berlin.de;tu-berlin.de; ", + "email": "tu-berlin.de;tu-berlin.de; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Technische Universit\u00e4t Berlin", + "aff_unique_dep": "Algorithmics and Computational Complexity", + "aff_unique_url": "https://www.tu-berlin.de", + "aff_unique_abbr": "TU Berlin", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Berlin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26183", + "title": "Fair-CDA: Continuous and Directional Augmentation for Group Fairness", + "track": "main", + "status": "Technical", + "abstract": "In this work, we propose Fair-CDA, a fine-grained data augmentation strategy for imposing fairness constraints. We use a feature disentanglement method to extract the features highly related to the sensitive attributes. 
Then we show that group fairness can be achieved by regularizing the models on transition paths of sensitive features between groups. By adjusting the perturbation strength in the direction of the paths, our proposed augmentation is controllable and auditable. To alleviate the accuracy degradation caused by fairness constraints, we further introduce a calibrated model to impute labels for the augmented data. Our proposed method does not assume any data generative model and ensures good generalization for both accuracy and fairness. Experimental results show that Fair-CDA consistently outperforms state-of-the-art methods on widely-used benchmarks, e.g., Adult, CelebA and MovieLens. Especially, Fair-CDA obtains an 86.3% relative improvement for fairness while maintaining the accuracy on the Adult dataset. Moreover, we evaluate Fair-CDA in an online recommendation system to demonstrate the effectiveness of our method in terms of accuracy and fairness.", + "primary_area": "machine learning iii", + "author": "Rui Sun; Fengwei Zhou; Zhenhua Dong; Chuanlong Xie; Lanqing Hong; Jiawei Li; Rui Zhang; Zhen Li; Zhenguo Li", + "authorids": "", + "aff": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen)+School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen); Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Beijing Normal University; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Tsinghua University; The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen)+School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen); Huawei Noah\u2019s Ark Lab", + "bibtex": "@article{Sun_Zhou_Dong_Xie_Hong_Li_Zhang_Li_Li_2023, title={Fair-CDA: Continuous and Directional Augmentation for Group Fairness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26183}, DOI={10.1609/aaai.v37i8.26183}, abstractNote={In this work, we propose Fair-CDA, 
a fine-grained data augmentation strategy for imposing fairness constraints. We use a feature disentanglement method to extract the features highly related to the sensitive attributes. Then we show that group fairness can be achieved by regularizing the models on transition paths of sensitive features between groups. By adjusting the perturbation strength in the direction of the paths, our proposed augmentation is controllable and auditable. To alleviate the accuracy degradation caused by fairness constraints, we further introduce a calibrated model to impute labels for the augmented data. Our proposed method does not assume any data generative model and ensures good generalization for both accuracy and fairness. Experimental results show that Fair-CDA consistently outperforms state-of-the-art methods on widely-used benchmarks, e.g., Adult, CelebA and MovieLens. Especially, Fair-CDA obtains an 86.3% relative improvement for fairness while maintaining the accuracy on the Adult dataset. Moreover, we evaluate Fair-CDA in an online recommendation system to demonstrate the effectiveness of our method in terms of accuracy and fairness.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Rui and Zhou, Fengwei and Dong, Zhenhua and Xie, Chuanlong and Hong, Lanqing and Li, Jiawei and Zhang, Rui and Li, Zhen and Li, Zhenguo}, year={2023}, month={Jun.}, pages={9918-9926} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26183/25955", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26183", + "pdf_size": 1299010, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4170507858383832060&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "link.cuhk.edu.cn;connect.ust.hk;huawei.com;bnu.edu.cn;huawei.com;huawei.com;yeah.net;cuhk.edu.cn;huawei.com", + "email": 
"link.cuhk.edu.cn;connect.ust.hk;huawei.com;bnu.edu.cn;huawei.com;huawei.com;yeah.net;cuhk.edu.cn;huawei.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+0;1;1;2;1;1;3;0+0;1", + "aff_unique_norm": "The Chinese University of Hong Kong;Huawei;Beijing Normal University;Tsinghua University", + "aff_unique_dep": "Future Network of Intelligence Institute;Noah\u2019s Ark Lab;;", + "aff_unique_url": "https://www.cuhk.edu.cn;https://www.huawei.com;https://www.bnu.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "CUHK;Huawei;BNU;THU", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25911", + "title": "FairFed: Enabling Group Fairness in Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Training ML models which are fair across different demographic groups is of critical importance due to the increased integration of ML in crucial decision-making scenarios such as healthcare and recruitment. Federated learning has been viewed as a promising solution for collaboratively training machine learning models among multiple parties while maintaining their local data privacy. However, federated learning also poses new challenges in mitigating the potential bias against certain populations (e.g., demographic groups), as this typically requires centralized access to the sensitive information (e.g., race, gender) of each datapoint. Motivated by the importance and challenges of group fairness in federated learning, in this work, we propose FairFed, a novel algorithm for fairness-aware aggregation to enhance group fairness in federated learning. Our proposed approach is server-side and agnostic to the applied local debiasing thus allowing for flexible use of different local debiasing methods across clients. 
We evaluate FairFed empirically versus common baselines for fair ML and federated learning and demonstrate that it provides fairer models, particularly under highly heterogeneous data distributions across clients. We also demonstrate the benefits of FairFed in scenarios involving naturally distributed real-life data collected from different geographical locations or departments within an organization.", + "primary_area": "machine learning i", + "author": "Yahya H. Ezzeldin; Shen Yan; Chaoyang He; Emilio Ferrara; A. Salman Avestimehr", + "authorids": "", + "aff": "University of Southern California (USC); University of Southern California (USC); University of Southern California (USC); University of Southern California (USC); University of Southern California (USC)", + "bibtex": "@article{Ezzeldin_Yan_He_Ferrara_Avestimehr_2023, title={FairFed: Enabling Group Fairness in Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25911}, DOI={10.1609/aaai.v37i6.25911}, abstractNote={Training ML models which are fair across different demographic groups is of critical importance due to the increased integration of ML in crucial decision-making scenarios such as healthcare and recruitment. Federated learning has been viewed as a promising solution for collaboratively training machine learning models among multiple parties while maintaining their local data privacy. However, federated learning also poses new challenges in mitigating the potential bias against certain populations (e.g., demographic groups), as this typically requires centralized access to the sensitive information (e.g., race, gender) of each datapoint. Motivated by the importance and challenges of group fairness in federated learning, in this work, we propose FairFed, a novel algorithm for fairness-aware aggregation to enhance group fairness in federated learning. 
Our proposed approach is server-side and agnostic to the applied local debiasing thus allowing for flexible use of different local debiasing methods across clients. We evaluate FairFed empirically versus common baselines for fair ML and federated learning and demonstrate that it provides fairer models, particularly under highly heterogeneous data distributions across clients. We also demonstrate the benefits of FairFed in scenarios involving naturally distributed real-life data collected from different geographical locations or departments within an organization.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ezzeldin, Yahya H. and Yan, Shen and He, Chaoyang and Ferrara, Emilio and Avestimehr, A. Salman}, year={2023}, month={Jun.}, pages={7494-7502} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25911/25683", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25911", + "pdf_size": 1371145, + "gs_citation": 237, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=137380899481707897&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "usc.edu;usc.edu;usc.edu;usc.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu;usc.edu;usc.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25680", + "title": "Fairness Concepts for Indivisible Items with Externalities", + "track": "main", + "status": "Technical", + "abstract": "We study a fair allocation problem of indivisible items under additive externalities in which each agent also receives utility from items that are assigned to other agents. 
This allows us to capture scenarios in which agents benefit from or compete against one another. We extend the well-studied properties of envy-freeness up to one item (EF1) and envy-freeness up to any item (EFX) to this setting, and we propose a new fairness concept called general fair share (GFS), which applies to a more general public decision making model. We undertake a detailed study and present algorithms for finding fair allocations.", + "primary_area": "game theory and economic paradigms", + "author": "Haris Aziz; Warut Suksompong; Zhaohong Sun; Toby Walsh", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of New South Wales, Australia; School of Computing, National University of Singapore, Singapore; AI Lab, CyberAgent, Japan; School of Computer Science and Engineering, University of New South Wales, Australia", + "bibtex": "@article{Aziz_Suksompong_Sun_Walsh_2023, title={Fairness Concepts for Indivisible Items with Externalities}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25680}, DOI={10.1609/aaai.v37i5.25680}, abstractNote={We study a fair allocation problem of indivisible items under additive externalities in which each agent also receives utility from items that are assigned to other agents. This allows us to capture scenarios in which agents benefit from or compete against one another. We extend the well-studied properties of envy-freeness up to one item (EF1) and envy-freeness up to any item (EFX) to this setting, and we propose a new fairness concept called general fair share (GFS), which applies to a more general public decision making model. 
We undertake a detailed study and present algorithms for finding fair allocations.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aziz, Haris and Suksompong, Warut and Sun, Zhaohong and Walsh, Toby}, year={2023}, month={Jun.}, pages={5472-5480} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25680/25452", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25680", + "pdf_size": 138914, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4227164397923474882&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of New South Wales;National University of Singapore;CyberAgent", + "aff_unique_dep": "School of Computer Science and Engineering;School of Computing;AI Lab", + "aff_unique_url": "https://www.unsw.edu.au;https://www.nus.edu.sg;https://www.cyberagent.co.jp", + "aff_unique_abbr": "UNSW;NUS;CyberAgent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0", + "aff_country_unique": "Australia;Singapore;Japan" + }, + { + "id": "article-26344", + "title": "Fairness and Explainability: Bridging the Gap towards Fair Model Explanations", + "track": "main", + "status": "Technical", + "abstract": "While machine learning models have achieved unprecedented success in real-world applications, they might make biased/unfair decisions for specific demographic groups and hence result in discriminative outcomes. Although research efforts have been devoted to measuring and mitigating bias, they mainly study bias from the result-oriented perspective while neglecting the bias encoded in the decision-making procedure. This results in their inability to capture procedure-oriented bias, which therefore limits the ability to have a fully debiasing method. 
Fortunately, with the rapid development of explainable machine learning, explanations for predictions are now available to gain insights into the procedure. In this work, we bridge the gap between fairness and explainability by presenting a novel perspective of procedure-oriented fairness based on explanations. We identify the procedure-based bias by measuring the gap of explanation quality between different groups with Ratio-based and Value-based Explanation Fairness. The new metrics further motivate us to design an optimization objective to mitigate the procedure-based bias where we observe that it will also mitigate bias from the prediction. Based on our designed optimization objective, we propose a Comprehensive Fairness Algorithm (CFA), which simultaneously fulfills multiple objectives - improving traditional fairness, satisfying explanation fairness, and maintaining the utility performance. Extensive experiments on real-world datasets demonstrate the effectiveness of our proposed CFA and highlight the importance of considering fairness from the explainability perspective. Our code: https://github.com/YuyingZhao/FairExplanations-CFA.", + "primary_area": "machine learning iv", + "author": "Yuying Zhao; Yu Wang; Tyler Derr", + "authorids": "", + "aff": "Vanderbilt University; Vanderbilt University; Vanderbilt University", + "bibtex": "@article{Zhao_Wang_Derr_2023, title={Fairness and Explainability: Bridging the Gap towards Fair Model Explanations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26344}, DOI={10.1609/aaai.v37i9.26344}, abstractNote={While machine learning models have achieved unprecedented success in real-world applications, they might make biased/unfair decisions for specific demographic groups and hence result in discriminative outcomes. 
Although research efforts have been devoted to measuring and mitigating bias, they mainly study bias from the result-oriented perspective while neglecting the bias encoded in the decision-making procedure. This results in their inability to capture procedure-oriented bias, which therefore limits the ability to have a fully debiasing method. Fortunately, with the rapid development of explainable machine learning, explanations for predictions are now available to gain insights into the procedure. In this work, we bridge the gap between fairness and explainability by presenting a novel perspective of procedure-oriented fairness based on explanations. We identify the procedure-based bias by measuring the gap of explanation quality between different groups with Ratio-based and Value-based Explanation Fairness. The new metrics further motivate us to design an optimization objective to mitigate the procedure-based bias where we observe that it will also mitigate bias from the prediction. Based on our designed optimization objective, we propose a Comprehensive Fairness Algorithm (CFA), which simultaneously fulfills multiple objectives - improving traditional fairness, satisfying explanation fairness, and maintaining the utility performance. Extensive experiments on real-world datasets demonstrate the effectiveness of our proposed CFA and highlight the importance of considering fairness from the explainability perspective. 
Our code: https://github.com/YuyingZhao/FairExplanations-CFA.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Yuying and Wang, Yu and Derr, Tyler}, year={2023}, month={Jun.}, pages={11363-11371} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26344/26116", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26344", + "pdf_size": 2847199, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4777117914253210877&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 7, + "aff_domain": "vanderbilt.edu;vanderbilt.edu;vanderbilt.edu", + "email": "vanderbilt.edu;vanderbilt.edu;vanderbilt.edu", + "github": "https://github.com/YuyingZhao/FairExplanations-CFA", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Vanderbilt University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.vanderbilt.edu", + "aff_unique_abbr": "Vanderbilt", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25829", + "title": "Fairness and Welfare Quantification for Regret in Multi-Armed Bandits", + "track": "main", + "status": "Technical", + "abstract": "We extend the notion of regret with a welfarist perspective. Focussing on the classic multi-armed bandit (MAB) framework, the current work quantifies the performance of bandit algorithms by applying a fundamental welfare function, namely the Nash social welfare (NSW) function. This corresponds to equating algorithm's performance to the geometric mean of its expected rewards and leads us to the study of Nash regret, defined as the difference between the - a priori unknown - optimal mean (among the arms) and the algorithm's performance. 
Since NSW is known to satisfy fairness axioms, our approach complements the utilitarian considerations of average (cumulative) regret, wherein the algorithm is evaluated via the arithmetic mean of its expected rewards. \n\nThis work develops an algorithm that, given the horizon of play T, achieves a Nash regret of O ( sqrt{(k log T)/T} ), here k denotes the number of arms in the MAB instance. Since, for any algorithm, the Nash regret is at least as much as its average regret (the AM-GM inequality), the known lower bound on average regret holds for Nash regret as well. Therefore, our Nash regret guarantee is essentially tight. In addition, we develop an anytime algorithm with a Nash regret guarantee of O( sqrt{(k log T)/T} log T ).", + "primary_area": "machine learning i", + "author": "Siddharth Barman; Arindam Khan; Arnab Maiti; Ayush Sawarni", + "authorids": "", + "aff": "Indian Institute of Science; Indian Institute of Science; University of Washington; Indian Institute of Science", + "bibtex": "@article{Barman_Khan_Maiti_Sawarni_2023, title={Fairness and Welfare Quantification for Regret in Multi-Armed Bandits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25829}, DOI={10.1609/aaai.v37i6.25829}, abstractNote={We extend the notion of regret with a welfarist perspective. Focussing on the classic multi-armed bandit (MAB) framework, the current work quantifies the performance of bandit algorithms by applying a fundamental welfare function, namely the Nash social welfare (NSW) function. This corresponds to equating algorithm\u2019s performance to the geometric mean of its expected rewards and leads us to the study of Nash regret, defined as the difference between the - a priori unknown - optimal mean (among the arms) and the algorithm\u2019s performance. 
Since NSW is known to satisfy fairness axioms, our approach complements the utilitarian considerations of average (cumulative) regret, wherein the algorithm is evaluated via the arithmetic mean of its expected rewards. This work develops an algorithm that, given the horizon of play T, achieves a Nash regret of O ( sqrt{(k log T)/T} ), here k denotes the number of arms in the MAB instance. Since, for any algorithm, the Nash regret is at least as much as its average regret (the AM-GM inequality), the known lower bound on average regret holds for Nash regret as well. Therefore, our Nash regret guarantee is essentially tight. In addition, we develop an anytime algorithm with a Nash regret guarantee of O( sqrt{(k log T)/T} log T ).}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Barman, Siddharth and Khan, Arindam and Maiti, Arnab and Sawarni, Ayush}, year={2023}, month={Jun.}, pages={6762-6769} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25829/25601", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25829", + "pdf_size": 146954, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13110026700677768548&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "iisc.ac.in;iisc.ac.in;gmail.com;iisc.ac.in", + "email": "iisc.ac.in;iisc.ac.in;gmail.com;iisc.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Indian Institute of Science;University of Washington", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iisc.ac.in;https://www.washington.edu", + "aff_unique_abbr": "IISc;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "India;United States" + }, + { + "id": "article-26397", + "title": "Fairness in Contextual Resource Allocation Systems: Metrics and Incompatibility Results", + "track": "main", + 
"status": "Technical", + "abstract": "We study critical systems that allocate scarce resources to satisfy basic needs, such as homeless services that provide housing. These systems often support communities disproportionately affected by systemic racial, gender, or other injustices, so it is crucial to design these systems with fairness considerations in mind. To address this problem, we propose a framework for evaluating fairness in contextual resource allocation systems that is inspired by fairness metrics in machine learning. This framework can be applied to evaluate the fairness properties of a historical policy, as well as to impose constraints in the design of new (counterfactual) allocation policies. Our work culminates with a set of incompatibility results that investigate the interplay between the different fairness metrics we propose. Notably, we demonstrate that: 1) fairness in allocation and fairness in outcomes are usually incompatible; 2) policies that prioritize based on a vulnerability score will usually result in unequal outcomes across groups, even if the score is perfectly calibrated; 3) policies using contextual information beyond what is needed to characterize baseline risk and treatment effects can be fairer in their outcomes than those using just baseline risk and treatment effects; and 4) policies using group status in addition to baseline risk and treatment effects are as fair as possible given all available information. 
Our framework can help guide the discussion among stakeholders in deciding which fairness metrics to impose when allocating scarce resources.", + "primary_area": "philosophy and ethics of ai", + "author": "Nathanael Jo; Bill Tang; Kathryn Dullerud; Sina Aghaei; Eric Rice; Phebe Vayanos", + "authorids": "", + "aff": "USC Center for AI in Society, Los Angeles, CA; USC Center for AI in Society, Los Angeles, CA; USC Center for AI in Society, Los Angeles, CA; USC Center for AI in Society, Los Angeles, CA; USC Center for AI in Society, Los Angeles, CA; USC Center for AI in Society, Los Angeles, CA", + "bibtex": "@article{Jo_Tang_Dullerud_Aghaei_Rice_Vayanos_2023, title={Fairness in Contextual Resource Allocation Systems: Metrics and Incompatibility Results}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26397}, DOI={10.1609/aaai.v37i10.26397}, abstractNote={We study critical systems that allocate scarce resources to satisfy basic needs, such as homeless services that provide housing. These systems often support communities disproportionately affected by systemic racial, gender, or other injustices, so it is crucial to design these systems with fairness considerations in mind. To address this problem, we propose a framework for evaluating fairness in contextual resource allocation systems that is inspired by fairness metrics in machine learning. This framework can be applied to evaluate the fairness properties of a historical policy, as well as to impose constraints in the design of new (counterfactual) allocation policies. Our work culminates with a set of incompatibility results that investigate the interplay between the different fairness metrics we propose. 
Notably, we demonstrate that: 1) fairness in allocation and fairness in outcomes are usually incompatible; 2) policies that prioritize based on a vulnerability score will usually result in unequal outcomes across groups, even if the score is perfectly calibrated; 3) policies using contextual information beyond what is needed to characterize baseline risk and treatment effects can be fairer in their outcomes than those using just baseline risk and treatment effects; and 4) policies using group status in addition to baseline risk and treatment effects are as fair as possible given all available information. Our framework can help guide the discussion among stakeholders in deciding which fairness metrics to impose when allocating scarce resources.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jo, Nathanael and Tang, Bill and Dullerud, Kathryn and Aghaei, Sina and Rice, Eric and Vayanos, Phebe}, year={2023}, month={Jun.}, pages={11837-11846} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26397/26169", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26397", + "pdf_size": 146132, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9985923891235004970&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;usc.edu;usc.edu;usc.edu;usc.edu;usc.edu", + "email": "gmail.com;usc.edu;usc.edu;usc.edu;usc.edu;usc.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "Center for AI in Society", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27020", + "title": "FakeKG: A Knowledge Graph of Fake Claims for Improving 
Automated Fact-Checking (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "False information could be dangerous if the claim is not debunked timely. Fact-checking organisations get a high volume of claims on different topics with immense velocity. The efficiency of the fact-checkers decreases due to 3V problems volume, velocity and variety. Especially during crises or elections, fact-checkers cannot handle user requests to verify the claim. Until now, no real-time curable centralised corpus of fact-checked articles is available. Also, the same claim is fact-checked by multiple fact-checking organisations with or without judgement. To fill this gap, we introduce FakeKG: A Knowledge Graph-Based approach for improving Automated Fact-checking. FakeKG is a centralised knowledge graph containing fact-checked articles from different sources that can be queried using the SPARQL endpoint. The proposed FakeKG can prescreen claim requests and filter them if the claim is already fact-checked and provide a judgement to the claim. It will also categorise the claim's domain so that the fact-checker can prioritise checking the incoming claims into different groups like health and election. This study proposes an approach for creating FakeKG and its future application for mitigating misinformation.", + "primary_area": "", + "author": "Gautam Kishore Shahi", + "authorids": "", + "aff": "University of Duisburg-Essen, Germany", + "bibtex": "@article{Shahi_2024, title={FakeKG: A Knowledge Graph of Fake Claims for Improving Automated Fact-Checking (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27020}, DOI={10.1609/aaai.v37i13.27020}, abstractNote={False information could be dangerous if the claim is not debunked timely. Fact-checking organisations get a high volume of claims on different topics with immense velocity. 
The efficiency of the fact-checkers decreases due to 3V problems volume, velocity and variety. Especially during crises or elections, fact-checkers cannot handle user requests to verify the claim. Until now, no real-time curable centralised corpus of fact-checked articles is available. Also, the same claim is fact-checked by multiple fact-checking organisations with or without judgement. To fill this gap, we introduce FakeKG: A Knowledge Graph-Based approach for improving Automated Fact-checking. FakeKG is a centralised knowledge graph containing fact-checked articles from different sources that can be queried using the SPARQL endpoint. The proposed FakeKG can prescreen claim requests and filter them if the claim is already fact-checked and provide a judgement to the claim. It will also categorise the claim\u2019s domain so that the fact-checker can prioritise checking the incoming claims into different groups like health and election. This study proposes an approach for creating FakeKG and its future application for mitigating misinformation.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shahi, Gautam Kishore}, year={2024}, month={Jul.}, pages={16320-16321} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27020/26792", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27020", + "pdf_size": 104934, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3221403985674905484&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 4, + "aff_domain": "uni-due.de", + "email": "uni-due.de", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Duisburg-Essen", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uni-due.de", + "aff_unique_abbr": "UDE", + "aff_country_unique_index": "0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26689", + "title": "FakeSV: A Multimodal Benchmark with Rich 
Social Context for Fake News Detection on Short Video Platforms", + "track": "aaai special track", + "status": "Technical", + "abstract": "Short video platforms have become an important channel for news sharing, but also a new breeding ground for fake news. To mitigate this problem, research of fake news video detection has recently received a lot of attention. Existing works face two roadblocks: the scarcity of comprehensive and largescale datasets and insufficient utilization of multimodal information. Therefore, in this paper, we construct the largest Chinese short video dataset about fake news named FakeSV, which includes news content, user comments, and publisher profiles simultaneously. To understand the characteristics of fake news videos, we conduct exploratory analysis of FakeSV from different perspectives. Moreover, we provide a new multimodal detection model named SV-FEND, which exploits the cross-modal correlations to select the most informative features and utilizes the social context information for detection. Extensive experiments evaluate the superiority of the proposed method and provide detailed comparisons of different methods and modalities for future works. 
Our dataset and codes are available in https://github.com/ICTMCG/FakeSV.", + "primary_area": "ai for social impact", + "author": "Peng Qi; Yuyan Bu; Juan Cao; Wei Ji; Ruihao Shui; Junbin Xiao; Danding Wang; Tat-Seng Chua", + "authorids": "", + "aff": "Key Lab of Intelligent Information Processing, Institute of Computing Technology, CAS+University of Chinese Academy of Sciences; Key Lab of Intelligent Information Processing, Institute of Computing Technology, CAS+University of Chinese Academy of Sciences; Key Lab of Intelligent Information Processing, Institute of Computing Technology, CAS+University of Chinese Academy of Sciences; National University of Singapore; National University of Singapore; National University of Singapore; Key Lab of Intelligent Information Processing, Institute of Computing Technology, CAS; National University of Singapore", + "bibtex": "@article{Qi_Bu_Cao_Ji_Shui_Xiao_Wang_Chua_2023, title={FakeSV: A Multimodal Benchmark with Rich Social Context for Fake News Detection on Short Video Platforms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26689}, DOI={10.1609/aaai.v37i12.26689}, abstractNote={Short video platforms have become an important channel for news sharing, but also a new breeding ground for fake news. To mitigate this problem, research of fake news video detection has recently received a lot of attention. Existing works face two roadblocks: the scarcity of comprehensive and largescale datasets and insufficient utilization of multimodal information. Therefore, in this paper, we construct the largest Chinese short video dataset about fake news named FakeSV, which includes news content, user comments, and publisher profiles simultaneously. To understand the characteristics of fake news videos, we conduct exploratory analysis of FakeSV from different perspectives. 
Moreover, we provide a new multimodal detection model named SV-FEND, which exploits the cross-modal correlations to select the most informative features and utilizes the social context information for detection. Extensive experiments evaluate the superiority of the proposed method and provide detailed comparisons of different methods and modalities for future works. Our dataset and codes are available in https://github.com/ICTMCG/FakeSV.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qi, Peng and Bu, Yuyan and Cao, Juan and Ji, Wei and Shui, Ruihao and Xiao, Junbin and Wang, Danding and Chua, Tat-Seng}, year={2023}, month={Jun.}, pages={14444-14452} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26689/26461", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26689", + "pdf_size": 8331720, + "gs_citation": 67, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14789645195689813326&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ict.ac.cn;mails.ucas.ac.cn;ict.ac.cn;nus.edu.sg;u.nus.edu;comp.nus.edu.sg;ict.ac.cn;nus.edu.sg", + "email": "ict.ac.cn;mails.ucas.ac.cn;ict.ac.cn;nus.edu.sg;u.nus.edu;comp.nus.edu.sg;ict.ac.cn;nus.edu.sg", + "github": "https://github.com/ICTMCG/FakeSV", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;2;2;2;0;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;National University of Singapore", + "aff_unique_dep": "Institute of Computing Technology;;", + "aff_unique_url": "http://www.cas.cn/;http://www.ucas.ac.cn;https://www.nus.edu.sg", + "aff_unique_abbr": "CAS;UCAS;NUS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;1;1;1;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26030", + "title": "FanoutNet: A Neuralized PCB Fanout Automation Method Using Deep Reinforcement Learning", + 
"track": "main", + "status": "Technical", + "abstract": "In modern electronic manufacturing processes, multi-layer Printed Circuit Board (PCB) routing requires connecting more than hundreds of nets with perplexing topology under complex routing constraints and highly limited resources, so that takes intense effort and time of human engineers. PCB fanout as a pre-design of PCB routing has been proved to be an ideal technique to reduce the complexity of PCB routing by pre-allocating resources and pre-routing. However, current PCB fanout design heavily relies on the experience of human engineers, and there is no existing solution for PCB fanout automation in industry, which limits the quality of PCB routing automation. To address the problem, we propose a neuralized PCB fanout method by deep reinforcement learning. To the best of our knowledge, we are the first in the literature to propose the automation method for PCB fanout. We combine with Convolution Neural Network (CNN) and attention-based network to train our fanout policy model and value model. The models learn representations of PCB layout and netlist to make decisions and evaluations in place of human engineers. We employ Proximal Policy Optimization (PPO) to update the parameters of the models. In addition, we apply our PCB fanout method to a PCB router to improve the quality of PCB routing. 
Extensive experimental results on real-world industrial PCB benchmarks demonstrate that our approach achieves 100% routability in all industrial cases and improves wire length by an average of 6.8%, which makes a significant improvement compared with the state-of-the-art methods.", + "primary_area": "machine learning ii", + "author": "Haiyun Li; Jixin Zhang; Ning Xu; Mingyu Liu", + "authorids": "", + "aff": "School of Information Engineering, Wuhan University of Technology, Wuhan, China; School of Computer Science, Hubei University of Technology, Wuhan, China; School of Information Engineering, Wuhan University of Technology, Wuhan, China; Wuhan Research Institute, Huawei Device Co., Ltd., Wuhan, China", + "bibtex": "@article{Li_Zhang_Xu_Liu_2023, title={FanoutNet: A Neuralized PCB Fanout Automation Method Using Deep Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26030}, DOI={10.1609/aaai.v37i7.26030}, abstractNote={In modern electronic manufacturing processes, multi-layer Printed Circuit Board (PCB) routing requires connecting more than hundreds of nets with perplexing topology under complex routing constraints and highly limited resources, so that takes intense effort and time of human engineers. PCB fanout as a pre-design of PCB routing has been proved to be an ideal technique to reduce the complexity of PCB routing by pre-allocating resources and pre-routing. However, current PCB fanout design heavily relies on the experience of human engineers, and there is no existing solution for PCB fanout automation in industry, which limits the quality of PCB routing automation. To address the problem, we propose a neuralized PCB fanout method by deep reinforcement learning. To the best of our knowledge, we are the first in the literature to propose the automation method for PCB fanout. We combine with Convolution Neural Network (CNN) and attention-based network to train our fanout policy model and value model. 
The models learn representations of PCB layout and netlist to make decisions and evaluations in place of human engineers. We employ Proximal Policy Optimization (PPO) to update the parameters of the models. In addition, we apply our PCB fanout method to a PCB router to improve the quality of PCB routing. Extensive experimental results on real-world industrial PCB benchmarks demonstrate that our approach achieves 100% routability in all industrial cases and improves wire length by an average of 6.8%, which makes a significant improvement compared with the state-of-the-art methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Haiyun and Zhang, Jixin and Xu, Ning and Liu, Mingyu}, year={2023}, month={Jun.}, pages={8554-8561} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26030/25802", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26030", + "pdf_size": 725923, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10471800024138173217&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "hbut.edu.cn;whut.edu.cn; ; ", + "email": "hbut.edu.cn;whut.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Wuhan University of Technology;Hubei University of Technology;Huawei Device Co., Ltd.", + "aff_unique_dep": "School of Information Engineering;School of Computer Science;Wuhan Research Institute", + "aff_unique_url": "http://www.wut.edu.cn;http://www.hbut.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "WUT;;Huawei", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25529", + "title": "Farsighted Probabilistic Sampling: A General Strategy for Boosting Local Search MaxSAT Solvers", + "track": "main", + "status": "Technical", + "abstract": "Local search has 
been demonstrated as an efficient approach for two practical generalizations of the MaxSAT problem, namely Partial MaxSAT (PMS) and Weighted PMS (WPMS). In this work, we observe that most local search (W)PMS solvers usually flip a single variable per iteration. Such a mechanism may lead to relatively low-quality local optimal solutions, and may limit the diversity of search directions to escape from local optima. To address this issue, we propose a general strategy, called farsighted probabilistic sampling (FPS), to replace the single flipping mechanism so as to boost the local search (W)PMS algorithms. FPS considers the benefit of continuously flipping a pair of variables in order to find higher-quality local optimal solutions. Moreover, FPS proposes an effective approach to escape from local optima by preferring the best to flip among the best sampled single variable and the best sampled variable pair. Extensive experiments demonstrate that our proposed FPS strategy significantly improves the state-of-the-art (W)PMS solvers, and FPS has an excellent generalization capability to various local search MaxSAT solvers.", + "primary_area": "constraint satisfaction and optimization", + "author": "Jiongzhi Zheng; Kun He; Jianrong Zhou", + "authorids": "", + "aff": "School of Computer Science and Technology, Huazhong University of Science and Technology, China + Hopcroft Center on Computing Science, Huazhong University of Science and Technology, China; School of Computer Science and Technology, Huazhong University of Science and Technology, China + Hopcroft Center on Computing Science, Huazhong University of Science and Technology, China; School of Computer Science and Technology, Huazhong University of Science and Technology, China + Hopcroft Center on Computing Science, Huazhong University of Science and Technology, China", + "bibtex": "@article{Zheng_He_Zhou_2023, title={Farsighted Probabilistic Sampling: A General Strategy for Boosting Local Search MaxSAT Solvers}, 
volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25529}, DOI={10.1609/aaai.v37i4.25529}, abstractNote={Local search has been demonstrated as an efficient approach for two practical generalizations of the MaxSAT problem, namely Partial MaxSAT (PMS) and Weighted PMS (WPMS). In this work, we observe that most local search (W)PMS solvers usually flip a single variable per iteration. Such a mechanism may lead to relatively low-quality local optimal solutions, and may limit the diversity of search directions to escape from local optima. To address this issue, we propose a general strategy, called farsighted probabilistic sampling (FPS), to replace the single flipping mechanism so as to boost the local search (W)PMS algorithms. FPS considers the benefit of continuously flipping a pair of variables in order to find higher-quality local optimal solutions. Moreover, FPS proposes an effective approach to escape from local optima by preferring the best to flip among the best sampled single variable and the best sampled variable pair. 
Extensive experiments demonstrate that our proposed FPS strategy significantly improves the state-of-the-art (W)PMS solvers, and FPS has an excellent generalization capability to various local search MaxSAT solvers.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Jiongzhi and He, Kun and Zhou, Jianrong}, year={2023}, month={Jun.}, pages={4132-4139} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25529/25301", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25529", + "pdf_size": 157321, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16776099694094249177&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26186", + "title": "Fast Convergence in Learning Two-Layer Neural Networks with Separable Data", + "track": "main", + "status": "Technical", + "abstract": "Normalized gradient descent has shown substantial success in speeding up the convergence of exponentially-tailed loss functions (which includes exponential and logistic losses) on linear classifiers with separable data. In this paper, we go beyond linear models by studying normalized GD on two-layer neural nets. We prove for exponentially-tailed losses that using normalized GD leads to linear rate of convergence of the training loss to the global optimum. 
This is made possible by showing certain gradient self-boundedness conditions and a log-Lipschitzness property. We also study generalization of normalized GD for convex objectives via an algorithmic-stability analysis. In particular, we show that normalized GD does not overfit during training by establishing finite-time generalization bounds.", + "primary_area": "machine learning iii", + "author": "Hossein Taheri; Christos Thrampoulidis", + "authorids": "", + "aff": "University of California, Santa Barbara; University of British Columbia", + "bibtex": "@article{Taheri_Thrampoulidis_2023, title={Fast Convergence in Learning Two-Layer Neural Networks with Separable Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26186}, DOI={10.1609/aaai.v37i8.26186}, abstractNote={Normalized gradient descent has shown substantial success in speeding up the convergence of exponentially-tailed loss functions (which includes exponential and logistic losses) on linear classifiers with separable data. In this paper, we go beyond linear models by studying normalized GD on two-layer neural nets. We prove for exponentially-tailed losses that using normalized GD leads to linear rate of convergence of the training loss to the global optimum. This is made possible by showing certain gradient self-boundedness conditions and a log-Lipschitzness property. We also study generalization of normalized GD for convex objectives via an algorithmic-stability analysis. 
In particular, we show that normalized GD does not overfit during training by establishing finite-time generalization bounds.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Taheri, Hossein and Thrampoulidis, Christos}, year={2023}, month={Jun.}, pages={9944-9952} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26186/25958", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26186", + "pdf_size": 272061, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15695493218147786001&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ucsb.edu;ece.ubc.ca", + "email": "ucsb.edu;ece.ubc.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of California, Santa Barbara;University of British Columbia", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucsb.edu;https://www.ubc.ca", + "aff_unique_abbr": "UCSB;UBC", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Santa Barbara;Vancouver", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-25517", + "title": "Fast Converging Anytime Model Counting", + "track": "main", + "status": "Technical", + "abstract": "Model counting is a fundamental problem which has been influential in many applications, from artificial intelligence to formal verification. Due to the intrinsic hardness of model counting, approximate techniques have been developed to solve real-world instances of model counting. This paper designs a new anytime approach called PartialKC for approximate model counting. The idea is a form of partial knowledge compilation to provide an unbiased estimate of the model count which can converge to the exact count. 
Our empirical analysis demonstrates that PartialKC achieves significant scalability and accuracy over prior state-of-the-art approximate counters, including satss and STS. Interestingly, the empirical results show that PartialKC reaches convergence for many instances and therefore provides exact model counting performance comparable to state-of-the-art exact counters.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yong Lai; Kuldeep S. Meel; Roland H.C. Yap", + "authorids": "", + "aff": "Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, Jilin University, China; School of Computing, National University of Singapore; School of Computing, National University of Singapore", + "bibtex": "@article{Lai_Meel_Yap_2023, title={Fast Converging Anytime Model Counting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25517}, DOI={10.1609/aaai.v37i4.25517}, abstractNote={Model counting is a fundamental problem which has been influential in many applications, from artificial intelligence to formal verification. Due to the intrinsic hardness of model counting, approximate techniques have been developed to solve real-world instances of model counting. This paper designs a new anytime approach called PartialKC for approximate model counting. The idea is a form of partial knowledge compilation to provide an unbiased estimate of the model count which can converge to the exact count. Our empirical analysis demonstrates that PartialKC achieves significant scalability and accuracy over prior state-of-the-art approximate counters, including satss and STS. Interestingly, the empirical results show that PartialKC reaches convergence for many instances and therefore provides exact model counting performance comparable to state-of-the-art exact counters.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lai, Yong and Meel, Kuldeep S. 
and Yap, Roland H.C.}, year={2023}, month={Jun.}, pages={4025-4034} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25517/25289", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25517", + "pdf_size": 357331, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10471815033033947215&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Jilin University;National University of Singapore", + "aff_unique_dep": "Key Laboratory of Symbolic Computation and Knowledge Engineering;School of Computing", + "aff_unique_url": "http://www.jlu.edu.cn;https://www.nus.edu.sg", + "aff_unique_abbr": ";NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25924", + "title": "Fast Counterfactual Inference for History-Based Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Incorporating sequence-to-sequence models into history-based Reinforcement Learning (RL) provides a general way to extend RL to partially-observable tasks. This method compresses history spaces according to the correlations between historical observations and the rewards. However, they do not adjust for the confounding correlations caused by data sampling and assign high beliefs to uninformative historical observations, leading to limited compression of history spaces. Counterfactual Inference (CI), which estimates causal effects by single-variable intervention, is a promising way to adjust for confounding. However, it is computationally infeasible to directly apply the single-variable intervention to a huge number of historical observations. 
This paper proposes to perform CI on observation sub-spaces instead of single observations and develop a coarse-to-fine CI algorithm, called Tree-based History Counterfactual Inference (T-HCI), to reduce the number of interventions exponentially. We show that T-HCI is computationally feasible in practice and brings significant sample efficiency gains in various challenging partially-observable tasks, including Maze, BabyAI, and robot manipulation tasks.", + "primary_area": "machine learning i", + "author": "Haichuan Gao; Tianren Zhang; Zhile Yang; Yuqing Guo; Jinsheng Ren; Shangqi Guo; Feng Chen", + "authorids": "", + "aff": "Department of Automation, Tsinghua University, Beijing, China; Department of Automation, Tsinghua University, Beijing, China; School of Computing, University of Leeds, Leeds, UK; Department of Automation, Tsinghua University, Beijing, China; Department of Automation, Tsinghua University, Beijing, China; Department of Precision Instrument, Tsinghua University, Beijing, China + LSBDPA Beijing Key Laboratory, Beijing, China; Department of Automation, Tsinghua University, Beijing, China + LSBDPA Beijing Key Laboratory, Beijing, China", + "bibtex": "@article{Gao_Zhang_Yang_Guo_Ren_Guo_Chen_2023, title={Fast Counterfactual Inference for History-Based Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25924}, DOI={10.1609/aaai.v37i6.25924}, abstractNote={Incorporating sequence-to-sequence models into history-based Reinforcement Learning (RL) provides a general way to extend RL to partially-observable tasks. This method compresses history spaces according to the correlations between historical observations and the rewards. However, they do not adjust for the confounding correlations caused by data sampling and assign high beliefs to uninformative historical observations, leading to limited compression of history spaces. 
Counterfactual Inference (CI), which estimates causal effects by single-variable intervention, is a promising way to adjust for confounding. However, it is computationally infeasible to directly apply the single-variable intervention to a huge number of historical observations. This paper proposes to perform CI on observation sub-spaces instead of single observations and develop a coarse-to-fine CI algorithm, called Tree-based History Counterfactual Inference (T-HCI), to reduce the number of interventions exponentially. We show that T-HCI is computationally feasible in practice and brings significant sample efficiency gains in various challenging partially-observable tasks, including Maze, BabyAI, and robot manipulation tasks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Haichuan and Zhang, Tianren and Yang, Zhile and Guo, Yuqing and Ren, Jinsheng and Guo, Shangqi and Chen, Feng}, year={2023}, month={Jun.}, pages={7613-7623} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25924/25696", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25924", + "pdf_size": 2720073, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=796049893505529358&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;leeds.ac.uk;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;foxmail.com;mail.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;leeds.ac.uk;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;foxmail.com;mail.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0+2;0+2", + "aff_unique_norm": "Tsinghua University;University of Leeds;LSBDPA Beijing Key Laboratory", + "aff_unique_dep": "Department of Automation;School of Computing;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.leeds.ac.uk;", + "aff_unique_abbr": "THU;Leeds;", + 
"aff_campus_unique_index": "0;0;1;0;0;0+0;0+0", + "aff_campus_unique": "Beijing;Leeds", + "aff_country_unique_index": "0;0;1;0;0;0+0;0+0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25255", + "title": "Fast Fluid Simulation via Dynamic Multi-Scale Gridding", + "track": "main", + "status": "Technical", + "abstract": "Recent works on learning-based frameworks for Lagrangian (i.e., particle-based) fluid simulation, though bypassing iterative pressure projection via efficient convolution operators, are still time-consuming due to excessive amount of particles. To address this challenge, we propose a dynamic multi-scale gridding method to reduce the magnitude of elements that have to be processed, by observing repeated particle motion patterns within certain consistent regions. Specifically, we hierarchically generate multi-scale micelles in Euclidean space by grouping particles that share similar motion patterns/characteristics based on super-light motion and scale estimation modules. With little internal motion variation, each micelle is modeled as a single rigid body with convolution only applied to a single representative particle. In addition, a distance-based interpolation is conducted to propagate relative motion message among micelles. With our efficient design, the network produces high visual fidelity fluid simulations with the inference time to be only 4.24 ms/frame (with 6K fluid particles), hence enables real-time human-computer interaction and animation. 
Experimental results on multiple datasets show that our work achieves great simulation acceleration with negligible prediction error increase.", + "primary_area": "computer vision ii", + "author": "Jinxian Liu; Ye Chen; Bingbing Ni; Wei Ren; Zhenbo Yu; Xiaoyang Huang", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Huawei Hisilicon; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Liu_Chen_Ni_Ren_Yu_Huang_2023, title={Fast Fluid Simulation via Dynamic Multi-Scale Gridding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25255}, DOI={10.1609/aaai.v37i2.25255}, abstractNote={Recent works on learning-based frameworks for Lagrangian (i.e., particle-based) fluid simulation, though bypassing iterative pressure projection via efficient convolution operators, are still time-consuming due to excessive amount of particles. To address this challenge, we propose a dynamic multi-scale gridding method to reduce the magnitude of elements that have to be processed, by observing repeated particle motion patterns within certain consistent regions. Specifically, we hierarchically generate multi-scale micelles in Euclidean space by grouping particles that share similar motion patterns/characteristics based on super-light motion and scale estimation modules. With little internal motion variation, each micelle is modeled as a single rigid body with convolution only applied to a single representative particle. In addition, a distance-based interpolation is conducted to propagate relative motion message among micelles. With our efficient design, the network produces high visual fidelity fluid simulations with the inference time to be only 4.24 ms/frame (with 6K fluid particles), hence enables real-time human-computer interaction and animation. 
Experimental results on multiple datasets show that our work achieves great simulation acceleration with negligible prediction error increase.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Jinxian and Chen, Ye and Ni, Bingbing and Ren, Wei and Yu, Zhenbo and Huang, Xiaoyang}, year={2023}, month={Jun.}, pages={1675-1682} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25255/25027", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25255", + "pdf_size": 10682996, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13794327522755256887&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;huawei.com;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;huawei.com;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Huawei Technologies Co., Ltd.", + "aff_unique_dep": ";Hisilicon", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.huawei.com/en/", + "aff_unique_abbr": "SJTU;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26158", + "title": "Fast Offline Policy Optimization for Large Scale Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Personalised interactive systems such as recommender systems require selecting relevant items from massive catalogs dependent on context. Reward-driven offline optimisation of these systems can be achieved by a relaxation of the discrete problem resulting in policy learning or REINFORCE style learning algorithms. 
Unfortunately, this relaxation step requires computing a sum over the entire catalogue making the complexity of the evaluation of the gradient (and hence each stochastic gradient descent iterations) linear in the catalogue size. This calculation is untenable in many real world examples such as large catalogue recommender systems, severely limiting the usefulness of this method in practice. In this paper, we derive an approximation of these policy learning algorithms that scale logarithmically with the catalogue size. Our contribution is based upon combining three novel ideas: a new Monte Carlo estimate of the gradient of a policy, the self normalised importance sampling estimator and the use of fast maximum inner product search at training time. Extensive experiments show that our algorithm is an order of magnitude faster than naive approaches yet produces equally good policies.", + "primary_area": "machine learning iii", + "author": "Otmane Sakhi; David Rohde; Alexandre Gilotte", + "authorids": "", + "aff": "Criteo AI Lab, Paris, France + CREST-ENSAE, IPP, Palaiseau, France; Criteo AI Lab, Paris, France; Criteo AI Lab, Paris, France", + "bibtex": "@article{Sakhi_Rohde_Gilotte_2023, title={Fast Offline Policy Optimization for Large Scale Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26158}, DOI={10.1609/aaai.v37i8.26158}, abstractNote={Personalised interactive systems such as recommender systems require selecting relevant items from massive catalogs dependent on context. Reward-driven offline optimisation of these systems can be achieved by a relaxation of the discrete problem resulting in policy learning or REINFORCE style learning algorithms. Unfortunately, this relaxation step requires computing a sum over the entire catalogue making the complexity of the evaluation of the gradient (and hence each stochastic gradient descent iterations) linear in the catalogue size. 
This calculation is untenable in many real world examples such as large catalogue recommender systems, severely limiting the usefulness of this method in practice. In this paper, we derive an approximation of these policy learning algorithms that scale logarithmically with the catalogue size. Our contribution is based upon combining three novel ideas: a new Monte Carlo estimate of the gradient of a policy, the self normalised importance sampling estimator and the use of fast maximum inner product search at training time. Extensive experiments show that our algorithm is an order of magnitude faster than naive approaches yet produces equally good policies.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sakhi, Otmane and Rohde, David and Gilotte, Alexandre}, year={2023}, month={Jun.}, pages={9686-9694} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26158/25930", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26158", + "pdf_size": 1222283, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9668183413760839403&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "criteo.com;criteo.com;criteo.com", + "email": "criteo.com;criteo.com;criteo.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "Criteo;CREST", + "aff_unique_dep": "Criteo AI Lab;ENSAE", + "aff_unique_url": "https://www.criteo.com;https://www.crest.fr", + "aff_unique_abbr": "Criteo;CREST", + "aff_campus_unique_index": "0+1;0;0", + "aff_campus_unique": "Paris;Palaiseau", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-25181", + "title": "Fast Online Hashing with Multi-Label Projection", + "track": "main", + "status": "Technical", + "abstract": "Hashing has been widely researched to solve the large-scale approximate nearest neighbor search problem owing to its time and 
storage superiority. In recent years, a number of online hashing methods have emerged, which can update the hash functions to adapt to the new stream data and realize dynamic retrieval. However, existing online hashing methods are required to update the whole database with the latest hash functions when a query arrives, which leads to low retrieval efficiency with the continuous increase of the stream data. On the other hand, these methods ignore the supervision relationship among the examples, especially in the multi-label case. In this paper, we propose a novel Fast Online Hashing (FOH) method which only updates the binary codes of a small part of the database. To be specific, we first build a query pool in which the nearest neighbors of each central point are recorded. When a new query arrives, only the binary codes of the corresponding potential neighbors are updated. In addition, we create a similarity matrix which takes the multi-label supervision information into account and bring in the multi-label projection loss to further preserve the similarity among the multi-label data. 
The experimental results on two common benchmarks show that the proposed FOH can achieve dramatic superiority on query time up to 6.28 seconds less than state-of-the-art baselines with competitive retrieval accuracy.", + "primary_area": "computer vision i", + "author": "Wenzhe Jia; Yuan Cao; Junwei Liu; Jie Gui", + "authorids": "", + "aff": "Ocean University of China, China+State Key Laboratory of Integrated Services Networks (Xidian University), China; Ocean University of China, China+State Key Laboratory of Integrated Services Networks (Xidian University), China; Ocean University of China, China; Southeast University, China+Purple Mountain Laboratories, China", + "bibtex": "@article{Jia_Cao_Liu_Gui_2023, title={Fast Online Hashing with Multi-Label Projection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25181}, DOI={10.1609/aaai.v37i1.25181}, abstractNote={Hashing has been widely researched to solve the large-scale approximate nearest neighbor search problem owing to its time and storage superiority. In recent years, a number of online hashing methods have emerged, which can update the hash functions to adapt to the new stream data and realize dynamic retrieval. However, existing online hashing methods are required to update the whole database with the latest hash functions when a query arrives, which leads to low retrieval efficiency with the continuous increase of the stream data. On the other hand, these methods ignore the supervision relationship among the examples, especially in the multi-label case. In this paper, we propose a novel Fast Online Hashing (FOH) method which only updates the binary codes of a small part of the database. To be specific, we first build a query pool in which the nearest neighbors of each central point are recorded. When a new query arrives, only the binary codes of the corresponding potential neighbors are updated. 
In addition, we create a similarity matrix which takes the multi-label supervision information into account and bring in the multi-label projection loss to further preserve the similarity among the multi-label data. The experimental results on two common benchmarks show that the proposed FOH can achieve dramatic superiority on query time up to 6.28 seconds less than state-of-the-art baselines with competitive retrieval accuracy.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Wenzhe and Cao, Yuan and Liu, Junwei and Gui, Jie}, year={2023}, month={Jun.}, pages={1007-1014} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25181/24953", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25181", + "pdf_size": 1495289, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3140818185660107520&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.ouc.edu.cn;ouc.edu.cn;stu.ouc.edu.cn;seu.edu.cn", + "email": "stu.ouc.edu.cn;ouc.edu.cn;stu.ouc.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0;2+3", + "aff_unique_norm": "Ocean University of China;Xidian University;Southeast University;Purple Mountain Laboratories", + "aff_unique_dep": ";State Key Laboratory of Integrated Services Networks;;", + "aff_unique_url": "http://www.ouc.edu.cn;http://www.xidian.edu.cn/;https://www.seu.edu.cn/;", + "aff_unique_abbr": "OUC;Xidian;SEU;", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25965", + "title": "Fast Regularized Discrete Optimal Transport with Group-Sparse Regularizers", + "track": "main", + "status": "Technical", + "abstract": "Regularized discrete optimal transport (OT) is a powerful tool to measure the distance between two discrete distributions that have been constructed from 
data samples on two different domains. While it has a wide range of applications in machine learning, in some cases the sampled data from only one of the domains will have class labels such as unsupervised domain adaptation. In this kind of problem setting, a group-sparse regularizer is frequently leveraged as a regularization term to handle class labels. In particular, it can preserve the label structure on the data samples by corresponding the data samples with the same class label to one group-sparse regularization term. As a result, we can measure the distance while utilizing label information by solving the regularized optimization problem with gradient-based algorithms. However, the gradient computation is expensive when the number of classes or data samples is large because the number of regularization terms and their respective sizes also turn out to be large. This paper proposes fast discrete OT with group-sparse regularizers. Our method is based on two ideas. The first is to safely skip the computations of the gradients that must be zero. The second is to efficiently extract the gradients that are expected to be nonzero. Our method is guaranteed to return the same value of the objective function as that of the original approach. 
Experiments demonstrate that our method is up to 8.6 times faster than the original method without degrading accuracy.", + "primary_area": "machine learning ii", + "author": "Yasutoshi Ida; Sekitoshi Kanai; Kazuki Adachi; Atsutoshi Kumagai; Yasuhiro Fujiwara", + "authorids": "", + "aff": "NTT Computer and Data Science Laboratories; NTT Computer and Data Science Laboratories; NTT Computer and Data Science Laboratories; NTT Computer and Data Science Laboratories; NTT Communication Science Laboratories", + "bibtex": "@article{Ida_Kanai_Adachi_Kumagai_Fujiwara_2023, title={Fast Regularized Discrete Optimal Transport with Group-Sparse Regularizers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25965}, DOI={10.1609/aaai.v37i7.25965}, abstractNote={Regularized discrete optimal transport (OT) is a powerful tool to measure the distance between two discrete distributions that have been constructed from data samples on two different domains. While it has a wide range of applications in machine learning, in some cases the sampled data from only one of the domains will have class labels such as unsupervised domain adaptation. In this kind of problem setting, a group-sparse regularizer is frequently leveraged as a regularization term to handle class labels. In particular, it can preserve the label structure on the data samples by corresponding the data samples with the same class label to one group-sparse regularization term. As a result, we can measure the distance while utilizing label information by solving the regularized optimization problem with gradient-based algorithms. However, the gradient computation is expensive when the number of classes or data samples is large because the number of regularization terms and their respective sizes also turn out to be large. This paper proposes fast discrete OT with group-sparse regularizers. Our method is based on two ideas. The first is to safely skip the computations of the gradients that must be zero. 
The second is to efficiently extract the gradients that are expected to be nonzero. Our method is guaranteed to return the same value of the objective function as that of the original approach. Experiments demonstrate that our method is up to 8.6 times faster than the original method without degrading accuracy.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ida, Yasutoshi and Kanai, Sekitoshi and Adachi, Kazuki and Kumagai, Atsutoshi and Fujiwara, Yasuhiro}, year={2023}, month={Jun.}, pages={7980-7987} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25965/25737", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25965", + "pdf_size": 325667, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11411143352282156165&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ieee.org; fsekitoshi.kanai.fu; kazuki.adachi.xy; atsutoshi.kumagai.ht;hco.ntt.co.jp", + "email": "ieee.org; fsekitoshi.kanai.fu; kazuki.adachi.xy; atsutoshi.kumagai.ht;hco.ntt.co.jp", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "NTT Data Science Laboratories;NTT Communication Science Laboratories", + "aff_unique_dep": "Computer and Data Science;", + "aff_unique_url": "https://www.ntt.co.jp;https://www.ntt-csl.com", + "aff_unique_abbr": "NTT;NTT CSL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26117", + "title": "Fast Saturating Gate for Learning Long Time Scales with Recurrent Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Gate functions in recurrent models, such as an LSTM and GRU, play a central role in learning various time scales in modeling time series data by using a bounded activation function. 
However, it is difficult to train gates to capture extremely long time scales due to gradient vanishing of the bounded function for large inputs, which is known as the saturation problem. We closely analyze the relation between saturation of the gate function and efficiency of the training. We prove that the gradient vanishing of the gate function can be mitigated by accelerating the convergence of the saturating function, i.e., making the output of the function converge to 0 or 1 faster. Based on the analysis results, we propose a gate function called fast gate that has a doubly exponential convergence rate with respect to inputs by simple function composition. We empirically show that our method outperforms previous methods in accuracy and computational efficiency on benchmark tasks involving extremely long time scales.", + "primary_area": "machine learning iii", + "author": "Kentaro Ohno; Sekitoshi Kanai; Yasutoshi Ida", + "authorids": "", + "aff": "NTT; NTT; NTT", + "bibtex": "@article{Ohno_Kanai_Ida_2023, title={Fast Saturating Gate for Learning Long Time Scales with Recurrent Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26117}, DOI={10.1609/aaai.v37i8.26117}, abstractNote={Gate functions in recurrent models, such as an LSTM and GRU, play a central role in learning various time scales in modeling time series data by using a bounded activation function. However, it is difficult to train gates to capture extremely long time scales due to gradient vanishing of the bounded function for large inputs, which is known as the saturation problem. We closely analyze the relation between saturation of the gate function and efficiency of the training. We prove that the gradient vanishing of the gate function can be mitigated by accelerating the convergence of the saturating function, i.e., making the output of the function converge to 0 or 1 faster. 
Based on the analysis results, we propose a gate function called fast gate that has a doubly exponential convergence rate with respect to inputs by simple function composition. We empirically show that our method outperforms previous methods in accuracy and computational efficiency on benchmark tasks involving extremely long time scales.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ohno, Kentaro and Kanai, Sekitoshi and Ida, Yasutoshi}, year={2023}, month={Jun.}, pages={9319-9326} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26117/25889", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26117", + "pdf_size": 635091, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:QVnFb3_G5zMJ:scholar.google.com/&scioq=Fast+Saturating+Gate+for+Learning+Long+Time+Scales+with+Recurrent+Neural+Networks&hl=en&as_sdt=0,5", + "gs_version_total": 5, + "aff_domain": "hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp", + "email": "hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "NTT Corporation", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ntt.co.jp", + "aff_unique_abbr": "NTT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26268", + "title": "Fast and Accurate Binary Neural Networks Based on Depth-Width Reshaping", + "track": "main", + "status": "Technical", + "abstract": "Network binarization (i.e., binary neural networks, BNNs) can efficiently compress deep neural networks and accelerate model inference but cause severe accuracy degradation. Existing BNNs are mainly implemented based on the commonly used full-precision network backbones, and then the accuracy is improved with various techniques. 
However, there is a question of whether the full-precision network backbone is well adapted to BNNs. We start from the factors of the performance degradation of BNNs and analyze the problems of directly using full-precision network backbones for BNNs: for a given computational budget, the backbone of a BNN may need to be shallower and wider compared to the backbone of a full-precision network. With this in mind, Depth-Width Reshaping (DWR) is proposed to reshape the depth and width of existing full-precision network backbones and further optimize them by incorporating pruning techniques to better fit the BNNs. Extensive experiments demonstrate the analytical result and the effectiveness of the proposed method. Compared with the original backbones, the DWR backbones constructed by the proposed method result in close to O(\u221as) decrease in activations, while achieving an absolute accuracy increase by up to 1.7% with comparable computational cost. Besides, by using the DWR backbones, existing methods can achieve new state-of-the-art (SOTA) accuracy (e.g., 67.2% on ImageNet with ResNet-18 as the original backbone). We hope this work provides a novel insight into the backbone design of BNNs. 
The code is available at https://github.com/pingxue-hfut/DWR.", + "primary_area": "machine learning iv", + "author": "Ping Xue; Yang Lu; Jingfei Chang; Xing Wei; Zhen Wei", + "authorids": "", + "aff": "School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China; School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China+Anhui Mine IOT and Security Monitoring Technology Key Laboratory, Hefei, China+Engineering Research Center of Safety Critical Industrial Measurement and Control Technology, Ministry of Education, Hefei University of Technology, Hefei, China; School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China; School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China+Anhui Mine IOT and Security Monitoring Technology Key Laboratory, Hefei, China+Intelligent Manufacturing Institute of HeFei University of Technology, Hefei, China; School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China+Anhui Mine IOT and Security Monitoring Technology Key Laboratory, Hefei, China+Engineering Research Center of Safety Critical Industrial Measurement and Control Technology, Ministry of Education, Hefei University of Technology, Hefei, China", + "bibtex": "@article{Xue_Lu_Chang_Wei_Wei_2023, title={Fast and Accurate Binary Neural Networks Based on Depth-Width Reshaping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26268}, DOI={10.1609/aaai.v37i9.26268}, abstractNote={Network binarization (i.e., binary neural networks, BNNs) can efficiently compress deep neural networks and accelerate model inference but cause severe accuracy degradation. Existing BNNs are mainly implemented based on the commonly used full-precision network backbones, and then the accuracy is improved with various techniques. 
However, there is a question of whether the full-precision network backbone is well adapted to BNNs. We start from the factors of the performance degradation of BNNs and analyze the problems of directly using full-precision network backbones for BNNs: for a given computational budget, the backbone of a BNN may need to be shallower and wider compared to the backbone of a full-precision network. With this in mind, Depth-Width Reshaping (DWR) is proposed to reshape the depth and width of existing full-precision network backbones and further optimize them by incorporating pruning techniques to better fit the BNNs. Extensive experiments demonstrate the analytical result and the effectiveness of the proposed method. Compared with the original backbones, the DWR backbones constructed by the proposed method result in close to O(\u221as) decrease in activations, while achieving an absolute accuracy increase by up to 1.7% with comparable computational cost. Besides, by using the DWR backbones, existing methods can achieve new state-of-the-art (SOTA) accuracy (e.g., 67.2% on ImageNet with ResNet-18 as the original backbone). We hope this work provides a novel insight into the backbone design of BNNs. 
The code is available at https://github.com/pingxue-hfut/DWR.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xue, Ping and Lu, Yang and Chang, Jingfei and Wei, Xing and Wei, Zhen}, year={2023}, month={Jun.}, pages={10684-10692} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26268/26040", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26268", + "pdf_size": 240286, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1657280743936275067&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.hfut.edu.cn;126.com;mail.hfut.edu.cn;hfut.edu.cn;gocom.cn", + "email": "mail.hfut.edu.cn;126.com;mail.hfut.edu.cn;hfut.edu.cn;gocom.cn", + "github": "https://github.com/pingxue-hfut/DWR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1+0;0;0+1+2;0+1+0", + "aff_unique_norm": "Hefei University of Technology;Anhui Mine IOT and Security Monitoring Technology Key Laboratory;HeFei University of Technology", + "aff_unique_dep": "School of Computer Science and Information Engineering;;Intelligent Manufacturing Institute", + "aff_unique_url": "http://www.hfut.edu.cn;;http://www.hfut.edu.cn/", + "aff_unique_abbr": "HUT;;HFUT", + "aff_campus_unique_index": "0;0+0+0;0;0+0+0;0+0+0", + "aff_campus_unique": "Hefei", + "aff_country_unique_index": "0;0+0+0;0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25723", + "title": "Fast and Interpretable Dynamics for Fisher Markets via Block-Coordinate Updates", + "track": "main", + "status": "Technical", + "abstract": "We consider the problem of large-scale Fisher market equilibrium computation through scalable first-order optimization methods. It is well-known that market equilibria can be captured using structured convex programs such as the Eisenberg-Gale and Shmyrev convex programs. 
Highly performant deterministic full-gradient first-order methods have been developed for these programs. In this paper, we develop new block-coordinate first-order methods for computing Fisher market equilibria, and show that these methods have interpretations as t\u00e2tonnement-style or proportional response-style dynamics where either buyers or items show up one at a time. We reformulate these convex programs and solve them using proximal block coordinate descent methods, a class of methods that update only a small number of coordinates of the decision variable in each iteration. Leveraging recent advances in the convergence analysis of these methods and structures of the equilibrium-capturing convex programs, we establish fast convergence rates of these methods.", + "primary_area": "game theory and economic paradigms", + "author": "Tianlong Nan; Yuan Gao; Christian Kroer", + "authorids": "", + "aff": "Columbia University, New York, USA; Columbia University, New York, USA; Columbia University, New York, USA", + "bibtex": "@article{Nan_Gao_Kroer_2023, title={Fast and Interpretable Dynamics for Fisher Markets via Block-Coordinate Updates}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25723}, DOI={10.1609/aaai.v37i5.25723}, abstractNote={We consider the problem of large-scale Fisher market equilibrium computation through scalable first-order optimization methods. It is well-known that market equilibria can be captured using structured convex programs such as the Eisenberg-Gale and Shmyrev convex programs. Highly performant deterministic full-gradient first-order methods have been developed for these programs. In this paper, we develop new block-coordinate first-order methods for computing Fisher market equilibria, and show that these methods have interpretations as t\u00e2tonnement-style or proportional response-style dynamics where either buyers or items show up one at a time. 
We reformulate these convex programs and solve them using proximal block coordinate descent methods, a class of methods that update only a small number of coordinates of the decision variable in each iteration. Leveraging recent advances in the convergence analysis of these methods and structures of the equilibrium-capturing convex programs, we establish fast convergence rates of these methods.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nan, Tianlong and Gao, Yuan and Kroer, Christian}, year={2023}, month={Jun.}, pages={5832-5840} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25723/25495", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25723", + "pdf_size": 602669, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11523425250369015190&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "columbia.edu;columbia.edu;columbia.edu", + "email": "columbia.edu;columbia.edu;columbia.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Columbia University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.columbia.edu", + "aff_unique_abbr": "Columbia", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "New York", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26003", + "title": "FastAMI \u2013 a Monte Carlo Approach to the Adjustment for Chance in Clustering Comparison Metrics", + "track": "main", + "status": "Technical", + "abstract": "Clustering is at the very core of machine learning, and its applications proliferate with the increasing availability of data. However, as datasets grow, comparing clusterings with an adjustment for chance becomes computationally difficult, preventing unbiased ground-truth comparisons and solution selection. 
We propose FastAMI, a Monte Carlo-based method to efficiently approximate the Adjusted Mutual Information (AMI) and extend it to the Standardized Mutual Information (SMI). The approach is compared with the exact calculation and a recently developed variant of the AMI based on pairwise permutations, using both synthetic and real data. In contrast to the exact calculation our method is fast enough to enable these adjusted information-theoretic comparisons for large datasets while maintaining considerably more accurate results than the pairwise approach.", + "primary_area": "machine learning ii", + "author": "Kai Klede; Leo Schwinn; Dario Zanca; Bj\u00f6rn Eskofier", + "authorids": "", + "aff": "Machine Learning and Data Analytics Lab, Friedrich-Alexander Universit\u00a8at Erlangen-N\u00a8urnberg (FAU); Machine Learning and Data Analytics Lab, Friedrich-Alexander Universit\u00a8at Erlangen-N\u00a8urnberg (FAU); Machine Learning and Data Analytics Lab, Friedrich-Alexander Universit\u00a8at Erlangen-N\u00a8urnberg (FAU); Machine Learning and Data Analytics Lab, Friedrich-Alexander Universit\u00a8at Erlangen-N\u00a8urnberg (FAU)", + "bibtex": "@article{Klede_Schwinn_Zanca_Eskofier_2023, title={FastAMI \u2013 a Monte Carlo Approach to the Adjustment for Chance in Clustering Comparison Metrics}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26003}, DOI={10.1609/aaai.v37i7.26003}, abstractNote={Clustering is at the very core of machine learning, and its applications proliferate with the increasing availability of data. However, as datasets grow, comparing clusterings with an adjustment for chance becomes computationally difficult, preventing unbiased ground-truth comparisons and solution selection. We propose FastAMI, a Monte Carlo-based method to efficiently approximate the Adjusted Mutual Information (AMI) and extend it to the Standardized Mutual Information (SMI). 
The approach is compared with the exact calculation and a recently developed variant of the AMI based on pairwise permutations, using both synthetic and real data. In contrast to the exact calculation our method is fast enough to enable these adjusted information-theoretic comparisons for large datasets while maintaining considerably more accurate results than the pairwise approach.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Klede, Kai and Schwinn, Leo and Zanca, Dario and Eskofier, Bj\u00f6rn}, year={2023}, month={Jun.}, pages={8317-8324} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26003/25775", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26003", + "pdf_size": 176756, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7420368329313381705&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "fau.de;fau.de;fau.de;fau.de", + "email": "fau.de;fau.de;fau.de;fau.de", + "github": "https://github.com/mad-lab-fau/fastami-benchmark", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Friedrich-Alexander Universit\u00a8at Erlangen-N\u00a8urnberg", + "aff_unique_dep": "Machine Learning and Data Analytics Lab", + "aff_unique_url": "https://www fau.de", + "aff_unique_abbr": "FAU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26235", + "title": "Faster Adaptive Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Federated learning has attracted increasing attention with the emergence of distributed data. 
While extensive federated learning algorithms have been proposed for the non-convex distributed problem, the federated learning in practice still faces numerous challenges, such as the large training iterations to converge since the sizes of models and datasets keep increasing, and the lack of adaptivity by SGD-based model updates. Meanwhile, the study of adaptive methods in federated learning is scarce and existing works either lack a complete theoretical convergence guarantee or have slow sample complexity. In this paper, we propose an efficient adaptive algorithm (i.e., FAFED) based on the momentum-based variance reduced technique in cross-silo FL. We first explore how to design the adaptive algorithm in the FL setting. By providing a counter-example, we prove that a simple combination of FL and adaptive methods could lead to divergence. More importantly, we provide a convergence analysis for our method and prove that our algorithm is the first adaptive FL algorithm to reach the best-known samples O(epsilon(-3)) and O(epsilon(-2)) communication rounds to find an epsilon-stationary point without large batches. 
The experimental results on the language modeling task and image classification task with heterogeneous data demonstrate the efficiency of our algorithms.", + "primary_area": "machine learning iv", + "author": "Xidong Wu; Feihu Huang; Zhengmian Hu; Heng Huang", + "authorids": "", + "aff": "Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States + College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China; Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States; Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States; Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA, United States", + "bibtex": "@article{Wu_Huang_Hu_Huang_2023, title={Faster Adaptive Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26235}, DOI={10.1609/aaai.v37i9.26235}, abstractNote={Federated learning has attracted increasing attention with the emergence of distributed data. While extensive federated learning algorithms have been proposed for the non-convex distributed problem, the federated learning in practice still faces numerous challenges, such as the large training iterations to converge since the sizes of models and datasets keep increasing, and the lack of adaptivity by SGD-based model updates. Meanwhile, the study of adaptive methods in federated learning is scarce and existing works either lack a complete theoretical convergence guarantee or have slow sample complexity. In this paper, we propose an efficient adaptive algorithm (i.e., FAFED) based on the momentum-based variance reduced technique in cross-silo FL. We first explore how to design the adaptive algorithm in the FL setting. By providing a counter-example, we prove that a simple combination of FL and adaptive methods could lead to divergence. 
More importantly, we provide a convergence analysis for our method and prove that our algorithm is the first adaptive FL algorithm to reach the best-known samples O(epsilon(-3)) and O(epsilon(-2)) communication rounds to find an epsilon-stationary point without large batches. The experimental results on the language modeling task and image classification task with heterogeneous data demonstrate the efficiency of our algorithms.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Xidong and Huang, Feihu and Hu, Zhengmian and Huang, Heng}, year={2023}, month={Jun.}, pages={10379-10387} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26235/26007", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26235", + "pdf_size": 701472, + "gs_citation": 94, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2971430221380019934&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "outlook.com;gmail.com;gmail.com;gmail.com", + "email": "outlook.com;gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;0;0", + "aff_unique_norm": "University of Pittsburgh;Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "Electrical and Computer Engineering;College of Computer Science and Technology", + "aff_unique_url": "https://www.pitt.edu;http://www.nuaa.edu.cn", + "aff_unique_abbr": "Pitt;NUAA", + "aff_campus_unique_index": "0+1;0;0;0", + "aff_campus_unique": "Pittsburgh;Nanjing", + "aff_country_unique_index": "0+1;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26406", + "title": "Faster Fair Machine via Transferring Fairness Constraints to Virtual Samples", + "track": "main", + "status": "Technical", + "abstract": "Fair classification is an emerging and important research topic in machine learning community. 
Existing methods usually formulate the fairness metrics as additional inequality constraints, and then embed them into the original objective. This makes fair classification problems unable to be effectively tackled by some solvers specific to unconstrained optimization. Although many new tailored algorithms have been designed to attempt to overcome this limitation, they often increase additional computation burden and cannot cope with all types of fairness metrics. To address these challenging issues, in this paper, we propose a novel method for fair classification. \nSpecifically, we theoretically\ndemonstrate that all types of fairness with linear and non-linear covariance functions can be transferred to two virtual samples, which makes the existing state-of-the-art classification solvers be applicable to these cases. Meanwhile, we generalize the proposed method to multiple fairness constraints. We take SVM as an example to show the effectiveness of our new idea. \nEmpirically, we test the proposed method on real-world datasets and all results confirm its excellent performance.", + "primary_area": "philosophy and ethics of ai", + "author": "Zhou Zhai; Lei Luo; Heng Huang; Bin Gu", + "authorids": "", + "aff": "School of Computer and Software, Nanjing University of Information Science and Technology, P.R.China; School of Computer Science and Engineering, Nanjing University of Science and Technology, P.R.China; Department of Electrical & Computer Engineering, University of Pittsburgh, USA; Department of Machine Learning, MBZUAI, United Arab Emirates", + "bibtex": "@article{Zhai_Luo_Huang_Gu_2023, title={Faster Fair Machine via Transferring Fairness Constraints to Virtual Samples}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26406}, DOI={10.1609/aaai.v37i10.26406}, abstractNote={Fair classification is an emerging and important research topic in machine learning community. 
Existing methods usually formulate the fairness metrics as additional inequality constraints, and then embed them into the original objective. This makes fair classification problems unable to be effectively tackled by some solvers specific to unconstrained optimization. Although many new tailored algorithms have been designed to attempt to overcome this limitation, they often increase additional computation burden and cannot cope with all types of fairness metrics. To address these challenging issues, in this paper, we propose a novel method for fair classification. Specifically, we theoretically\ndemonstrate that all types of fairness with linear and non-linear covariance functions can be transferred to two virtual samples, which makes the existing state-of-the-art classification solvers be applicable to these cases. Meanwhile, we generalize the proposed method to multiple fairness constraints. We take SVM as an example to show the effectiveness of our new idea. Empirically, we test the proposed method on real-world datasets and all results confirm its excellent performance.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhai, Zhou and Luo, Lei and Huang, Heng and Gu, Bin}, year={2023}, month={Jun.}, pages={11918-11925} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26406/26178", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26406", + "pdf_size": 422881, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:7kQTyYu7aaoJ:scholar.google.com/&scioq=Faster+Fair+Machine+via+Transferring+Fairness+Constraints+to+Virtual+Samples&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "nuist.edu.cn;gmail.com;gmail.com;gmail.com", + "email": "nuist.edu.cn;gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Nanjing University of Information Science and Technology;Nanjing 
University of Science and Technology;University of Pittsburgh;MBZUAI", + "aff_unique_dep": "School of Computer and Software;School of Computer Science and Engineering;Department of Electrical & Computer Engineering;Department of Machine Learning", + "aff_unique_url": "http://www.nuist.edu.cn;http://www.nust.edu.cn;https://www.pitt.edu;https://www.mbzuai.ac.ae", + "aff_unique_abbr": ";NUST;Pitt;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;2", + "aff_country_unique": "China;United States;United Arab Emirates" + }, + { + "id": "article-26868", + "title": "Fault Injection Based Interventional Causal Learning for Distributed Applications", + "track": "iaai technical track", + "status": "Technical", + "abstract": "We apply the machinery of interventional causal learning with programmable interventions to the domain of applications management. Modern applications are modularized into interdependent components or services (e.g. microservices) for ease of development and management. The communication graph among such components is a function of application code and is not always known to the platform provider. In our solution we learn this unknown communication graph solely using application logs observed during the execution of the application by using fault injections in a staging environment. Specifically, we have developed an active (or interventional) causal learning algorithm that uses the observations obtained during fault injections to learn a model of error propagation in the communication among the components. The \u201cpower of intervention\u201d additionally allows us to address the presence of confounders in unobserved user interactions. We demonstrate the effectiveness of our solution in learning the communication graph of well-known microservice application benchmarks. 
We also show the efficacy of the solution on a downstream task of fault localization in which the learned graph indeed helps to localize faults at runtime in a production environment (in which the location of the fault is unknown). Additionally, we briefly discuss the implementation and deployment status of a fault injection framework which incorporates the developed technology.", + "primary_area": "emerging applications of ai", + "author": "Qing Wang; Jesus Rios; Saurabh Jha; Karthikeyan Shanmugam; Frank Bagehorn; Xi Yang; Robert Filepp; Naoki Abe; Larisa Shwartz", + "authorids": "", + "aff": "IBM Global Chief Data Office; IBM Research; IBM Research; Google Research + IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research", + "bibtex": "@article{Wang_Rios_Jha_Shanmugam_Bagehorn_Yang_Filepp_Abe_Shwartz_2024, title={Fault Injection Based Interventional Causal Learning for Distributed Applications}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26868}, DOI={10.1609/aaai.v37i13.26868}, abstractNote={We apply the machinery of interventional causal learning with programmable interventions to the domain of applications management. Modern applications are modularized into interdependent components or services (e.g. microservices) for ease of development and management. The communication graph among such components is a function of application code and is not always known to the platform provider. In our solution we learn this unknown communication graph solely using application logs observed during the execution of the application by using fault injections in a staging environment. Specifically, we have developed an active (or interventional) causal learning algorithm that uses the observations obtained during fault injections to learn a model of error propagation in the communication among the components. 
The \u201cpower of intervention\u201d additionally allows us to address the presence of confounders in unobserved user interactions. We demonstrate the effectiveness of our solution in learning the communication graph of well-known microservice application benchmarks. We also show the efficacy of the solution on a downstream task of fault localization in which the learned graph indeed helps to localize faults at runtime in a production environment (in which the location of the fault is unknown). Additionally, we briefly discuss the implementation and deployment status of a fault injection framework which incorporates the developed technology.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Qing and Rios, Jesus and Jha, Saurabh and Shanmugam, Karthikeyan and Bagehorn, Frank and Yang, Xi and Filepp, Robert and Abe, Naoki and Shwartz, Larisa}, year={2024}, month={Jul.}, pages={15738-15744} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26868/26640", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26868", + "pdf_size": 313753, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11866391257524315947&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "", + "project": "https://uptimeinstitute.com/webinars/webinar-critical-update-uptime-institute-2022-outage-report/", + "author_num": 9, + "aff_unique_index": "0;0;0;1+0;0;0;0;0;0", + "aff_unique_norm": "IBM;Google", + "aff_unique_dep": "Global Chief Data Office;Google Research", + "aff_unique_url": "https://www.ibm.com;https://research.google", + "aff_unique_abbr": "IBM;Google Research", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0+0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26376", + "title": "Fault-Tolerant Offline Multi-Agent Path 
Planning", + "track": "main", + "status": "Technical", + "abstract": "We study a novel graph path planning problem for multiple agents that may crash at runtime, and block part of the workspace. In our setting, agents can detect neighboring crashed agents, and change followed paths at runtime. The objective is then to prepare a set of paths and switching rules for each agent, ensuring that all correct agents reach their destinations without collisions or deadlocks, despite unforeseen crashes of other agents. Such planning is attractive to build reliable multi-robot systems. We present problem formalization, theoretical analysis such as computational complexities, and how to solve this offline planning problem.", + "primary_area": "multiagent systems", + "author": "Keisuke Okumura; S\u00e9bastien Tixeuil", + "authorids": "", + "aff": "Tokyo Institute of Technology, Japan; Sorbonne University, CNRS, LIP6, Institut Universitaire de France, France", + "bibtex": "@article{Okumura_Tixeuil_2023, title={Fault-Tolerant Offline Multi-Agent Path Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26376}, DOI={10.1609/aaai.v37i10.26376}, abstractNote={We study a novel graph path planning problem for multiple agents that may crash at runtime, and block part of the workspace. In our setting, agents can detect neighboring crashed agents, and change followed paths at runtime. The objective is then to prepare a set of paths and switching rules for each agent, ensuring that all correct agents reach their destinations without collisions or deadlocks, despite unforeseen crashes of other agents. Such planning is attractive to build reliable multi-robot systems. 
We present problem formalization, theoretical analysis such as computational complexities, and how to solve this offline planning problem.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Okumura, Keisuke and Tixeuil, S\u00e9bastien}, year={2023}, month={Jun.}, pages={11647-11654} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26376/26148", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26376", + "pdf_size": 9080509, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14769493370900368910&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "coord.c.titech.ac.jp;lip6.fr", + "email": "coord.c.titech.ac.jp;lip6.fr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Tokyo Institute of Technology;Sorbonne University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.titech.ac.jp;https://www.sorbonne.universite.fr", + "aff_unique_abbr": "Titech;Sorbonne", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Japan;France" + }, + { + "id": "article-27055", + "title": "Feature Decomposition for Reducing Negative Transfer: A Novel Multi-Task Learning Method for Recommender System (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We propose a novel multi-task learning method termed Feature Decomposition Network (FDN). The key idea of the proposed FDN is to reduce the phenomenon of feature redundancy by explicitly decomposing features into task-specific features and task-shared features with carefully designed constraints. 
Experimental results show that our proposed FDN can outperform the state-of-the-art (SOTA) methods by a noticeable margin on Ali-CCP.", + "primary_area": "", + "author": "Jie Zhou; Qian Yu; Chuan Luo; Jing Zhang", + "authorids": "", + "aff": "School of Software, Beihang University, China; School of Software, Beihang University, China; School of Software, Beihang University, China; School of Software, Beihang University, China", + "bibtex": "@article{Zhou_Yu_Luo_Zhang_2024, title={Feature Decomposition for Reducing Negative Transfer: A Novel Multi-Task Learning Method for Recommender System (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27055}, DOI={10.1609/aaai.v37i13.27055}, abstractNote={We propose a novel multi-task learning method termed Feature Decomposition Network (FDN). The key idea of the proposed FDN is to reduce the phenomenon of feature redundancy by explicitly decomposing features into task-specific features and task-shared features with carefully designed constraints. 
Experimental results show that our proposed FDN can outperform the state-of-the-art (SOTA) methods by a noticeable margin on Ali-CCP.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Jie and Yu, Qian and Luo, Chuan and Zhang, Jing}, year={2024}, month={Jul.}, pages={16390-16391} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27055/26827", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27055", + "pdf_size": 121222, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7718177790160854701&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "School of Software", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "Beihang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26228", + "title": "Feature Distribution Fitting with Direction-Driven Weighting for Few-Shot Images Classification", + "track": "main", + "status": "Technical", + "abstract": "Few-shot learning has received increasing attention and witnessed significant advances in recent years. However, most of the few-shot learning methods focus on the optimization of training process, and the learning of metric and sample generating networks. They ignore the importance of learning the ground-truth feature distributions of few-shot classes. This paper proposes a direction-driven weighting method to make the feature distributions of few-shot classes precisely fit the ground-truth distributions. 
The learned feature distributions can generate an unlimited number of training samples for the few-shot classes to avoid overfitting. Specifically, the proposed method consists of two optimization strategies. The direction-driven strategy is for capturing more complete direction information that can describe the feature distributions. The similarity-weighting strategy is proposed to estimate the impact of different classes in the fitting procedure and assign corresponding weights. Our method outperforms the current state-of-the-art performance by an average of 3% for 1-shot on standard few-shot learning benchmarks like miniImageNet, CIFAR-FS, and CUB. The excellent performance and compelling visualization show that our method can more accurately estimate the ground-truth distributions.", + "primary_area": "machine learning iv", + "author": "Xin Wei; Wei Du; Huan Wan; Weidong Min", + "authorids": "", + "aff": "School of Software, Nanchang University; School of Software, Nanchang University; School of Computer and Information Engineering, Jiangxi Normal University; School of Mathematics and Computer Science, Institute of Metaverse, Nanchang University + Jiangxi Key Laboratory of Smart City, Nanchang University", + "bibtex": "@article{Wei_Du_Wan_Min_2023, title={Feature Distribution Fitting with Direction-Driven Weighting for Few-Shot Images Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26228}, DOI={10.1609/aaai.v37i9.26228}, abstractNote={Few-shot learning has received increasing attention and witnessed significant advances in recent years. However, most of the few-shot learning methods focus on the optimization of training process, and the learning of metric and sample generating networks. They ignore the importance of learning the ground-truth feature distributions of few-shot classes. 
This paper proposes a direction-driven weighting method to make the feature distributions of few-shot classes precisely fit the ground-truth distributions. The learned feature distributions can generate an unlimited number of training samples for the few-shot classes to avoid overfitting. Specifically, the proposed method consists of two optimization strategies. The direction-driven strategy is for capturing more complete direction information that can describe the feature distributions. The similarity-weighting strategy is proposed to estimate the impact of different classes in the fitting procedure and assign corresponding weights. Our method outperforms the current state-of-the-art performance by an average of 3% for 1-shot on standard few-shot learning benchmarks like miniImageNet, CIFAR-FS, and CUB. The excellent performance and compelling visualization show that our method can more accurately estimate the ground-truth distributions.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wei, Xin and Du, Wei and Wan, Huan and Min, Weidong}, year={2023}, month={Jun.}, pages={10315-10323} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26228/26000", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26228", + "pdf_size": 2199711, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3191158799215676560&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "ncu.edu.cn;email.ncu.edu.cn;jxnu.edu.cn;ncu.edu.cn", + "email": "ncu.edu.cn;email.ncu.edu.cn;jxnu.edu.cn;ncu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0+0", + "aff_unique_norm": "Nanchang University;Jiangxi Normal University", + "aff_unique_dep": "School of Software;School of Computer and Information Engineering", + "aff_unique_url": "https://www.ncu.edu.cn;http://www.jxnu.edu.cn", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26514", + "title": "Feature Normalization and Cartography-Based Demonstrations for Prompt-Based Fine-Tuning on Emotion-Related Tasks", + "track": "main", + "status": "Technical", + "abstract": "To train a model in a traditional supervised learning classification system for natural language processing (NLP) tasks, it is essential to have labeled data, which is not present in large amounts for many tasks. Prompt-based learning methods attempt to combat the supervised learning need for labeled data by directly adapting pre-trained language models and modeling the probability of text itself. In this paper, we propose a novel data-agnostic strategy for prompt-based fine-tuning that leverages feature moments (a.k.a., mean and standard deviation) as a data augmentation technique and employs training dynamics (i.e., confidence and variability) to allow more informative samples to be concatenated for generating demonstrations as input context. Our approach is a strong method for few-shot learning that forces the language model to pay special attention to the feature moments and allows more informative samples to be concatenated for generating demonstrations as input context by selecting high confidence and low variance samples. To demonstrate its effectiveness given limited training data, we conduct extensive experiments in different few-shot settings on three empathy and emotion classification datasets (from various domains). 
We further evaluate our method's robustness by introducing noise to our few-shot input data and labels and show that exchanging moments between samples and incorporating cartography-based demonstrations are beneficial when the available data is limited and noisy.", + "primary_area": "speech natural language processing", + "author": "Mahshid Hosseini; Cornelia Caragea", + "authorids": "", + "aff": "Computer Science, University of Illinois Chicago; Computer Science, University of Illinois Chicago", + "bibtex": "@article{Hosseini_Caragea_2023, title={Feature Normalization and Cartography-Based Demonstrations for Prompt-Based Fine-Tuning on Emotion-Related Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26514}, DOI={10.1609/aaai.v37i11.26514}, abstractNote={To train a model in a traditional supervised learning classification system for natural language processing (NLP) tasks, it is essential to have labeled data, which is not present in large amounts for many tasks. Prompt-based learning methods attempt to combat the supervised learning need for labeled data by directly adapting pre-trained language models and modeling the probability of text itself. In this paper, we propose a novel data-agnostic strategy for prompt-based fine-tuning that leverages feature moments (a.k.a., mean and standard deviation) as a data augmentation technique and employs training dynamics (i.e., confidence and variability) to allow more informative samples to be concatenated for generating demonstrations as input context. Our approach is a strong method for few-shot learning that forces the language model to pay special attention to the feature moments and allows more informative samples to be concatenated for generating demonstrations as input context by selecting high confidence and low variance samples. 
To demonstrate its effectiveness given limited training data, we conduct extensive experiments in different few-shot settings on three empathy and emotion classification datasets (from various domains). We further evaluate our method\u2019s robustness by introducing noise to our few-shot input data and labels and show that exchanging moments between samples and incorporating cartography-based demonstrations are beneficial when the available data is limited and noisy.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hosseini, Mahshid and Caragea, Cornelia}, year={2023}, month={Jun.}, pages={12881-12889} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26514/26286", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26514", + "pdf_size": 257921, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10959490217048842914&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "uic.edu;uic.edu", + "email": "uic.edu;uic.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Illinois Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26567", + "title": "Feature-Level Debiased Natural Language Understanding", + "track": "main", + "status": "Technical", + "abstract": "Natural language understanding (NLU) models often rely on dataset biases rather than intended task-relevant features to achieve high performance on specific datasets. As a result, these models perform poorly on datasets outside the training distribution. Some recent studies address this issue by reducing the weights of biased samples during the training process. 
However, these methods still encode biased latent features in representations and neglect the dynamic nature of bias, which hinders model prediction. We propose an NLU debiasing method, named debiasing contrastive learning (DCT), to simultaneously alleviate the above problems based on contrastive learning. We devise a debiasing, positive sampling strategy to mitigate biased latent features by selecting the least similar biased positive samples. We also propose a dynamic negative sampling strategy to capture the dynamic influence of biases by employing a bias-only model to dynamically select the most similar biased negative samples. We conduct experiments on three NLU benchmark datasets. Experimental results show that DCT outperforms state-of-the-art baselines on out-of-distribution datasets while maintaining in-distribution performance. We also verify that DCT can reduce biased latent features from the model's representation.", + "primary_area": "speech natural language processing", + "author": "Yougang Lyu; Piji Li; Yechang Yang; Maarten de Rijke; Pengjie Ren; Yukun Zhao; Dawei Yin; Zhaochun Ren", + "authorids": "", + "aff": "School of Computer Science and Technology, Shandong University, Qingdao, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China; University of Amsterdam, Amsterdam, The Netherlands; School of Computer Science and Technology, Shandong University, Qingdao, China; School of Computer Science and Technology, Shandong University, Qingdao, China; School of Computer Science and Technology, Shandong University, Qingdao, China + Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; School of Computer Science and Technology, Shandong University, Qingdao, China", + "bibtex": "@article{Lyu_Li_Yang_de Rijke_Ren_Zhao_Yin_Ren_2023, title={Feature-Level Debiased Natural Language Understanding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26567}, 
DOI={10.1609/aaai.v37i11.26567}, abstractNote={Natural language understanding (NLU) models often rely on dataset biases rather than intended task-relevant features to achieve high performance on specific datasets. As a result, these models perform poorly on datasets outside the training distribution. Some recent studies address this issue by reducing the weights of biased samples during the training process. However, these methods still encode biased latent features in representations and neglect the dynamic nature of bias, which hinders model prediction. We propose an NLU debiasing method, named debiasing contrastive learning (DCT), to simultaneously alleviate the above problems based on contrastive learning. We devise a debiasing, positive sampling strategy to mitigate biased latent features by selecting the least similar biased positive samples. We also propose a dynamic negative sampling strategy to capture the dynamic influence of biases by employing a bias-only model to dynamically select the most similar biased negative samples. We conduct experiments on three NLU benchmark datasets. Experimental results show that DCT outperforms state-of-the-art baselines on out-of-distribution datasets while maintaining in-distribution performance. 
We also verify that DCT can reduce biased latent features from the model\u2019s representation.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lyu, Yougang and Li, Piji and Yang, Yechang and de Rijke, Maarten and Ren, Pengjie and Zhao, Yukun and Yin, Dawei and Ren, Zhaochun}, year={2023}, month={Jun.}, pages={13353-13361} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26567/26339", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26567", + "pdf_size": 307502, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13420829836476857115&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "mail.sdu.edu.cn;mail.sdu.edu.cn;nuaa.edu.cn;uva.nl;outlook.com;baidu.com;acm.org;sdu.edu.cn", + "email": "mail.sdu.edu.cn;mail.sdu.edu.cn;nuaa.edu.cn;uva.nl;outlook.com;baidu.com;acm.org;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;0;0;0+3;3;0", + "aff_unique_norm": "Shandong University;Nanjing University of Aeronautics and Astronautics;University of Amsterdam;Baidu Inc.", + "aff_unique_dep": "School of Computer Science and Technology;College of Computer Science and Technology;;", + "aff_unique_url": "http://www.sdu.edu.cn;http://www.nuaa.edu.cn;https://www.uva.nl;https://www.baidu.com", + "aff_unique_abbr": "SDU;NUAA;UvA;Baidu", + "aff_campus_unique_index": "0;1;2;0;0;0+3;3;0", + "aff_campus_unique": "Qingdao;Nanjing;Amsterdam;Beijing", + "aff_country_unique_index": "0;0;1;0;0;0+0;0;0", + "aff_country_unique": "China;The Netherlands" + }, + { + "id": "article-26727", + "title": "Feature-Space Bayesian Adversarial Learning Improved Malware Detector Robustness", + "track": "aaai special track", + "status": "Technical", + "abstract": "We present a new algorithm to train a robust malware detector. Malware is a prolific problem and malware detectors are a front-line defense. 
Modern detectors rely on machine learning algorithms. Now, the adversarial objective is to devise alterations to the malware code to decrease the chance of being detected whilst preserving the functionality and realism of the malware. Adversarial learning is effective in improving robustness but generating functional and realistic adversarial malware samples is non-trivial. Because: i) in contrast to tasks capable of using gradient-based feedback, adversarial learning in a domain without a differentiable mapping function from the problem space (malware code inputs) to the feature space is hard; and ii) it is difficult to ensure the adversarial malware is realistic and functional. \nThis presents a challenge for developing scalable adversarial machine learning algorithms for large datasets at a production or commercial scale to realize robust malware detectors. We propose an alternative; perform adversarial learning in the feature space in contrast to the problem space. We prove the projection of perturbed, yet valid malware, in the problem space into feature space will always be a subset of adversarials generated in the feature space. Hence, by generating a robust network against feature-space adversarial examples, we inherently achieve robustness against problem-space adversarial examples. We formulate a Bayesian adversarial learning objective that captures the distribution of models for improved robustness. \nTo explain the robustness of the Bayesian adversarial learning algorithm, we prove that our learning method bounds the difference between the adversarial risk and empirical risk and improves robustness. We show that Bayesian neural networks (BNNs) achieve state-of-the-art results; especially in the False Positive Rate (FPR) regime. Adversarially trained BNNs achieve state-of-the-art robustness. 
Notably, adversarially trained BNNs are robust against stronger attacks with larger attack budgets by a margin of up to 15% on a recent production-scale malware dataset of more than 20 million samples. Importantly, our efforts create a benchmark for future defenses in the malware domain.", + "primary_area": "safe and robust ai", + "author": "Bao Gia Doan; Shuiqiao Yang; Paul Montague; Olivier De Vel; Tamas Abraham; Seyit Camtepe; Salil S. Kanhere; Ehsan Abbasnejad; Damith C. Ranashinghe", + "authorids": "", + "aff": "The University of Adelaide, Australia; The University of New South Wales, Australia; Defence Science and Technology Group, Australia; Data61, CSIRO, Australia; Defence Science and Technology Group, Australia; Data61, CSIRO, Australia; The University of New South Wales, Australia; The University of Adelaide, Australia; The University of Adelaide, Australia", + "bibtex": "@article{Doan_Yang_Montague_De Vel_Abraham_Camtepe_Kanhere_Abbasnejad_Ranashinghe_2023, title={Feature-Space Bayesian Adversarial Learning Improved Malware Detector Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26727}, DOI={10.1609/aaai.v37i12.26727}, abstractNote={We present a new algorithm to train a robust malware detector. Malware is a prolific problem and malware detectors are a front-line defense. Modern detectors rely on machine learning algorithms. Now, the adversarial objective is to devise alterations to the malware code to decrease the chance of being detected whilst preserving the functionality and realism of the malware. Adversarial learning is effective in improving robustness but generating functional and realistic adversarial malware samples is non-trivial. 
Because: i) in contrast to tasks capable of using gradient-based feedback, adversarial learning in a domain without a differentiable mapping function from the problem space (malware code inputs) to the feature space is hard; and ii) it is difficult to ensure the adversarial malware is realistic and functional. This presents a challenge for developing scalable adversarial machine learning algorithms for large datasets at a production or commercial scale to realize robust malware detectors. We propose an alternative; perform adversarial learning in the feature space in contrast to the problem space. We prove the projection of perturbed, yet valid malware, in the problem space into feature space will always be a subset of adversarials generated in the feature space. Hence, by generating a robust network against feature-space adversarial examples, we inherently achieve robustness against problem-space adversarial examples. We formulate a Bayesian adversarial learning objective that captures the distribution of models for improved robustness. To explain the robustness of the Bayesian adversarial learning algorithm, we prove that our learning method bounds the difference between the adversarial risk and empirical risk and improves robustness. We show that Bayesian neural networks (BNNs) achieve state-of-the-art results; especially in the False Positive Rate (FPR) regime. Adversarially trained BNNs achieve state-of-the-art robustness. Notably, adversarially trained BNNs are robust against stronger attacks with larger attack budgets by a margin of up to 15% on a recent production-scale malware dataset of more than 20 million samples. Importantly, our efforts create a benchmark for future defenses in the malware domain.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Doan, Bao Gia and Yang, Shuiqiao and Montague, Paul and De Vel, Olivier and Abraham, Tamas and Camtepe, Seyit and Kanhere, Salil S. 
and Abbasnejad, Ehsan and Ranashinghe, Damith C.}, year={2023}, month={Jun.}, pages={14783-14791} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26727/26499", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26727", + "pdf_size": 252935, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4112698795089498665&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "adelaide.edu.au;unsw.edu.au;defence.gov.au;yahoo.com.au;defence.gov.au;data61.csiro.au;unsw.edu.au;adelaide.edu.au;adelaide.edu.au", + "email": "adelaide.edu.au;unsw.edu.au;defence.gov.au;yahoo.com.au;defence.gov.au;data61.csiro.au;unsw.edu.au;adelaide.edu.au;adelaide.edu.au", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;2;3;2;3;1;0;0", + "aff_unique_norm": "The University of Adelaide;University of New South Wales;Defence Science and Technology Group;CSIRO", + "aff_unique_dep": ";;;Data61", + "aff_unique_url": "https://www.adelaide.edu.au;https://www.unsw.edu.au;https://www.dst.defence.gov.au;https://www.csiro.au", + "aff_unique_abbr": "Adelaide;UNSW;DST Group;CSIRO", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26203", + "title": "FedABC: Targeting Fair Competition in Personalized Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Federated learning aims to collaboratively train models without accessing their client's local private data. The data may be Non-IID for different clients and thus resulting in poor performance. Recently, personalized federated learning (PFL) has achieved great success in handling Non-IID data by enforcing regularization in local optimization or improving the model aggregation scheme on the server. 
However, most of the PFL approaches do not take into account the unfair competition issue caused by the imbalanced data distribution and lack of positive samples for some classes in each client. To address this issue, we propose a novel and generic PFL framework termed Federated Averaging via Binary Classification, dubbed FedABC. In particular, we adopt the ``one-vs-all'' training strategy in each client to alleviate the unfair competition between classes by constructing a personalized binary classification problem for each class. This may aggravate the class imbalance challenge and thus a novel personalized binary classification loss that incorporates both the under-sampling and hard sample mining strategies is designed. Extensive experiments are conducted on two popular datasets under different settings, and the results demonstrate that our FedABC can significantly outperform the existing counterparts.", + "primary_area": "machine learning iii", + "author": "Dui Wang; Li Shen; Yong Luo; Han Hu; Kehua Su; Yonggang Wen; Dacheng Tao", + "authorids": "", + "aff": "National Engineering Research Center for Multimedia Software, School of Computer Science, Institute of Artificial Intelligence and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China + Hubei Luojia Laboratory, Wuhan, China + JD Explore Academy, China; JD Explore Academy, China; National Engineering Research Center for Multimedia Software, School of Computer Science, Institute of Artificial Intelligence and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China + Hubei Luojia Laboratory, Wuhan, China; School of Information and Electronics, Beijing Institute of Technology, China; National Engineering Research Center for Multimedia Software, School of Computer Science, Institute of Artificial Intelligence and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China; School of 
Computer Science and Engineering, Nanyang Technological University, Singapore; JD Explore Academy, China", + "bibtex": "@article{Wang_Shen_Luo_Hu_Su_Wen_Tao_2023, title={FedABC: Targeting Fair Competition in Personalized Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26203}, DOI={10.1609/aaai.v37i8.26203}, abstractNote={Federated learning aims to collaboratively train models without accessing their client\u2019s local private data. The data may be Non-IID for different clients and thus resulting in poor performance. Recently, personalized federated learning (PFL) has achieved great success in handling Non-IID data by enforcing regularization in local optimization or improving the model aggregation scheme on the server. However, most of the PFL approaches do not take into account the unfair competition issue caused by the imbalanced data distribution and lack of positive samples for some classes in each client. To address this issue, we propose a novel and generic PFL framework termed Federated Averaging via Binary Classification, dubbed FedABC. In particular, we adopt the ``one-vs-all\u2019\u2019 training strategy in each client to alleviate the unfair competition between classes by constructing a personalized binary classification problem for each class. This may aggravate the class imbalance challenge and thus a novel personalized binary classification loss that incorporates both the under-sampling and hard sample mining strategies is designed. 
Extensive experiments are conducted on two popular datasets under different settings, and the results demonstrate that our FedABC can significantly outperform the existing counterparts.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Dui and Shen, Li and Luo, Yong and Hu, Han and Su, Kehua and Wen, Yonggang and Tao, Dacheng}, year={2023}, month={Jun.}, pages={10095-10103} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26203/25975", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26203", + "pdf_size": 1150550, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5278646545594879287&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "whu.edu.cn;whu.edu.cn;whu.edu.cn;gmail.com;bit.edu.cn;ntu.edu.sg;gmail.com", + "email": "whu.edu.cn;whu.edu.cn;whu.edu.cn;gmail.com;bit.edu.cn;ntu.edu.sg;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2;2;0+1;3;0;4;2", + "aff_unique_norm": "Wuhan University;Hubei Luojia Laboratory;JD Explore Academy;Beijing Institute of Technology;Nanyang Technological University", + "aff_unique_dep": "School of Computer Science;;;School of Information and Electronics;School of Computer Science and Engineering", + "aff_unique_url": "http://www.whu.edu.cn;;;http://www.bit.edu.cn/;https://www.ntu.edu.sg", + "aff_unique_abbr": "WHU;;;BIT;NTU", + "aff_campus_unique_index": "0+0;0+0;0;2", + "aff_campus_unique": "Wuhan;;Singapore", + "aff_country_unique_index": "0+0+0;0;0+0;0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26330", + "title": "FedALA: Adaptive Local Aggregation for Personalized Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "A key challenge in federated learning (FL) is the statistical heterogeneity that impairs the generalization of the global model on each client. 
To address this, we propose a method Federated learning with Adaptive Local Aggregation (FedALA) by capturing the desired information in the global model for client models in personalized FL. The key component of FedALA is an Adaptive Local Aggregation (ALA) module, which can adaptively aggregate the downloaded global model and local model towards the local objective on each client to initialize the local model before training in each iteration. To evaluate the effectiveness of FedALA, we conduct extensive experiments with five benchmark datasets in computer vision and natural language processing domains. FedALA outperforms eleven state-of-the-art baselines by up to 3.27% in test accuracy. Furthermore, we also apply ALA module to other federated learning methods and achieve up to 24.19% improvement in test accuracy. Code is available at https://github.com/TsingZ0/FedALA.", + "primary_area": "machine learning iv", + "author": "Jianqing Zhang; Yang Hua; Hao Wang; Tao Song; Zhengui Xue; Ruhui Ma; Haibing Guan", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Queen\u2019s University Belfast; Louisiana State University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Zhang_Hua_Wang_Song_Xue_Ma_Guan_2023, title={FedALA: Adaptive Local Aggregation for Personalized Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26330}, DOI={10.1609/aaai.v37i9.26330}, abstractNote={A key challenge in federated learning (FL) is the statistical heterogeneity that impairs the generalization of the global model on each client. To address this, we propose a method Federated learning with Adaptive Local Aggregation (FedALA) by capturing the desired information in the global model for client models in personalized FL. 
The key component of FedALA is an Adaptive Local Aggregation (ALA) module, which can adaptively aggregate the downloaded global model and local model towards the local objective on each client to initialize the local model before training in each iteration. To evaluate the effectiveness of FedALA, we conduct extensive experiments with five benchmark datasets in computer vision and natural language processing domains. FedALA outperforms eleven state-of-the-art baselines by up to 3.27% in test accuracy. Furthermore, we also apply ALA module to other federated learning methods and achieve up to 24.19% improvement in test accuracy. Code is available at https://github.com/TsingZ0/FedALA.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jianqing and Hua, Yang and Wang, Hao and Song, Tao and Xue, Zhengui and Ma, Ruhui and Guan, Haibing}, year={2023}, month={Jun.}, pages={11237-11244} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26330/26102", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26330", + "pdf_size": 439286, + "gs_citation": 282, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5058482457491790432&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;qub.ac.uk;lsu.edu", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;qub.ac.uk;lsu.edu", + "github": "https://github.com/TsingZ0/FedALA", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Queen's University Belfast;Louisiana State University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.qub.ac.uk;https://www.lsu.edu", + "aff_unique_abbr": "SJTU;QUB;LSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0;0;0;0", + "aff_country_unique": "China;United 
Kingdom;United States" + }, + { + "id": "article-26223", + "title": "FedGS: Federated Graph-Based Sampling with Arbitrary Client Availability", + "track": "main", + "status": "Technical", + "abstract": "While federated learning has shown strong results in opti- mizing a machine learning model without direct access to the original data, its performance may be hindered by in- termittent client availability which slows down the conver- gence and biases the final learned model. There are significant challenges to achieve both stable and bias-free training un- der arbitrary client availability. To address these challenges, we propose a framework named Federated Graph-based Sam- pling (FEDGS), to stabilize the global model update and mitigate the long-term bias given arbitrary client availabil- ity simultaneously. First, we model the data correlations of clients with a Data-Distribution-Dependency Graph (3DG) that helps keep the sampled clients data apart from each other, which is theoretically shown to improve the approximation to the optimal model update. Second, constrained by the far- distance in data distribution of the sampled clients, we fur- ther minimize the variance of the numbers of times that the clients are sampled, to mitigate long-term bias. To validate the effectiveness of FEDGS, we conduct experiments on three datasets under a comprehensive set of seven client availability modes. Our experimental results confirm FEDGS\u2019s advantage in both enabling a fair client-sampling scheme and improving the model performance under arbitrary client availability. 
Our code is available at https://github.com/WwZzz/FedGS.", + "primary_area": "machine learning iii", + "author": "Zheng Wang; Xiaoliang Fan; Jianzhong Qi; Haibing Jin; Peizhen Yang; Siqi Shen; Cheng Wang", + "authorids": "", + "aff": "Fujian Key Laboratory of Sensing and Computing for Smart Cities, School of Informatics, Xiamen University, Xiamen, China; Fujian Key Laboratory of Sensing and Computing for Smart Cities, School of Informatics, Xiamen University, Xiamen, China; School of Computing and Information Systems, University of Melbourne, Melbourne, Australia; Fujian Key Laboratory of Sensing and Computing for Smart Cities, School of Informatics, Xiamen University, Xiamen, China; Fujian Key Laboratory of Sensing and Computing for Smart Cities, School of Informatics, Xiamen University, Xiamen, China; Fujian Key Laboratory of Sensing and Computing for Smart Cities, School of Informatics, Xiamen University, Xiamen, China; Fujian Key Laboratory of Sensing and Computing for Smart Cities, School of Informatics, Xiamen University, Xiamen, China", + "bibtex": "@article{Wang_Fan_Qi_Jin_Yang_Shen_Wang_2023, title={FedGS: Federated Graph-Based Sampling with Arbitrary Client Availability}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26223}, DOI={10.1609/aaai.v37i8.26223}, abstractNote={While federated learning has shown strong results in opti- mizing a machine learning model without direct access to the original data, its performance may be hindered by in- termittent client availability which slows down the conver- gence and biases the final learned model. There are significant challenges to achieve both stable and bias-free training un- der arbitrary client availability. To address these challenges, we propose a framework named Federated Graph-based Sam- pling (FEDGS), to stabilize the global model update and mitigate the long-term bias given arbitrary client availabil- ity simultaneously. 
First, we model the data correlations of clients with a Data-Distribution-Dependency Graph (3DG) that helps keep the sampled clients data apart from each other, which is theoretically shown to improve the approximation to the optimal model update. Second, constrained by the far- distance in data distribution of the sampled clients, we fur- ther minimize the variance of the numbers of times that the clients are sampled, to mitigate long-term bias. To validate the effectiveness of FEDGS, we conduct experiments on three datasets under a comprehensive set of seven client availability modes. Our experimental results confirm FEDGS\u2019s advantage in both enabling a fair client-sampling scheme and improving the model performance under arbitrary client availability. Our code is available at https://github.com/WwZzz/FedGS.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zheng and Fan, Xiaoliang and Qi, Jianzhong and Jin, Haibing and Yang, Peizhen and Shen, Siqi and Wang, Cheng}, year={2023}, month={Jun.}, pages={10271-10278} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26223/25995", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26223", + "pdf_size": 777419, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9784781468516309080&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn;unimelb.edu.au;stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn", + "email": "stu.xmu.edu.cn;xmu.edu.cn;unimelb.edu.au;stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn", + "github": "https://github.com/WwZzz/FedGS", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;0", + "aff_unique_norm": "Xiamen University;University of Melbourne", + "aff_unique_dep": "School of Informatics;School of Computing and Information Systems", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.unimelb.edu.au", + 
"aff_unique_abbr": "XMU;UniMelb", + "aff_campus_unique_index": "0;0;1;0;0;0;0", + "aff_campus_unique": "Xiamen;Melbourne", + "aff_country_unique_index": "0;0;1;0;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26122", + "title": "FedMDFG: Federated Learning with Multi-Gradient Descent and Fair Guidance", + "track": "main", + "status": "Technical", + "abstract": "Fairness has been considered as a critical problem in federated learning (FL). In this work, we analyze two direct causes of unfairness in FL - an unfair direction and an improper step size when updating the model. To solve these issues, we introduce an effective way to measure fairness of the model through the cosine similarity, and then propose a federated multiple gradient descent algorithm with fair guidance (FedMDFG) to drive the model fairer. We first convert FL into a multi-objective optimization problem (MOP) and design an advanced multiple gradient descent algorithm to calculate a fair descent direction by adding a fair-driven objective to MOP. A low-communication-cost line search strategy is then designed to find a better step size for the model update. We further show the theoretical analysis on how it can enhance fairness and guarantee the convergence. Finally, extensive experiments in several FL scenarios verify that FedMDFG is robust and outperforms the SOTA FL algorithms in convergence and fairness. 
The source code is available at https://github.com/zibinpan/FedMDFG.", + "primary_area": "machine learning iii", + "author": "Zibin Pan; Shuyi Wang; Chi Li; Haijin Wang; Xiaoying Tang; Junhua Zhao", + "authorids": "", + "aff": "The School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China+The Shenzhen Institute of Arti\ufb01cial Intelligence and Robotics for Society; The School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China+The Shenzhen Institute of Arti\ufb01cial Intelligence and Robotics for Society; The School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China; The School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China; The School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China+The Shenzhen Institute of Arti\ufb01cial Intelligence and Robotics for Society+The Guangdong Provincial Key Laboratory of Future Networks of Intelligence; The School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China+The Shenzhen Institute of Arti\ufb01cial Intelligence and Robotics for Society", + "bibtex": "@article{Pan_Wang_Li_Wang_Tang_Zhao_2023, title={FedMDFG: Federated Learning with Multi-Gradient Descent and Fair Guidance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26122}, DOI={10.1609/aaai.v37i8.26122}, abstractNote={Fairness has been considered as a critical problem in federated learning (FL). In this work, we analyze two direct causes of unfairness in FL - an unfair direction and an improper step size when updating the model. To solve these issues, we introduce an effective way to measure fairness of the model through the cosine similarity, and then propose a federated multiple gradient descent algorithm with fair guidance (FedMDFG) to drive the model fairer. 
We first convert FL into a multi-objective optimization problem (MOP) and design an advanced multiple gradient descent algorithm to calculate a fair descent direction by adding a fair-driven objective to MOP. A low-communication-cost line search strategy is then designed to find a better step size for the model update. We further show the theoretical analysis on how it can enhance fairness and guarantee the convergence. Finally, extensive experiments in several FL scenarios verify that FedMDFG is robust and outperforms the SOTA FL algorithms in convergence and fairness. The source code is available at https://github.com/zibinpan/FedMDFG.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Zibin and Wang, Shuyi and Li, Chi and Wang, Haijin and Tang, Xiaoying and Zhao, Junhua}, year={2023}, month={Jun.}, pages={9364-9371} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26122/25894", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26122", + "pdf_size": 592074, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3965370883500042435&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;cuhk.edu.cn;cuhk.edu.cn", + "email": "link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn;cuhk.edu.cn;cuhk.edu.cn", + "github": "https://github.com/zibinpan/FedMDFG", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0;0;0+1+2;0+1", + "aff_unique_norm": "The Chinese University of Hong Kong;Shenzhen Institute of Artificial Intelligence and Robotics for Society;Guangdong Provincial Key Laboratory of Future Networks of Intelligence", + "aff_unique_dep": "School of Science and Engineering;;Key Laboratory of Future Networks of Intelligence", + "aff_unique_url": "https://www.cuhk.edu.cn;http://www.siarfs.cn;", + "aff_unique_abbr": "CUHK;SIARFS;", + 
"aff_campus_unique_index": "0+0;0+0;0;0;0+0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+0;0;0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26237", + "title": "FedNP: Towards Non-IID Federated Learning via Federated Neural Propagation", + "track": "main", + "status": "Technical", + "abstract": "Traditional federated learning (FL) algorithms, such as FedAvg, fail to handle non-i.i.d data because they learn a global model by simply averaging biased local models that are trained on non-i.i.d local data, therefore failing to model the global data distribution. \nIn this paper, we present a novel Bayesian FL algorithm that successfully handles such a non-i.i.d FL setting by enhancing the local training task with an auxiliary task that explicitly estimates the global data distribution. \nOne key challenge in estimating the global data distribution is that the data are partitioned in FL, and therefore the ground-truth global data distribution is inaccessible.\nTo address this challenge, we propose an expectation-propagation-inspired probabilistic neural network, dubbed federated neural propagation (FedNP), which efficiently estimates the global data distribution given non-i.i.d data partitions. 
Our algorithm is sampling-free and end-to-end differentiable, can be applied with any conventional FL frameworks and learns richer global data representation.\nExperiments on both image classification tasks with synthetic non-i.i.d image data partitions and real-world non-i.i.d speech recognition tasks demonstrate that our framework effectively alleviates the performance deterioration caused by non-i.i.d data.", + "primary_area": "machine learning iv", + "author": "Xueyang Wu; Hengguan Huang; Youlong Ding; Hao Wang; Ye Wang; Qian Xu", + "authorids": "", + "aff": "Hong Kong University of Science and Technology, Hong Kong SAR, China; National University of Singapore, Singapore; Shenzhen University, Shenzhen, China; Rutgers University, Piscataway, NJ, USA; National University of Singapore, Singapore; Hong Kong University of Science and Technology, Hong Kong SAR, China", + "bibtex": "@article{Wu_Huang_Ding_Wang_Wang_Xu_2023, title={FedNP: Towards Non-IID Federated Learning via Federated Neural Propagation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26237}, DOI={10.1609/aaai.v37i9.26237}, abstractNote={Traditional federated learning (FL) algorithms, such as FedAvg, fail to handle non-i.i.d data because they learn a global model by simply averaging biased local models that are trained on non-i.i.d local data, therefore failing to model the global data distribution. In this paper, we present a novel Bayesian FL algorithm that successfully handles such a non-i.i.d FL setting by enhancing the local training task with an auxiliary task that explicitly estimates the global data distribution. 
One key challenge in estimating the global data distribution is that the data are partitioned in FL, and therefore the ground-truth global data distribution is inaccessible.\nTo address this challenge, we propose an expectation-propagation-inspired probabilistic neural network, dubbed federated neural propagation (FedNP), which efficiently estimates the global data distribution given non-i.i.d data partitions. Our algorithm is sampling-free and end-to-end differentiable, can be applied with any conventional FL frameworks and learns richer global data representation.\nExperiments on both image classification tasks with synthetic non-i.i.d image data partitions and real-world non-i.i.d speech recognition tasks demonstrate that our framework effectively alleviates the performance deterioration caused by non-i.i.d data.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Xueyang and Huang, Hengguan and Ding, Youlong and Wang, Hao and Wang, Ye and Xu, Qian}, year={2023}, month={Jun.}, pages={10399-10407} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26237/26009", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26237", + "pdf_size": 474736, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14557541619407088158&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "connect.ust.hk;u.nus.edu;gmail.com;cs.rutgers.edu;comp.nus.edu.sg;ust.hk", + "email": "connect.ust.hk;u.nus.edu;gmail.com;cs.rutgers.edu;comp.nus.edu.sg;ust.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;1;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;National University of Singapore;Shenzhen University;Rutgers University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ust.hk;https://www.nus.edu.sg;https://www.szu.edu.cn;https://www.rutgers.edu", + "aff_unique_abbr": "HKUST;NUS;SZU;Rutgers", + 
"aff_campus_unique_index": "0;2;3;0", + "aff_campus_unique": "Hong Kong;;Shenzhen;Piscataway", + "aff_country_unique_index": "0;1;0;2;1;0", + "aff_country_unique": "China;Singapore;United States" + }, + { + "id": "article-26252", + "title": "Federated Generative Model on Multi-Source Heterogeneous Data in IoT", + "track": "main", + "status": "Technical", + "abstract": "The study of generative models is a promising branch of deep learning techniques, which has been successfully applied to different scenarios, such as Artificial Intelligence and the Internet of Things. While in most of the existing works, the generative models are realized as a centralized structure, raising the threats of security and privacy and the overburden of communication costs. Rare efforts have been committed to investigating distributed generative models, especially when the training data comes from multiple heterogeneous sources under realistic IoT settings. In this paper, to handle this challenging problem, we design a federated generative model framework that can learn a powerful generator for the hierarchical IoT systems. Particularly, our generative model framework can solve the problem of distributed data generation on multi-source heterogeneous data in two scenarios, i.e., feature related scenario and label related scenario. In addition, in our federated generative models, we develop a synchronous and an asynchronous updating methods to satisfy different application requirements. 
Extensive experiments on a simulated dataset and multiple real datasets are conducted to evaluate the data generation performance of our proposed generative models through comparison with the state-of-the-arts.", + "primary_area": "machine learning iv", + "author": "Zuobin Xiong; Wei Li; Zhipeng Cai", + "authorids": "", + "aff": "Department of Computer Science, Georgia State University; Department of Computer Science, Georgia State University; Department of Computer Science, Georgia State University", + "bibtex": "@article{Xiong_Li_Cai_2023, title={Federated Generative Model on Multi-Source Heterogeneous Data in IoT}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26252}, DOI={10.1609/aaai.v37i9.26252}, abstractNote={The study of generative models is a promising branch of deep learning techniques, which has been successfully applied to different scenarios, such as Artificial Intelligence and the Internet of Things. While in most of the existing works, the generative models are realized as a centralized structure, raising the threats of security and privacy and the overburden of communication costs. Rare efforts have been committed to investigating distributed generative models, especially when the training data comes from multiple heterogeneous sources under realistic IoT settings. In this paper, to handle this challenging problem, we design a federated generative model framework that can learn a powerful generator for the hierarchical IoT systems. Particularly, our generative model framework can solve the problem of distributed data generation on multi-source heterogeneous data in two scenarios, i.e., feature related scenario and label related scenario. In addition, in our federated generative models, we develop a synchronous and an asynchronous updating methods to satisfy different application requirements. 
Extensive experiments on a simulated dataset and multiple real datasets are conducted to evaluate the data generation performance of our proposed generative models through comparison with the state-of-the-arts.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiong, Zuobin and Li, Wei and Cai, Zhipeng}, year={2023}, month={Jun.}, pages={10537-10545} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26252/26024", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26252", + "pdf_size": 2232678, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4961410339285047535&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "student.gsu.edu;gsu.edu;gsu.edu", + "email": "student.gsu.edu;gsu.edu;gsu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Georgia State University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.gsu.edu", + "aff_unique_abbr": "GSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26187", + "title": "Federated Learning on Non-IID Graphs via Structural Knowledge Sharing", + "track": "main", + "status": "Technical", + "abstract": "Graph neural networks (GNNs) have shown their superiority in modeling graph data. Owing to the advantages of federated learning, federated graph learning (FGL) enables clients to train strong GNN models in a distributed manner without sharing their private data. A core challenge in federated systems is the non-IID problem, which also widely exists in real-world graph data. 
For example, local data of clients may come from diverse datasets or even domains, e.g., social networks and molecules, increasing the difficulty for FGL methods to capture commonly shared knowledge and learn a generalized encoder. From real-world graph datasets, we observe that some structural properties are shared by various domains, presenting great potential for sharing structural knowledge in FGL. Inspired by this, we propose FedStar, an FGL framework that extracts and shares the common underlying structure information for inter-graph federated learning tasks. To explicitly extract the structure information rather than encoding them along with the node features, we define structure embeddings and encode them with an independent structure encoder. Then, the structure encoder is shared across clients while the feature-based knowledge is learned in a personalized way, making FedStar capable of capturing more structure-based domain-invariant information and avoiding feature misalignment issues. 
We perform extensive experiments over both cross-dataset and cross-domain non-IID FGL settings, demonstrating the superiority of FedStar.", + "primary_area": "machine learning iii", + "author": "Yue Tan; Yixin Liu; Guodong Long; Jing Jiang; Qinghua Lu; Chengqi Zhang", + "authorids": "", + "aff": "Australian Artificial Intelligence Institute, University of Technology Sydney, Australia; Monash University, Australia; Australian Artificial Intelligence Institute, University of Technology Sydney, Australia; Australian Artificial Intelligence Institute, University of Technology Sydney, Australia; Data61, CSIRO, Australia; Australian Artificial Intelligence Institute, University of Technology Sydney, Australia", + "bibtex": "@article{Tan_Liu_Long_Jiang_Lu_Zhang_2023, title={Federated Learning on Non-IID Graphs via Structural Knowledge Sharing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26187}, DOI={10.1609/aaai.v37i8.26187}, abstractNote={Graph neural networks (GNNs) have shown their superiority in modeling graph data. Owing to the advantages of federated learning, federated graph learning (FGL) enables clients to train strong GNN models in a distributed manner without sharing their private data. A core challenge in federated systems is the non-IID problem, which also widely exists in real-world graph data. For example, local data of clients may come from diverse datasets or even domains, e.g., social networks and molecules, increasing the difficulty for FGL methods to capture commonly shared knowledge and learn a generalized encoder. From real-world graph datasets, we observe that some structural properties are shared by various domains, presenting great potential for sharing structural knowledge in FGL. Inspired by this, we propose FedStar, an FGL framework that extracts and shares the common underlying structure information for inter-graph federated learning tasks. 
To explicitly extract the structure information rather than encoding them along with the node features, we define structure embeddings and encode them with an independent structure encoder. Then, the structure encoder is shared across clients while the feature-based knowledge is learned in a personalized way, making FedStar capable of capturing more structure-based domain-invariant information and avoiding feature misalignment issues. We perform extensive experiments over both cross-dataset and cross-domain non-IID FGL settings, demonstrating the superiority of FedStar.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tan, Yue and Liu, Yixin and Long, Guodong and Jiang, Jing and Lu, Qinghua and Zhang, Chengqi}, year={2023}, month={Jun.}, pages={9953-9961} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26187/25959", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26187", + "pdf_size": 1670170, + "gs_citation": 163, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7640854877130409830&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "student.uts.edu.au;monash.edu; fguodong.long;uts.edu.au;data61.csiro.au;uts.edu.au", + "email": "student.uts.edu.au;monash.edu; fguodong.long;uts.edu.au;data61.csiro.au;uts.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;2;0", + "aff_unique_norm": "University of Technology Sydney;Monash University;CSIRO", + "aff_unique_dep": "Australian Artificial Intelligence Institute;;Data61", + "aff_unique_url": "https://www.uts.edu.au;https://www.monash.edu;https://www.csiro.au", + "aff_unique_abbr": "UTS;Monash;CSIRO", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Sydney;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25955", + "title": "Federated Robustness Propagation: Sharing Adversarial Robustness in 
Heterogeneous Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Federated learning (FL) emerges as a popular distributed learning schema that learns a model from a set of participating users without sharing raw data. One major challenge of FL comes with heterogeneous users, who may have distributionally different (or non-iid) data and varying computation resources. As federated users would use the model for prediction, they often demand the trained model to be robust against malicious attackers at test time. Whereas adversarial training (AT) provides a sound solution for centralized learning, extending its usage for federated users has imposed significant challenges, as many users may have very limited training data and tight computational budgets, to afford the data-hungry and costly AT. In this paper, we study a novel FL strategy: propagating adversarial robustness from rich-resource users that can afford AT, to those with poor resources that cannot afford it, during federated learning. We show that existing FL techniques cannot be effectively integrated with the strategy to propagate robustness among non-iid users and propose an efficient propagation approach by the proper use of batch-normalization. We demonstrate the rationality and effectiveness of our method through extensive experiments. Especially, the proposed method is shown to grant federated models remarkable robustness even when only a small portion of users afford AT during learning. 
Source code can be accessed at https://github.com/illidanlab/FedRBN.", + "primary_area": "machine learning ii", + "author": "Junyuan Hong; Haotao Wang; Zhangyang Wang; Jiayu Zhou", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Michigan State University; Department of Computer Science and Engineering, University of Texas at Austin; Department of Computer Science and Engineering, University of Texas at Austin; Department of Computer Science and Engineering, Michigan State University", + "bibtex": "@article{Hong_Wang_Wang_Zhou_2023, title={Federated Robustness Propagation: Sharing Adversarial Robustness in Heterogeneous Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25955}, DOI={10.1609/aaai.v37i7.25955}, abstractNote={Federated learning (FL) emerges as a popular distributed learning schema that learns a model from a set of participating users without sharing raw data. One major challenge of FL comes with heterogeneous users, who may have distributionally different (or non-iid) data and varying computation resources. As federated users would use the model for prediction, they often demand the trained model to be robust against malicious attackers at test time. Whereas adversarial training (AT) provides a sound solution for centralized learning, extending its usage for federated users has imposed significant challenges, as many users may have very limited training data and tight computational budgets, to afford the data-hungry and costly AT. In this paper, we study a novel FL strategy: propagating adversarial robustness from rich-resource users that can afford AT, to those with poor resources that cannot afford it, during federated learning. We show that existing FL techniques cannot be effectively integrated with the strategy to propagate robustness among non-iid users and propose an efficient propagation approach by the proper use of batch-normalization. 
We demonstrate the rationality and effectiveness of our method through extensive experiments. Especially, the proposed method is shown to grant federated models remarkable robustness even when only a small portion of users afford AT during learning. Source code can be accessed at https://github.com/illidanlab/FedRBN.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hong, Junyuan and Wang, Haotao and Wang, Zhangyang and Zhou, Jiayu}, year={2023}, month={Jun.}, pages={7893-7901} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25955/25727", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25955", + "pdf_size": 231961, + "gs_citation": 55, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7625857740070454186&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "msu.edu;utexas.edu;utexas.edu;msu.edu", + "email": "msu.edu;utexas.edu;utexas.edu;msu.edu", + "github": "https://github.com/illidanlab/FedRBN", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "Michigan State University;University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.msu.edu;https://www.utexas.edu", + "aff_unique_abbr": "MSU;UT Austin", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Austin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25321", + "title": "FeedFormer: Revisiting Transformer Decoder for Efficient Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "With the success of Vision Transformer (ViT) in image classification, its variants have yielded great success in many downstream vision tasks. Among those, the semantic segmentation task has also benefited greatly from the advance of ViT variants. 
However, most studies of the transformer for semantic segmentation only focus on designing efficient transformer encoders, rarely giving attention to designing the decoder. Several studies make attempts in using the transformer decoder as the segmentation decoder with class-wise learnable query. Instead, we aim to directly use the encoder features as the queries. This paper proposes the Feature Enhancing Decoder transFormer (FeedFormer) that enhances structural information using the transformer decoder. Our goal is to decode the high-level encoder features using the lowest-level encoder feature. We do this by formulating high-level features as queries, and the lowest-level feature as the key and value. This enhances the high-level features by collecting the structural information from the lowest-level feature. Additionally, we use a simple reformation trick of pushing the encoder blocks to take the place of the existing self-attention module of the decoder to improve efficiency. We show the superiority of our decoder with various light-weight transformer-based decoders on popular semantic segmentation datasets. Despite the minute computation, our model has achieved state-of-the-art performance in the performance computation trade-off. Our model FeedFormer-B0 surpasses SegFormer-B0 with 1.8% higher mIoU and 7.1% less computation on ADE20K, and 1.7% higher mIoU and 14.4% less computation on Cityscapes, respectively. 
Code will be released at: https://github.com/jhshim1995/FeedFormer.", + "primary_area": "computer vision ii", + "author": "Jae-hun Shim; Hyunwoo Yu; Kyeongbo Kong; Suk-Ju Kang", + "authorids": "", + "aff": "Department of Electronic Engineering, Sogang University; Department of Electronic Engineering, Sogang University; Department of Media School, Pukyong National University; Department of Electronic Engineering, Sogang University", + "bibtex": "@article{Shim_Yu_Kong_Kang_2023, title={FeedFormer: Revisiting Transformer Decoder for Efficient Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25321}, DOI={10.1609/aaai.v37i2.25321}, abstractNote={With the success of Vision Transformer (ViT) in image classification, its variants have yielded great success in many downstream vision tasks. Among those, the semantic segmentation task has also benefited greatly from the advance of ViT variants. However, most studies of the transformer for semantic segmentation only focus on designing efficient transformer encoders, rarely giving attention to designing the decoder. Several studies make attempts in using the transformer decoder as the segmentation decoder with class-wise learnable query. Instead, we aim to directly use the encoder features as the queries. This paper proposes the Feature Enhancing Decoder transFormer (FeedFormer) that enhances structural information using the transformer decoder. Our goal is to decode the high-level encoder features using the lowest-level encoder feature. We do this by formulating high-level features as queries, and the lowest-level feature as the key and value. This enhances the high-level features by collecting the structural information from the lowest-level feature. Additionally, we use a simple reformation trick of pushing the encoder blocks to take the place of the existing self-attention module of the decoder to improve efficiency. 
We show the superiority of our decoder with various light-weight transformer-based decoders on popular semantic segmentation datasets. Despite the minute computation, our model has achieved state-of-the-art performance in the performance computation trade-off. Our model FeedFormer-B0 surpasses SegFormer-B0 with 1.8% higher mIoU and 7.1% less computation on ADE20K, and 1.7% higher mIoU and 14.4% less computation on Cityscapes, respectively. Code will be released at: https://github.com/jhshim1995/FeedFormer.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shim, Jae-hun and Yu, Hyunwoo and Kong, Kyeongbo and Kang, Suk-Ju}, year={2023}, month={Jun.}, pages={2263-2271} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25321/25093", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25321", + "pdf_size": 7543724, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2892078858557780856&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "sogang.ac.kr;sogang.ac.kr;pknu.ac.kr;sogang.ac.kr", + "email": "sogang.ac.kr;sogang.ac.kr;pknu.ac.kr;sogang.ac.kr", + "github": "https://github.com/jhshim1995/FeedFormer", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Sogang University;Pukyong National University", + "aff_unique_dep": "Department of Electronic Engineering;Department of Media School", + "aff_unique_url": "http://www.sogang.ac.kr;http://www.pukyong.ac.kr", + "aff_unique_abbr": "Sogang;PKNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25449", + "title": "Few-Shot 3D Point Cloud Semantic Segmentation via Stratified Class-Specific Attention Based Transformer Network", + "track": "main", + "status": "Technical", + "abstract": "3D point cloud semantic segmentation aims to group all points into 
different semantic categories, which benefits important applications such as point cloud scene reconstruction and understanding. Existing supervised point cloud semantic segmentation methods usually require large-scale annotated point clouds for training and cannot handle new categories. While a few-shot learning method was proposed recently to address these two problems, it suffers from high computational complexity caused by graph construction and inability to learn fine-grained relationships among points due to the use of pooling operations. In this paper, we further address these problems by developing a new multi-layer transformer network for few-shot point cloud semantic segmentation. In the proposed network, the query point cloud features are aggregated based on the class-specific support features in different scales. Without using pooling operations, our method makes full use of all pixel-level features from the support samples. By better leveraging the support features for few-shot learning, the proposed method achieves the new state-of-the-art performance, with 15% less inference time, over existing few-shot 3D point cloud segmentation models on the S3DIS dataset and the ScanNet dataset. 
Our code is available\nat https://github.com/czzhang179/SCAT.", + "primary_area": "computer vision iii", + "author": "Canyu Zhang; Zhenyao Wu; Xinyi Wu; Ziyu Zhao; Song Wang", + "authorids": "", + "aff": "University of South Carolina, USA; University of South Carolina, USA; University of South Carolina, USA; University of South Carolina, USA; University of South Carolina, USA", + "bibtex": "@article{Zhang_Wu_Wu_Zhao_Wang_2023, title={Few-Shot 3D Point Cloud Semantic Segmentation via Stratified Class-Specific Attention Based Transformer Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25449}, DOI={10.1609/aaai.v37i3.25449}, abstractNote={3D point cloud semantic segmentation aims to group all points into different semantic categories, which benefits important applications such as point cloud scene reconstruction and understanding. Existing supervised point cloud semantic segmentation methods usually require large-scale annotated point clouds for training and cannot handle new categories. While a few-shot learning method was proposed recently to address these two problems, it suffers from high computational complexity caused by graph construction and inability to learn fine-grained relationships among points due to the use of pooling operations. In this paper, we further address these problems by developing a new multi-layer transformer network for few-shot point cloud semantic segmentation. In the proposed network, the query point cloud features are aggregated based on the class-specific support features in different scales. Without using pooling operations, our method makes full use of all pixel-level features from the support samples. By better leveraging the support features for few-shot learning, the proposed method achieves the new state-of-the-art performance, with 15% less inference time, over existing few-shot 3D point cloud segmentation models on the S3DIS dataset and the ScanNet dataset. 
Our code is available\nat https://github.com/czzhang179/SCAT.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Canyu and Wu, Zhenyao and Wu, Xinyi and Zhao, Ziyu and Wang, Song}, year={2023}, month={Jun.}, pages={3410-3417} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25449/25221", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25449", + "pdf_size": 2728883, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14025304316580482768&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "email.sc.edu;email.sc.edu;email.sc.edu;email.sc.edu;cec.sc.edu", + "email": "email.sc.edu;email.sc.edu;email.sc.edu;email.sc.edu;cec.sc.edu", + "github": "https://github.com/czzhang179/SCAT", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of South Carolina", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25597", + "title": "Few-Shot Composition Learning for Image Retrieval with Prompt Tuning", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of composition learning for image retrieval, for which we learn to retrieve target images with search queries in the form of a composition of a reference image and a modification text that describes desired modifications of the image. Existing models of composition learning for image retrieval are generally built with large-scale datasets, demanding extensive training samples, i.e., query-target pairs, as supervision, which restricts their application for the scenario of few-shot learning with only few query-target pairs available. 
Recently, prompt tuning with frozen pretrained language models has shown remarkable performance when the amount of training data is limited. Inspired by this, we propose a prompt tuning mechanism with the pretrained CLIP model for the task of few-shot composition learning for image retrieval. Specifically, we regard the representation of the reference image as a trainable visual prompt, prefixed to the embedding of the text sequence. One challenge is to efficiently train visual prompt with few-shot samples. To deal with this issue, we further propose a self-upervised auxiliary task via ensuring that the reference image can retrieve itself when no modification information is given from the text, which facilitates training for the visual prompt, while not requiring additional annotations for query-target pairs. Experiments on multiple benchmarks show that our proposed model can yield superior performance when trained with only few query-target pairs.", + "primary_area": "data mining and knowledge management", + "author": "Junda Wu; Rui Wang; Handong Zhao; Ruiyi Zhang; Chaochao Lu; Shuai Li; Ricardo Henao", + "authorids": "", + "aff": "New York University; Duke University; Adobe Research; Adobe Research; University of Cambridge; Shanghai Jiao Tong University; Duke University+King Abdullah University of Science and Technology (KAUST)", + "bibtex": "@article{Wu_Wang_Zhao_Zhang_Lu_Li_Henao_2023, title={Few-Shot Composition Learning for Image Retrieval with Prompt Tuning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25597}, DOI={10.1609/aaai.v37i4.25597}, abstractNote={We study the problem of composition learning for image retrieval, for which we learn to retrieve target images with search queries in the form of a composition of a reference image and a modification text that describes desired modifications of the image. 
Existing models of composition learning for image retrieval are generally built with large-scale datasets, demanding extensive training samples, i.e., query-target pairs, as supervision, which restricts their application for the scenario of few-shot learning with only few query-target pairs available. Recently, prompt tuning with frozen pretrained language models has shown remarkable performance when the amount of training data is limited. Inspired by this, we propose a prompt tuning mechanism with the pretrained CLIP model for the task of few-shot composition learning for image retrieval. Specifically, we regard the representation of the reference image as a trainable visual prompt, prefixed to the embedding of the text sequence. One challenge is to efficiently train visual prompt with few-shot samples. To deal with this issue, we further propose a self-upervised auxiliary task via ensuring that the reference image can retrieve itself when no modification information is given from the text, which facilitates training for the visual prompt, while not requiring additional annotations for query-target pairs. 
Experiments on multiple benchmarks show that our proposed model can yield superior performance when trained with only few query-target pairs.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Junda and Wang, Rui and Zhao, Handong and Zhang, Ruiyi and Lu, Chaochao and Li, Shuai and Henao, Ricardo}, year={2023}, month={Jun.}, pages={4729-4737} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25597/25369", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25597", + "pdf_size": 565999, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9641650746520545309&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "nyu.edu;duke.edu;adobe.com;adobe.com;cam.ac.uk;sjtu.edu.cn;duke.edu", + "email": "nyu.edu;duke.edu;adobe.com;adobe.com;cam.ac.uk;sjtu.edu.cn;duke.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;2;3;4;1+5", + "aff_unique_norm": "New York University;Duke University;Adobe;University of Cambridge;Shanghai Jiao Tong University;King Abdullah University of Science and Technology", + "aff_unique_dep": ";;Adobe Research;;;", + "aff_unique_url": "https://www.nyu.edu;https://www.duke.edu;https://research.adobe.com;https://www.cam.ac.uk;https://www.sjtu.edu.cn;https://www.kaust.edu.sa", + "aff_unique_abbr": "NYU;Duke;Adobe;Cambridge;SJTU;KAUST", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;0;0;0;1;2;0+3", + "aff_country_unique": "United States;United Kingdom;China;Saudi Arabia" + }, + { + "id": "article-25132", + "title": "Few-Shot Defect Image Generation via Defect-Aware Feature Manipulation", + "track": "main", + "status": "Technical", + "abstract": "The performances of defect inspection have been severely hindered by insufficient defect images in industries, which can be alleviated by generating more samples as data augmentation. 
We propose the first defect image generation method in the challenging few-shot cases. Given just a handful of defect images and relatively more defect-free ones, our goal is to augment the dataset with new defect images. Our method consists of two training stages. First, we train a data-efficient StyleGAN2 on defect-free images as the backbone. Second, we attach defect-aware residual blocks to the backbone, which learn to produce reasonable defect masks and accordingly manipulate the features within the masked regions by training the added modules on limited defect images. Extensive experiments on MVTec AD dataset not only validate the effectiveness of our method in generating realistic and diverse defect images, but also manifest the benefits it brings to downstream defect inspection tasks. Codes are available at https://github.com/Ldhlwh/DFMGAN.", + "primary_area": "computer vision i", + "author": "Yuxuan Duan; Yan Hong; Li Niu; Liqing Zhang", + "authorids": "", + "aff": "MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University", + "bibtex": "@article{Duan_Hong_Niu_Zhang_2023, title={Few-Shot Defect Image Generation via Defect-Aware Feature Manipulation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25132}, DOI={10.1609/aaai.v37i1.25132}, abstractNote={The performances of defect inspection have been severely hindered by insufficient defect images in industries, which can be alleviated by generating more samples as data augmentation. We propose the first defect image generation method in the challenging few-shot cases. Given just a handful of defect images and relatively more defect-free ones, our goal is to augment the dataset with new defect images. Our method consists of two training stages. 
First, we train a data-efficient StyleGAN2 on defect-free images as the backbone. Second, we attach defect-aware residual blocks to the backbone, which learn to produce reasonable defect masks and accordingly manipulate the features within the masked regions by training the added modules on limited defect images. Extensive experiments on MVTec AD dataset not only validate the effectiveness of our method in generating realistic and diverse defect images, but also manifest the benefits it brings to downstream defect inspection tasks. Codes are available at https://github.com/Ldhlwh/DFMGAN.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Duan, Yuxuan and Hong, Yan and Niu, Li and Zhang, Liqing}, year={2023}, month={Jun.}, pages={571-578} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25132/24904", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25132", + "pdf_size": 2042894, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5858214750024422205&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sjtu.edu.cn;gmail.com;sjtu.edu.cn;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;gmail.com;sjtu.edu.cn;cs.sjtu.edu.cn", + "github": "https://github.com/Ldhlwh/DFMGAN", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "MoE Key Lab of Artificial Intelligence", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25153", + "title": "Few-Shot Object Detection via Variational Feature Aggregation", + "track": "main", + "status": "Technical", + "abstract": "As few-shot object detectors are often trained with abundant base samples and fine-tuned on few-shot novel examples, the 
learned models are usually biased to base classes and sensitive to the variance of novel examples. To address this issue, we propose a meta-learning framework with two novel feature aggregation schemes. More precisely, we first present a Class-Agnostic Aggregation (CAA) method, where the query and support features can be aggregated regardless of their categories. The interactions between different classes encourage class-agnostic representations and reduce confusion between base and novel classes.\nBased on the CAA, we then propose a Variational Feature Aggregation (VFA) method, which encodes support examples into class-level support features for robust feature aggregation. We use a variational autoencoder to estimate class distributions and sample variational features from distributions that are more robust to the variance of support examples. Besides, we decouple classification and regression tasks so that VFA is performed on the classification branch without affecting object localization. Extensive experiments on PASCAL VOC and COCO demonstrate that our method significantly outperforms a strong baseline (up to 16%) and previous state-of-the-art methods (4% in average).", + "primary_area": "computer vision i", + "author": "Jiaming Han; Yuqiang Ren; Jian Ding; Ke Yan; Gui-Song Xia", + "authorids": "", + "aff": "NERCMS, School of Computer Science, Wuhan University + State Key Lab. LIESMARS, Wuhan University; YouTu Lab, Tencent; NERCMS, School of Computer Science, Wuhan University + State Key Lab. LIESMARS, Wuhan University; YouTu Lab, Tencent; NERCMS, School of Computer Science, Wuhan University + State Key Lab. 
LIESMARS, Wuhan University", + "bibtex": "@article{Han_Ren_Ding_Yan_Xia_2023, title={Few-Shot Object Detection via Variational Feature Aggregation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25153}, DOI={10.1609/aaai.v37i1.25153}, abstractNote={As few-shot object detectors are often trained with abundant base samples and fine-tuned on few-shot novel examples, the learned models are usually biased to base classes and sensitive to the variance of novel examples. To address this issue, we propose a meta-learning framework with two novel feature aggregation schemes. More precisely, we first present a Class-Agnostic Aggregation (CAA) method, where the query and support features can be aggregated regardless of their categories. The interactions between different classes encourage class-agnostic representations and reduce confusion between base and novel classes.\nBased on the CAA, we then propose a Variational Feature Aggregation (VFA) method, which encodes support examples into class-level support features for robust feature aggregation. We use a variational autoencoder to estimate class distributions and sample variational features from distributions that are more robust to the variance of support examples. Besides, we decouple classification and regression tasks so that VFA is performed on the classification branch without affecting object localization. 
Extensive experiments on PASCAL VOC and COCO demonstrate that our method significantly outperforms a strong baseline (up to 16%) and previous state-of-the-art methods (4% in average).}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Han, Jiaming and Ren, Yuqiang and Ding, Jian and Yan, Ke and Xia, Gui-Song}, year={2023}, month={Jun.}, pages={755-763} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25153/24925", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25153", + "pdf_size": 1109108, + "gs_citation": 83, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7574778229979929295&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "whu.edu.cn;tencent.com;whu.edu.cn;tencent.com;whu.edu.cn", + "email": "whu.edu.cn;tencent.com;whu.edu.cn;tencent.com;whu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;0+0;1;0+0", + "aff_unique_norm": "Wuhan University;Tencent", + "aff_unique_dep": "School of Computer Science;YouTu Lab", + "aff_unique_url": "http://www.whu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "WHU;Tencent", + "aff_campus_unique_index": "0+0;0+0;0+0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0+0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26629", + "title": "FiTs: Fine-Grained Two-Stage Training for Knowledge-Aware Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Knowledge-aware question answering (KAQA) requires the model to answer questions over a knowledge base, which is essential for both open-domain QA and domain-specific QA, especially when language models alone cannot provide all the knowledge needed.\nDespite the promising result of recent KAQA systems which tend to integrate linguistic knowledge from pre-trained language models (PLM) and factual knowledge from knowledge graphs (KG) to answer complex questions, a 
bottleneck exists in effectively fusing the representations from PLMs and KGs because of (i) the semantic and distributional gaps between them, and (ii) the difficulties in joint reasoning over the provided knowledge from both modalities.\nTo address the above two problems, we propose a Fine-grained Two-stage training framework (FiTs) to boost the KAQA system performance: The first stage aims at aligning representations from the PLM and the KG, thus bridging the modality gaps between them, named knowledge adaptive post-training. The second stage, called knowledge-aware fine-tuning, aims to improve the model's joint reasoning ability based on the aligned representations.\nIn detail, we fine-tune the post-trained model via two auxiliary self-supervised tasks in addition to the QA supervision.\nExtensive experiments demonstrate that our approach achieves state-of-the-art performance on three benchmarks in the commonsense reasoning (i.e., CommonsenseQA, OpenbookQA) and medical question answering (i.e., MedQA-USMILE) domains.", + "primary_area": "speech natural language processing", + "author": "Qichen Ye; Bowen Cao; Nuo Chen; Weiyuan Xu; Yuexian Zou", + "authorids": "", + "aff": "ADSPLAB, School of ECE, Peking University, Shenzhen, China; ADSPLAB, School of ECE, Peking University, Shenzhen, China; Hong Kong University of Science and Technology (Guangzhou) + Hong Kong University of Science and Technology; ADSPLAB, School of ECE, Peking University, Shenzhen, China; ADSPLAB, School of ECE, Peking University, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China", + "bibtex": "@article{Ye_Cao_Chen_Xu_Zou_2023, title={FiTs: Fine-Grained Two-Stage Training for Knowledge-Aware Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26629}, DOI={10.1609/aaai.v37i11.26629}, abstractNote={Knowledge-aware question answering (KAQA) requires the model to answer questions over a knowledge base, which is essential for both open-domain QA and 
domain-specific QA, especially when language models alone cannot provide all the knowledge needed.\nDespite the promising result of recent KAQA systems which tend to integrate linguistic knowledge from pre-trained language models (PLM) and factual knowledge from knowledge graphs (KG) to answer complex questions, a bottleneck exists in effectively fusing the representations from PLMs and KGs because of (i) the semantic and distributional gaps between them, and (ii) the difficulties in joint reasoning over the provided knowledge from both modalities.\nTo address the above two problems, we propose a Fine-grained Two-stage training framework (FiTs) to boost the KAQA system performance: The first stage aims at aligning representations from the PLM and the KG, thus bridging the modality gaps between them, named knowledge adaptive post-training. The second stage, called knowledge-aware fine-tuning, aims to improve the model\u2019s joint reasoning ability based on the aligned representations.\nIn detail, we fine-tune the post-trained model via two auxiliary self-supervised tasks in addition to the QA supervision.\nExtensive experiments demonstrate that our approach achieves state-of-the-art performance on three benchmarks in the commonsense reasoning (i.e., CommonsenseQA, OpenbookQA) and medical question answering (i.e., MedQA-USMILE) domains.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Qichen and Cao, Bowen and Chen, Nuo and Xu, Weiyuan and Zou, Yuexian}, year={2023}, month={Jun.}, pages={13914-13922} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26629/26401", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26629", + "pdf_size": 930670, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13281538575510040198&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 9, + "aff_domain": "pku.edu.cn;stu.pku.edu.cn;gmail.com;stu.pku.edu.cn;pku.edu.cn", + 
"email": "pku.edu.cn;stu.pku.edu.cn;gmail.com;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1+1;0;0+2", + "aff_unique_norm": "Peking University;Hong Kong University of Science and Technology;Peng Cheng Laboratory", + "aff_unique_dep": "School of ECE;;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.ust.hk;", + "aff_unique_abbr": "PKU;HKUST;", + "aff_campus_unique_index": "0;0;1;0;0+0", + "aff_campus_unique": "Shenzhen;Guangzhou;", + "aff_country_unique_index": "0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25577", + "title": "FinalMLP: An Enhanced Two-Stream MLP Model for CTR Prediction", + "track": "main", + "status": "Technical", + "abstract": "Click-through rate (CTR) prediction is one of the fundamental tasks in online advertising and recommendation. Multi-layer perceptron (MLP) serves as a core component in many deep CTR prediction models, but it has been widely shown that applying a vanilla MLP network alone is ineffective in learning complex feature interactions. As such, many two-stream models (e.g., Wide&Deep, DeepFM, and DCN) have recently been proposed, aiming to integrate two parallel sub-networks to learn feature interactions from two different views for enhanced CTR prediction. In addition to one MLP stream that learns feature interactions implicitly, most of the existing research focuses on designing another stream to complement the MLP stream with explicitly enhanced feature interactions. Instead, this paper presents a simple two-stream feature interaction model, namely FinalMLP, which employs only MLPs in both streams yet achieves surprisingly strong performance. 
In contrast to sophisticated network design in each stream, our work enhances CTR modeling through a feature selection module, which produces differentiated feature inputs to two streams, and a group-wise bilinear fusion module, which effectively captures stream-level interactions across two streams. We show that FinalMLP achieves competitive or even better performance against many existing two-stream CTR models on four open benchmark datasets and also brings significant CTR improvements during an online A/B test in our industrial news recommender system. We envision that the simple yet effective FinalMLP model could serve as a new strong baseline for future development of two-stream CTR models. Our source code will be available at MindSpore/models and FuxiCTR/model_zoo.", + "primary_area": "data mining and knowledge management", + "author": "Kelong Mao; Jieming Zhu; Liangcai Su; Guohao Cai; Yuru Li; Zhenhua Dong", + "authorids": "", + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Huawei Noah\u2019s Ark Lab; Tsinghua University; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab", + "bibtex": "@article{Mao_Zhu_Su_Cai_Li_Dong_2023, title={FinalMLP: An Enhanced Two-Stream MLP Model for CTR Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25577}, DOI={10.1609/aaai.v37i4.25577}, abstractNote={Click-through rate (CTR) prediction is one of the fundamental tasks in online advertising and recommendation. Multi-layer perceptron (MLP) serves as a core component in many deep CTR prediction models, but it has been widely shown that applying a vanilla MLP network alone is ineffective in learning complex feature interactions. As such, many two-stream models (e.g., Wide&Deep, DeepFM, and DCN) have recently been proposed, aiming to integrate two parallel sub-networks to learn feature interactions from two different views for enhanced CTR prediction. 
In addition to one MLP stream that learns feature interactions implicitly, most of the existing research focuses on designing another stream to complement the MLP stream with explicitly enhanced feature interactions. Instead, this paper presents a simple two-stream feature interaction model, namely FinalMLP, which employs only MLPs in both streams yet achieves surprisingly strong performance. In contrast to sophisticated network design in each stream, our work enhances CTR modeling through a feature selection module, which produces differentiated feature inputs to two streams, and a group-wise bilinear fusion module, which effectively captures stream-level interactions across two streams. We show that FinalMLP achieves competitive or even better performance against many existing two-stream CTR models on four open benchmark datasets and also brings significant CTR improvements during an online A/B test in our industrial news recommender system. We envision that the simple yet effective FinalMLP model could serve as a new strong baseline for future development of two-stream CTR models. 
Our source code will be available at MindSpore/models and FuxiCTR/model_zoo.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Kelong and Zhu, Jieming and Su, Liangcai and Cai, Guohao and Li, Yuru and Dong, Zhenhua}, year={2023}, month={Jun.}, pages={4552-4560} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25577/25349", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25577", + "pdf_size": 225573, + "gs_citation": 88, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17530916796284472414&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 8, + "aff_domain": "gmail.com;ieee.org;mails.tsinghua.edu.cn; ; ; ", + "email": "gmail.com;ieee.org;mails.tsinghua.edu.cn; ; ; ", + "github": "", + "project": "MindSpore/models; FuxiCTR/model zoo", + "author_num": 6, + "aff_unique_index": "0;1;2;1;1;1", + "aff_unique_norm": "Renmin University of China;Huawei;Tsinghua University", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.huawei.com;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "RUC;Huawei;THU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25293", + "title": "Find Beauty in the Rare: Contrastive Composition Feature Clustering for Nontrivial Cropping Box Regression", + "track": "main", + "status": "Technical", + "abstract": "Automatic image cropping algorithms aim to recompose images like human-being photographers by generating the cropping boxes with improved composition quality. Cropping box regression approaches learn the beauty of composition from annotated cropping boxes. However, the bias of annotations leads to quasi-trivial recomposing results, which has an obvious tendency to the average location of training samples. 
The crux of this predicament is that the task is naively treated as a box regression problem, where rare samples might be dominated by normal samples, and the composition patterns of rare samples are not well exploited. Observing that similar composition patterns tend to be shared by the cropping boundaries annotated nearly, we argue to find the beauty of composition from the rare samples by clustering the samples with similar cropping boundary annotations, i.e., similar composition patterns. We propose a novel Contrastive Composition Clustering (C2C) to regularize the composition features by contrasting dynamically established similar and dissimilar pairs. In this way, common composition patterns of multiple images can be better summarized, which especially benefits the rare samples and endows our model with better generalizability to render nontrivial results. Extensive experimental results show the superiority of our model compared with prior arts. We also illustrate the philosophy of our design with an interesting analytical visualization.", + "primary_area": "computer vision ii", + "author": "Zhiyu Pan; Yinpeng Chen; Jiale Zhang; Hao Lu; Zhiguo Cao; Weicai Zhong", + "authorids": "", + "aff": "Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, 
Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; Huawei CBG Consumer Cloud Service Search Product & Big Data Platform Department", + "bibtex": "@article{Pan_Chen_Zhang_Lu_Cao_Zhong_2023, title={Find Beauty in the Rare: Contrastive Composition Feature Clustering for Nontrivial Cropping Box Regression}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25293}, DOI={10.1609/aaai.v37i2.25293}, abstractNote={Automatic image cropping algorithms aim to recompose images like human-being photographers by generating the cropping boxes with improved composition quality. Cropping box regression approaches learn the beauty of composition from annotated cropping boxes. However, the bias of annotations leads to quasi-trivial recomposing results, which has an obvious tendency to the average location of training samples. The crux of this predicament is that the task is naively treated as a box regression problem, where rare samples might be dominated by normal samples, and the composition patterns of rare samples are not well exploited. Observing that similar composition patterns tend to be shared by the cropping boundaries annotated nearly, we argue to find the beauty of composition from the rare samples by clustering the samples with similar cropping boundary annotations, i.e., similar composition patterns. We propose a novel Contrastive Composition Clustering (C2C) to regularize the composition features by contrasting dynamically established similar and dissimilar pairs. In this way, common composition patterns of multiple images can be better summarized, which especially benefits the rare samples and endows our model with better generalizability to render nontrivial results. Extensive experimental results show the superiority of our model compared with prior arts. 
We also illustrate the philosophy of our design with an interesting analytical visualization.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Zhiyu and Chen, Yinpeng and Zhang, Jiale and Lu, Hao and Cao, Zhiguo and Zhong, Weicai}, year={2023}, month={Jun.}, pages={2011-2019} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25293/25065", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25293", + "pdf_size": 6314750, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12096282145087056264&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn;hust.edu.cn; ; ;hust.edu.cn; ", + "email": "hust.edu.cn;hust.edu.cn; ; ;hust.edu.cn; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "Huazhong University of Science and Technology;Huawei", + "aff_unique_dep": "School of Artificial Intelligence and Automation;Consumer Cloud Service Search Product & Big Data Platform Department", + "aff_unique_url": "http://www.hust.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "HUST;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25681", + "title": "Finding Fair Allocations under Budget Constraints", + "track": "main", + "status": "Technical", + "abstract": "We study the fair allocation of indivisible goods among agents with identical, additive valuations but individual budget constraints. Here, the indivisible goods--each with a specific size and value--need to be allocated such that the bundle assigned to each agent is of total size at most the agent's budget. 
Since envy-free allocations do not necessarily exist in the indivisible goods context, compelling relaxations--in particular, the notion of envy-freeness up to k goods (EFk)--have received significant attention in recent years. In an EFk allocation, each agent prefers its own bundle over that of any other agent, up to the removal of k goods, and the agents have similarly bounded envy against the charity (which corresponds to the set of all unallocated goods). It has been shown in prior work that an allocation that satisfies the budget constraints and maximizes the Nash social welfare is 1/4-approximately EF1. However, the computation (or even existence) of exact EFk allocations remained an intriguing open problem.\n\nWe make notable progress towards this by proposing a simple, greedy, polynomial-time algorithm that computes EF2 allocations under budget constraints. Our algorithmic result implies the universal existence of EF2 allocations in this fair division context. The analysis of the algorithm exploits intricate structural properties of envy-freeness. Interestingly, the same algorithm also provides EF1 guarantees for important special cases. Specifically, we settle the existence of EF1 allocations for instances in which: (i) the value of each good is proportional to its size, (ii) all the goods have the same size, or (iii) all the goods have the same value. Our EF2 result even extends to the setting wherein the goods' sizes are agent specific.", + "primary_area": "game theory and economic paradigms", + "author": "Siddharth Barman; Arindam Khan; Sudarshan Shyam; K. V. N. 
Sreenivas", + "authorids": "", + "aff": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore; Department of Computer Science and Automation, Indian Institute of Science, Bangalore; Department of Computer Science and Automation, Indian Institute of Science, Bangalore + Department of Computer Science, Aarhus University; Department of Computer Science and Automation, Indian Institute of Science, Bangalore", + "bibtex": "@article{Barman_Khan_Shyam_Sreenivas_2023, title={Finding Fair Allocations under Budget Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25681}, DOI={10.1609/aaai.v37i5.25681}, abstractNote={We study the fair allocation of indivisible goods among agents with identical, additive valuations but individual budget constraints. Here, the indivisible goods--each with a specific size and value--need to be allocated such that the bundle assigned to each agent is of total size at most the agent\u2019s budget. Since envy-free allocations do not necessarily exist in the indivisible goods context, compelling relaxations--in particular, the notion of envy-freeness up to k goods (EFk)--have received significant attention in recent years. In an EFk allocation, each agent prefers its own bundle over that of any other agent, up to the removal of k goods, and the agents have similarly bounded envy against the charity (which corresponds to the set of all unallocated goods). It has been shown in prior work that an allocation that satisfies the budget constraints and maximizes the Nash social welfare is 1/4-approximately EF1. However, the computation (or even existence) of exact EFk allocations remained an intriguing open problem. We make notable progress towards this by proposing a simple, greedy, polynomial-time algorithm that computes EF2 allocations under budget constraints. Our algorithmic result implies the universal existence of EF2 allocations in this fair division context. 
The analysis of the algorithm exploits intricate structural properties of envy-freeness. Interestingly, the same algorithm also provides EF1 guarantees for important special cases. Specifically, we settle the existence of EF1 allocations for instances in which: (i) the value of each good is proportional to its size, (ii) all the goods have the same size, or (iii) all the goods have the same value. Our EF2 result even extends to the setting wherein the goods\u2019 sizes are agent specific.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Barman, Siddharth and Khan, Arindam and Shyam, Sudarshan and Sreenivas, K. V. N.}, year={2023}, month={Jun.}, pages={5481-5489} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25681/25453", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25681", + "pdf_size": 173395, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8145121730212731300&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "iisc.ac.in;iisc.ac.in;gmail.com;iisc.ac.in", + "email": "iisc.ac.in;iisc.ac.in;gmail.com;iisc.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "Indian Institute of Science;Aarhus University", + "aff_unique_dep": "Department of Computer Science and Automation;Department of Computer Science", + "aff_unique_url": "https://www.iisc.ac.in;https://au.dk", + "aff_unique_abbr": "IISc;AU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Bangalore;", + "aff_country_unique_index": "0;0;0+1;0", + "aff_country_unique": "India;Denmark" + }, + { + "id": "article-25518", + "title": "Finding Good Partial Assignments during Restart-Based Branch and Bound Search", + "track": "main", + "status": "Technical", + "abstract": "Restart-based Branch-and-Bound Search (BBS) is a standard algorithm for solving Constraint Optimization Problems (COPs). 
In this paper, we propose an approach to find good partial assignments to jumpstart search at each restart for general COPs, which are identified by comparing different best solutions found in different restart runs. We consider information extracted from historical solutions to evaluate the quality of the partial assignments. Thus the good partial assignments are dynamically updated as the current best solution evolves. Our approach makes restart-based BBS explore different promising sub-search-spaces to find high-quality solutions. Experiments on the MiniZinc benchmark suite show how our approach brings significant improvements to a black-box COP solver equipped with the state of the art search techniques. Our method finds better solutions and proves optimality for more instances.", + "primary_area": "constraint satisfaction and optimization", + "author": "Hongbo Li; Jimmy H.M. Lee", + "authorids": "", + "aff": "School of Information Science and Technology, Northeast Normal University, Changchun, China; Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong", + "bibtex": "@article{Li_Lee_2023, title={Finding Good Partial Assignments during Restart-Based Branch and Bound Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25518}, DOI={10.1609/aaai.v37i4.25518}, abstractNote={Restart-based Branch-and-Bound Search (BBS) is a standard algorithm for solving Constraint Optimization Problems (COPs). In this paper, we propose an approach to find good partial assignments to jumpstart search at each restart for general COPs, which are identified by comparing different best solutions found in different restart runs. We consider information extracted from historical solutions to evaluate the quality of the partial assignments. Thus the good partial assignments are dynamically updated as the current best solution evolves. 
Our approach makes restart-based BBS explore different promising sub-search-spaces to find high-quality solutions. Experiments on the MiniZinc benchmark suite show how our approach brings significant improvements to a black-box COP solver equipped with the state of the art search techniques. Our method finds better solutions and proves optimality for more instances.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Hongbo and Lee, Jimmy H.M.}, year={2023}, month={Jun.}, pages={4035-4043} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25518/25290", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25518", + "pdf_size": 229119, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:SHsTFOpT6FIJ:scholar.google.com/&scioq=Finding+Good+Partial+Assignments+during+Restart-Based+Branch+and+Bound+Search&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "nenu.edu.cn;cse.cuhk.edu.hk", + "email": "nenu.edu.cn;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Northeast Normal University;The Chinese University of Hong Kong", + "aff_unique_dep": "School of Information Science and Technology;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.nenu.edu.cn;https://www.cuhk.edu.hk", + "aff_unique_abbr": "NENU;CUHK", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Changchun;Shatin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25650", + "title": "Fine-Grained Position Helps Memorizing More, a Novel Music Compound Transformer Model with Feature Interaction Fusion", + "track": "main", + "status": "Technical", + "abstract": "Due to the particularity of the simultaneous occurrence of multiple events in music sequences, compound Transformer is proposed to deal with the challenge of long sequences. 
However, there are two deficiencies in the compound Transformer. First, since the order of events is more important for music than natural language, the information provided by the original absolute position embedding is not precise enough. Second, there is an important correlation between the tokens in the compound word, which is ignored by the current compound Transformer. Therefore, in this work, we propose an improved compound Transformer model for music understanding. Specifically, we propose an attribute embedding fusion module and a novel position encoding scheme with absolute-relative consideration. In the attribute embedding fusion module, different attributes are fused through feature permutation by using a multi-head self-attention mechanism in order to capture rich interactions between attributes.\nIn the novel position encoding scheme, we propose RoAR position encoding, which realizes rotational absolute position encoding, relative position encoding, and absolute-relative position interactive encoding, providing clear and rich orders for musical events. \nEmpirical study on four typical music understanding tasks shows that our attribute fusion approach and RoAR position encoding brings large performance gains. In addition, we further investigate the impact of masked language modeling and causal language modeling pre-training on music understanding.", + "primary_area": "domain s of application", + "author": "Zuchao Li; Ruhan Gong; Yineng Chen; Kehua Su", + "authorids": "", + "aff": "School of Computer Science, Wuhan University, Wuhan 430072, P. R. China; School of Computer Science, Wuhan University, Wuhan 430072, P. R. China; School of Computer Science, Wuhan University, Wuhan 430072, P. R. China; School of Computer Science, Wuhan University, Wuhan 430072, P. R. 
China", + "bibtex": "@article{Li_Gong_Chen_Su_2023, title={Fine-Grained Position Helps Memorizing More, a Novel Music Compound Transformer Model with Feature Interaction Fusion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25650}, DOI={10.1609/aaai.v37i4.25650}, abstractNote={Due to the particularity of the simultaneous occurrence of multiple events in music sequences, compound Transformer is proposed to deal with the challenge of long sequences. However, there are two deficiencies in the compound Transformer. First, since the order of events is more important for music than natural language, the information provided by the original absolute position embedding is not precise enough. Second, there is an important correlation between the tokens in the compound word, which is ignored by the current compound Transformer. Therefore, in this work, we propose an improved compound Transformer model for music understanding. Specifically, we propose an attribute embedding fusion module and a novel position encoding scheme with absolute-relative consideration. In the attribute embedding fusion module, different attributes are fused through feature permutation by using a multi-head self-attention mechanism in order to capture rich interactions between attributes.\nIn the novel position encoding scheme, we propose RoAR position encoding, which realizes rotational absolute position encoding, relative position encoding, and absolute-relative position interactive encoding, providing clear and rich orders for musical events. Empirical study on four typical music understanding tasks shows that our attribute fusion approach and RoAR position encoding brings large performance gains. 
In addition, we further investigate the impact of masked language modeling and causal language modeling pre-training on music understanding.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zuchao and Gong, Ruhan and Chen, Yineng and Su, Kehua}, year={2023}, month={Jun.}, pages={5203-5212} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25650/25422", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25650", + "pdf_size": 568165, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9646145145660786615&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Wuhan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.whu.edu.cn", + "aff_unique_abbr": "WHU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China"
Specifically, FRPT only needs to learn fewer parameters in the prompt and adaptation instead of fine-tuning the entire model, thus solving the issue of convergence to suboptimal solutions caused by fine-tuning the entire model. Technically, a discriminative perturbation prompt (DPP) is introduced and deemed as a sample prompting process, which amplifies and even exaggerates some discriminative elements contributing to category prediction via a content-aware inhomogeneous sampling operation. In this way, DPP can make the fine-grained retrieval task aided by the perturbation prompts close to the solved task during the original pre-training. Thereby, it preserves the generalization and discrimination of representation extracted from input samples. Besides, a category-specific awareness head is proposed and regarded as feature adaptation, which removes the species discrepancies in features extracted by the pre-trained model using category-guided instance normalization. And thus, it makes the optimized features only include the discrepancies among subcategories. 
Extensive experiments demonstrate that our FRPT with fewer learnable parameters achieves the state-of-the-art performance on three widely-used fine-grained datasets.", + "primary_area": "computer vision ii", + "author": "Shijie Wang; Jianlong Chang; Zhihui Wang; Haojie Li; Wanli Ouyang; Qi Tian", + "authorids": "", + "aff": "International School of Information Science & Engineering, Dalian University of Technology, China; Huawei Cloud & AI, China; International School of Information Science & Engineering, Dalian University of Technology, China; College of Computer and Engineering, Shandong University of Science and Technology, China + International School of Information Science & Engineering, Dalian University of Technology, China; SenseTime Computer Vision Research Group, The University of Sydney, Australia; Huawei Cloud & AI, China", + "bibtex": "@article{Wang_Chang_Wang_Li_Ouyang_Tian_2023, title={Fine-Grained Retrieval Prompt Tuning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25363}, DOI={10.1609/aaai.v37i2.25363}, abstractNote={Fine-grained object retrieval aims to learn discriminative representation to retrieve visually similar objects. However, existing top-performing works usually impose pairwise similarities on the semantic embedding spaces or design a localization sub-network to continually fine-tune the entire model in limited data scenarios, thus resulting in convergence to suboptimal solutions. In this paper, we develop Fine-grained Retrieval Prompt Tuning (FRPT), which steers a frozen pre-trained model to perform the fine-grained retrieval task from the perspectives of sample prompting and feature adaptation. Specifically, FRPT only needs to learn fewer parameters in the prompt and adaptation instead of fine-tuning the entire model, thus solving the issue of convergence to suboptimal solutions caused by fine-tuning the entire model. 
Technically, a discriminative perturbation prompt (DPP) is introduced and deemed as a sample prompting process, which amplifies and even exaggerates some discriminative elements contributing to category prediction via a content-aware inhomogeneous sampling operation. In this way, DPP can make the fine-grained retrieval task aided by the perturbation prompts close to the solved task during the original pre-training. Thereby, it preserves the generalization and discrimination of representation extracted from input samples. Besides, a category-specific awareness head is proposed and regarded as feature adaptation, which removes the species discrepancies in features extracted by the pre-trained model using category-guided instance normalization. And thus, it makes the optimized features only include the discrepancies among subcategories. Extensive experiments demonstrate that our FRPT with fewer learnable parameters achieves the state-of-the-art performance on three widely-used fine-grained datasets.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Shijie and Chang, Jianlong and Wang, Zhihui and Li, Haojie and Ouyang, Wanli and Tian, Qi}, year={2023}, month={Jun.}, pages={2644-2652} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25363/25135", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25363", + "pdf_size": 2125411, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12410273432788063857&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "dlut.edu.cn; ;dlut.edu.cn;dlut.edu.cn; ; ", + "email": "dlut.edu.cn; ;dlut.edu.cn;dlut.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2+0;3;1", + "aff_unique_norm": "Dalian University of Technology;Huawei Cloud & AI;Shandong University of Science and Technology;The University of Sydney", + "aff_unique_dep": "International School of Information Science & 
Engineering;;College of Computer and Engineering;Computer Vision Research Group", + "aff_unique_url": "http://en.dlut.edu.cn/;https://www.huawei.com/en/cloud;;https://www.sydney.edu.au", + "aff_unique_abbr": "DUT;Huawei Cloud & AI;;USYD", + "aff_campus_unique_index": "0;0;0;2", + "aff_campus_unique": "Dalian;;Sydney", + "aff_country_unique_index": "0;0;0;0+0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25786", + "title": "Finite Based Contraction and Expansion via Models", + "track": "main", + "status": "Technical", + "abstract": "We propose a new paradigm for Belief Change in which the new information is represented as sets of models, while the agent's body of knowledge is represented as a finite set of formulae, that is, a finite base. The focus on finiteness is crucial when we consider limited agents and reasoning algorithms. Moreover, having the input as arbitrary set of models is more general than the usual treatment of formulas as input. In this setting, we define new Belief Change operations akin to traditional expansion and contraction, and we identify the rationality postulates that emerge due to the finite representability requirement. We also analyse different logics concerning compatibility with our framework.", + "primary_area": "knowledge representation and reasoning", + "author": "Ricardo Guimar\u00e3es; Ana Ozaki; Jandson S. Ribeiro", + "authorids": "", + "aff": "University of Bergen; University of Bergen; University of Hagen", + "bibtex": "@article{Guimar\u00e3es_Ozaki_Ribeiro_2023, title={Finite Based Contraction and Expansion via Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25786}, DOI={10.1609/aaai.v37i5.25786}, abstractNote={We propose a new paradigm for Belief Change in which the new information is represented as sets of models, while the agent\u2019s body of knowledge is represented as a finite set of formulae, that is, a finite base. 
The focus on finiteness is crucial when we consider limited agents and reasoning algorithms. Moreover, having the input as arbitrary set of models is more general than the usual treatment of formulas as input. In this setting, we define new Belief Change operations akin to traditional expansion and contraction, and we identify the rationality postulates that emerge due to the finite representability requirement. We also analyse different logics concerning compatibility with our framework.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guimar\u00e3es, Ricardo and Ozaki, Ana and Ribeiro, Jandson S.}, year={2023}, month={Jun.}, pages={6389-6397} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25786/25558", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25786", + "pdf_size": 155068, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7779388857675249432&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "uib.no;uib.no;fernuni-hagen.de", + "email": "uib.no;uib.no;fernuni-hagen.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of Bergen;University of Hagen", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uib.no;https://www.fernuni-hagen.de", + "aff_unique_abbr": "uib;UH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Norway;Germany"
In this paper, we introduce a generalization of Fisher markets, namely influence Fisher markets, which captures the impact of social influence on buyers\u2019 utilities. We show that competitive equilibria in influence Fisher markets correspond to generalized Nash equilibria in an associated pseudo-game, which implies the existence of competitive equilibria in all influence Fisher markets with continuous and concave utility functions. We then construct a monotone pseudo-game, whose variational equilibria and their duals together characterize competitive equilibria in influence Fisher markets with continuous, jointly concave, and homogeneous utility functions. This observation implies that competitive equilibria in these markets can be computed in polynomial time under standard smoothness assumptions on the utility functions. The dual of this second pseudo-game enables us to interpret the competitive equilibria of influence CCH Fisher markets as the solutions to a system of simultaneous Stackelberg games. Finally, we derive a novel first-order method that solves this Stackelberg system in polynomial time, prove that it is equivalent to computing competitive equilibrium prices via t\u00e2tonnement, and run experiments that confirm our theoretical results.", + "primary_area": "game theory and economic paradigms", + "author": "Jiayi Zhao; Denizalp Goktas; Amy Greenwald", + "authorids": "", + "aff": "Department of Computer Science, Pomona College; Department of Computer Science, Brown University; Department of Computer Science, Brown University", + "bibtex": "@article{Zhao_Goktas_Greenwald_2023, title={Fisher Markets with Social Influence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25731}, DOI={10.1609/aaai.v37i5.25731}, abstractNote={A Fisher market is an economic model of buyer and seller interactions in which each buyer\u2019s utility depends only on the bundle of goods she obtains. 
Many people\u2019s interests, however, are affected by their social interactions with others. In this paper, we introduce a generalization of Fisher markets, namely influence Fisher markets, which captures the impact of social influence on buyers\u2019 utilities. We show that competitive equilibria in influence Fisher markets correspond to generalized Nash equilibria in an associated pseudo-game, which implies the existence of competitive equilibria in all influence Fisher markets with continuous and concave utility functions. We then construct a monotone pseudo-game, whose variational equilibria and their duals together characterize competitive equilibria in influence Fisher markets with continuous, jointly concave, and homogeneous utility functions. This observation implies that competitive equilibria in these markets can be computed in polynomial time under standard smoothness assumptions on the utility functions. The dual of this second pseudo-game enables us to interpret the competitive equilibria of influence CCH Fisher markets as the solutions to a system of simultaneous Stackelberg games. 
Finally, we derive a novel first-order method that solves this Stackelberg system in polynomial time, prove that it is equivalent to computing competitive equilibrium prices via t\u00e2tonnement, and run experiments that confirm our theoretical results.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Jiayi and Goktas, Denizalp and Greenwald, Amy}, year={2023}, month={Jun.}, pages={5900-5909} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25731/25503", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25731", + "pdf_size": 226206, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6678896637087666522&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mymail.pomona.edu;brown.edu;brown.edu", + "email": "mymail.pomona.edu;brown.edu;brown.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Pomona College;Brown University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.pomona.edu;https://www.brown.edu", + "aff_unique_abbr": "Pomona College;Brown", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26171", + "title": "Fixed-Weight Difference Target Propagation", + "track": "main", + "status": "Technical", + "abstract": "Target Propagation (TP) is a biologically more plausible algorithm than the error backpropagation (BP) to train deep networks, and improving practicality of TP is an open issue. TP methods require the feedforward and feedback networks to form layer-wise autoencoders for propagating the target values generated at the output layer. 
However, this causes certain drawbacks; e.g., careful hyperparameter tuning is required to synchronize the feedforward and feedback training, and frequent updates of the feedback path are usually required than that of the feedforward path. Learning of the feedforward and feedback networks is sufficient to make TP methods capable of training, but is having these layer-wise autoencoders a necessary condition for TP to work? We answer this question by presenting Fixed-Weight Difference Target Propagation (FW-DTP) that keeps the feedback weights constant during training. We confirmed that this simple method, which naturally resolves the abovementioned problems of TP, can still deliver informative target values to hidden layers for a given task; indeed, FW-DTP consistently achieves higher test performance than a baseline, the Difference Target Propagation (DTP), on four classification datasets. We also present a novel propagation architecture that explains the exact form of the feedback function of DTP to analyze FW-DTP. Our code is available at https://github.com/TatsukichiShibuya/Fixed-Weight-Difference-Target-Propagation.", + "primary_area": "machine learning iii", + "author": "Tatsukichi Shibuya; Nakamasa Inoue; Rei Kawakami; Ikuro Sato", + "authorids": "", + "aff": "Tokyo Institute of Technology; Tokyo Institute of Technology; Tokyo Institute of Technology; Tokyo Institute of Technology+Denso IT Laboratory", + "bibtex": "@article{Shibuya_Inoue_Kawakami_Sato_2023, title={Fixed-Weight Difference Target Propagation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26171}, DOI={10.1609/aaai.v37i8.26171}, abstractNote={Target Propagation (TP) is a biologically more plausible algorithm than the error backpropagation (BP) to train deep networks, and improving practicality of TP is an open issue. TP methods require the feedforward and feedback networks to form layer-wise autoencoders for propagating the target values generated at the output layer. 
However, this causes certain drawbacks; e.g., careful hyperparameter tuning is required to synchronize the feedforward and feedback training, and frequent updates of the feedback path are usually required than that of the feedforward path. Learning of the feedforward and feedback networks is sufficient to make TP methods capable of training, but is having these layer-wise autoencoders a necessary condition for TP to work? We answer this question by presenting Fixed-Weight Difference Target Propagation (FW-DTP) that keeps the feedback weights constant during training. We confirmed that this simple method, which naturally resolves the abovementioned problems of TP, can still deliver informative target values to hidden layers for a given task; indeed, FW-DTP consistently achieves higher test performance than a baseline, the Difference Target Propagation (DTP), on four classification datasets. We also present a novel propagation architecture that explains the exact form of the feedback function of DTP to analyze FW-DTP. 
Our code is available at https://github.com/TatsukichiShibuya/Fixed-Weight-Difference-Target-Propagation.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shibuya, Tatsukichi and Inoue, Nakamasa and Kawakami, Rei and Sato, Ikuro}, year={2023}, month={Jun.}, pages={9811-9819} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26171/25943", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26171", + "pdf_size": 1142914, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2673629707851164370&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "m.titech.ac.jp;c.titech.ac.jp;sc.e.titech.ac.jp;c.titech.ac.jp", + "email": "m.titech.ac.jp;c.titech.ac.jp;sc.e.titech.ac.jp;c.titech.ac.jp", + "github": "https://github.com/TatsukichiShibuya/Fixed-Weight-Difference-Target-Propagation", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "Tokyo Institute of Technology;Denso Corporation", + "aff_unique_dep": ";IT Laboratory", + "aff_unique_url": "https://www.titech.ac.jp;https://www.denso.com", + "aff_unique_abbr": "Titech;Denso", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26993", + "title": "Flaky Performances When Pretraining on Relational Databases (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We explore the downstream task performances for graph neural network (GNN) self-supervised learning (SSL) methods trained on subgraphs extracted from relational databases (RDBs). Intuitively, this joint use of SSL and GNNs should allow to leverage more of the available data, which could translate to better results. 
However, we found that naively porting contrastive SSL techniques can cause ``negative transfer'': linear evaluation on fixed representation from a pretrained model performs worse than on representations from the randomly-initialized model. Based on the conjecture that contrastive SSL conflicts with the message passing layers of the GNN, we propose InfoNode: a contrastive loss aiming to maximize the mutual information between a node's initial- and final-layer representation. The primary empirical results support our conjecture and the effectiveness of InfoNode.", + "primary_area": "", + "author": "Shengchao Liu; David Vazquez; Jian Tang; Pierre-Andr\u00e9 No\u00ebl", + "authorids": "", + "aff": "Mila, Qu\u00e9bec AI Institute + Universit\u00e9 de Montr\u00e9al; ServiceNow Research; Mila, Qu\u00e9bec AI Institute + HEC Montr\u00e9al + CIFAR AI Chair; ServiceNow Research", + "bibtex": "@article{Liu_Vazquez_Tang_No\u00ebl_2024, title={Flaky Performances When Pretraining on Relational Databases (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26993}, DOI={10.1609/aaai.v37i13.26993}, abstractNote={We explore the downstream task performances for graph neural network (GNN) self-supervised learning (SSL) methods trained on subgraphs extracted from relational databases (RDBs). Intuitively, this joint use of SSL and GNNs should allow to leverage more of the available data, which could translate to better results. However, we found that naively porting contrastive SSL techniques can cause ``negative transfer\u2019\u2019: linear evaluation on fixed representation from a pretrained model performs worse than on representations from the randomly-initialized model. Based on the conjecture that contrastive SSL conflicts with the message passing layers of the GNN, we propose InfoNode: a contrastive loss aiming to maximize the mutual information between a node\u2019s initial- and final-layer representation. 
The primary empirical results support our conjecture and the effectiveness of InfoNode.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Shengchao and Vazquez, David and Tang, Jian and No\u00ebl, Pierre-Andr\u00e9}, year={2024}, month={Jul.}, pages={16266-16267} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26993/26765", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26993", + "pdf_size": 88346, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6049781733761667347&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mila.quebec; ; ;servicenow.com", + "email": "mila.quebec; ; ;servicenow.com", + "github": "", + "project": "https://arxiv.org/abs/2211.05213", + "author_num": 4, + "aff_unique_index": "0+1;2;0+3+4;2", + "aff_unique_norm": "Mila;Universit\u00e9 de Montr\u00e9al;ServiceNow;HEC Montr\u00e9al;CIFAR", + "aff_unique_dep": "AI Institute;;Research;;AI Chair", + "aff_unique_url": "https://mila.quebec;https://www.umontreal.ca;https://www.servicenow.com;https://www.hec.ca;https://www.cifar.ca", + "aff_unique_abbr": "Mila;UdeM;ServiceNow;HEC;CIFAR", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Qu\u00e9bec;", + "aff_country_unique_index": "0+0;1;0+0+0;1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "article-25146", + "title": "Flexible 3D Lane Detection by Hierarchical Shape Matching", + "track": "main", + "status": "Technical", + "abstract": "As one of the basic while vital technologies for HD map construction, 3D lane detection is still an open problem due to varying visual conditions, complex typologies, and strict demands for precision. In this paper, an end-to-end flexible and hierarchical lane detector is proposed to precisely predict 3D lane lines from point clouds. 
Specifically, we design a hierarchical network predicting flexible representations of lane shapes at different levels, simultaneously collecting global instance semantics and avoiding local errors. In the global scope, we propose to regress parametric curves w.r.t adaptive axes that help to make more robust predictions towards complex scenes, while in the local vision the structure of lane segment is detected in each of the dynamic anchor cells sampled along the global predicted curves. Moreover, corresponding global and local shape matching losses and anchor cell generation strategies are designed. Experiments on two datasets show that we overwhelm current top methods under high precision standards, and full ablation studies also verify each part of our method. Our codes will be released at https://github.com/Doo-do/FHLD.", + "primary_area": "computer vision i", + "author": "Zhihao Guan; Ruixin Liu; Zejian Yuan; Ao Liu; Kun Tang; Tong Zhou; Erlong Li; Chao Zheng; Shuqi Mei", + "authorids": "", + "aff": "Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University, China; Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University, China; Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University, China; T Lab, Tencent Map, Tencent, China; T Lab, Tencent Map, Tencent, China; T Lab, Tencent Map, Tencent, China; T Lab, Tencent Map, Tencent, China; T Lab, Tencent Map, Tencent, China; T Lab, Tencent Map, Tencent, China", + "bibtex": "@article{Guan_Liu_Yuan_Liu_Tang_Zhou_Li_Zheng_Mei_2023, title={Flexible 3D Lane Detection by Hierarchical Shape Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25146}, DOI={10.1609/aaai.v37i1.25146}, abstractNote={As one of the basic while vital technologies for HD map construction, 3D lane detection is still an open problem due to varying visual conditions, complex typologies, and strict demands for precision. 
In this paper, an end-to-end flexible and hierarchical lane detector is proposed to precisely predict 3D lane lines from point clouds. Specifically, we design a hierarchical network predicting flexible representations of lane shapes at different levels, simultaneously collecting global instance semantics and avoiding local errors. In the global scope, we propose to regress parametric curves w.r.t adaptive axes that help to make more robust predictions towards complex scenes, while in the local vision the structure of lane segment is detected in each of the dynamic anchor cells sampled along the global predicted curves. Moreover, corresponding global and local shape matching losses and anchor cell generation strategies are designed. Experiments on two datasets show that we overwhelm current top methods under high precision standards, and full ablation studies also verify each part of our method. Our codes will be released at https://github.com/Doo-do/FHLD.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guan, Zhihao and Liu, Ruixin and Yuan, Zejian and Liu, Ao and Tang, Kun and Zhou, Tong and Li, Erlong and Zheng, Chao and Mei, Shuqi}, year={2023}, month={Jun.}, pages={694-701} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25146/24918", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25146", + "pdf_size": 1795802, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14333855837047002446&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xjtu.edu.cn;stu.xjtu.edu.cn;xjtu.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "email": "stu.xjtu.edu.cn;stu.xjtu.edu.cn;xjtu.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/Doo-do/FHLD", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;1;1;1;1;1;1", + "aff_unique_norm": "Xi'an Jiaotong 
University;Tencent", + "aff_unique_dep": "Institute of Artificial Intelligence and Robotics;Tencent Map", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "XJTU;Tencent", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26427", + "title": "Flexible Budgets in Restless Bandits: A Primal-Dual Algorithm for Efficient Budget Allocation", + "track": "main", + "status": "Technical", + "abstract": "Restless multi-armed bandits (RMABs) are an important model to optimize allocation of limited resources in sequential decision-making settings. Typical RMABs assume the budget --- the number of arms pulled --- to be fixed for each step in the planning horizon. However, for realistic real-world planning, resources are not necessarily limited at each planning step; we may be able to distribute surplus resources in one round to an earlier or later round. In real-world planning settings, this flexibility in budget is often constrained to within a subset of consecutive planning steps, e.g., weekly planning of a monthly budget. In this paper we define a general class of RMABs with flexible budget, which we term F-RMABs, and provide an algorithm to optimally solve for them. We derive a min-max formulation to find optimal policies for F-RMABs and leverage gradient primal-dual algorithms to solve for reward-maximizing policies with flexible budgets. We introduce a scheme to sample expected gradients to apply primal-dual algorithms to the F-RMAB setting and make an otherwise computationally expensive approach tractable. Additionally, we provide heuristics that trade off solution quality for efficiency and present experimental comparisons of different F-RMAB solution approaches.", + "primary_area": "planning routing and scheduling", + "author": "Paula Rodriguez Diaz; Jackson A. 
Killian; Lily Xu; Arun Sai Suggala; Aparna Taneja; Milind Tambe", + "authorids": "", + "aff": "Harvard University; Harvard University; Harvard University; Google Research; Google Research; Harvard University+Google Research", + "bibtex": "@article{Rodriguez Diaz_Killian_Xu_Suggala_Taneja_Tambe_2023, title={Flexible Budgets in Restless Bandits: A Primal-Dual Algorithm for Efficient Budget Allocation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26427}, DOI={10.1609/aaai.v37i10.26427}, abstractNote={Restless multi-armed bandits (RMABs) are an important model to optimize allocation of limited resources in sequential decision-making settings. Typical RMABs assume the budget --- the number of arms pulled --- to be fixed for each step in the planning horizon. However, for realistic real-world planning, resources are not necessarily limited at each planning step; we may be able to distribute surplus resources in one round to an earlier or later round. In real-world planning settings, this flexibility in budget is often constrained to within a subset of consecutive planning steps, e.g., weekly planning of a monthly budget. In this paper we define a general class of RMABs with flexible budget, which we term F-RMABs, and provide an algorithm to optimally solve for them. We derive a min-max formulation to find optimal policies for F-RMABs and leverage gradient primal-dual algorithms to solve for reward-maximizing policies with flexible budgets. We introduce a scheme to sample expected gradients to apply primal-dual algorithms to the F-RMAB setting and make an otherwise computationally expensive approach tractable. Additionally, we provide heuristics that trade off solution quality for efficiency and present experimental comparisons of different F-RMAB solution approaches.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rodriguez Diaz, Paula and Killian, Jackson A. 
and Xu, Lily and Suggala, Arun Sai and Taneja, Aparna and Tambe, Milind}, year={2023}, month={Jun.}, pages={12103-12111} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26427/26199", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26427", + "pdf_size": 485658, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10936627889133236312&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "g.harvard.edu;g.harvard.edu;g.harvard.edu;google.com;google.com;harvard.edu", + "email": "g.harvard.edu;g.harvard.edu;g.harvard.edu;google.com;google.com;harvard.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;0+1", + "aff_unique_norm": "Harvard University;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.harvard.edu;https://research.google", + "aff_unique_abbr": "Harvard;Google Research", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25358", + "title": "Flora: Dual-Frequency LOss-Compensated ReAl-Time Monocular 3D Video Reconstruction", + "track": "main", + "status": "Technical", + "abstract": "In this work, we propose a real-time monocular 3D video reconstruction approach named Flora for reconstructing delicate and complete 3D scenes from RGB video sequences in an end-to-end manner. Specifically, we introduce a novel method with two main contributions. Firstly, the proposed feature aggregation module retains both color and reliability in a dual-frequency form. Secondly, the loss compensation module solves missing structure by correcting losses for falsely pruned voxels. The dual-frequency feature aggregation module enhances reconstruction quality in both precision and recall, and the loss compensation module benefits the recall. 
Notably, both proposed contributions achieve great results with negligible inferencing overhead. Our state-of-the-art experimental results on real-world datasets demonstrate Flora's leading performance in both effectiveness and efficiency. The code is available at https://github.com/NoOneUST/Flora.", + "primary_area": "computer vision ii", + "author": "Likang Wang; Yue Gong; Qirui Wang; Kaixuan Zhou; Lei Chen", + "authorids": "", + "aff": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology; Distributed and Parallel Software Lab, Huawei Technologies; Distributed and Parallel Software Lab, Huawei Technologies; Riemann Lab, Huawei Technologies + Fundamental Software Innovation Lab, Huawei Technologies; Department of Computer Science and Engineering, The Hong Kong University of Science and Technology + Data Science and Analytics Thrust, The Hong Kong University of Science and Technology (Guangzhou)", + "bibtex": "@article{Wang_Gong_Wang_Zhou_Chen_2023, title={Flora: Dual-Frequency LOss-Compensated ReAl-Time Monocular 3D Video Reconstruction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25358}, DOI={10.1609/aaai.v37i2.25358}, abstractNote={In this work, we propose a real-time monocular 3D video reconstruction approach named Flora for reconstructing delicate and complete 3D scenes from RGB video sequences in an end-to-end manner. Specifically, we introduce a novel method with two main contributions. Firstly, the proposed feature aggregation module retains both color and reliability in a dual-frequency form. Secondly, the loss compensation module solves missing structure by correcting losses for falsely pruned voxels. The dual-frequency feature aggregation module enhances reconstruction quality in both precision and recall, and the loss compensation module benefits the recall. Notably, both proposed contributions achieve great results with negligible inferencing overhead. 
Our state-of-the-art experimental results on real-world datasets demonstrate Flora\u2019s leading performance in both effectiveness and efficiency. The code is available at https://github.com/NoOneUST/Flora.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Likang and Gong, Yue and Wang, Qirui and Zhou, Kaixuan and Chen, Lei}, year={2023}, month={Jun.}, pages={2599-2607} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25358/25130", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25358", + "pdf_size": 3453271, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8072898030923733821&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "connect.ust.hk;huawei.com;huawei.com;huawei.com;cse.ust.hk", + "email": "connect.ust.hk;huawei.com;huawei.com;huawei.com;cse.ust.hk", + "github": "https://github.com/NoOneUST/Flora", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1+1;0+0", + "aff_unique_norm": "The Hong Kong University of Science and Technology;Huawei Technologies", + "aff_unique_dep": "Department of Computer Science and Engineering;Distributed and Parallel Software Lab", + "aff_unique_url": "https://www.ust.hk;https://www.huawei.com", + "aff_unique_abbr": "HKUST;Huawei", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26286", + "title": "Flow to Control: Offline Reinforcement Learning with Lossless Primitive Discovery", + "track": "main", + "status": "Technical", + "abstract": "Offline reinforcement learning (RL) enables the agent to effectively learn from logged data, which significantly extends the applicability of RL algorithms in real-world scenarios where exploration can be expensive or unsafe. 
Previous works have shown that extracting primitive skills from the recurring and temporally extended structures in the logged data yields better learning. However, these methods suffer greatly when the primitives have limited representation ability to recover the original policy space, especially in offline settings. In this paper, we give a quantitative characterization of the performance of offline hierarchical learning and highlight the importance of learning lossless primitives. To this end, we propose to use a flow-based structure as the representation for low-level policies. This allows us to represent the behaviors in the dataset faithfully while keeping the expression ability to recover the whole policy space. We show that such lossless primitives can drastically improve the performance of hierarchical policies. The experimental results and extensive ablation studies on the standard D4RL benchmark show that our method has a good representation ability for policies and achieves superior performance in most tasks.", + "primary_area": "machine learning iv", + "author": "Yiqin Yang; Hao Hu; Wenzhe Li; Siyuan Li; Jun Yang; Qianchuan Zhao; Chongjie Zhang", + "authorids": "", + "aff": "Department of Automation, Tsinghua University; Institute for Interdisciplinary Information Sciences, Tsinghua University; Institute for Interdisciplinary Information Sciences, Tsinghua University; Harbin Institute of Technology; Department of Automation, Tsinghua University; Department of Automation, Tsinghua University; Institute for Interdisciplinary Information Sciences, Tsinghua University", + "bibtex": "@article{Yang_Hu_Li_Li_Yang_Zhao_Zhang_2023, title={Flow to Control: Offline Reinforcement Learning with Lossless Primitive Discovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26286}, DOI={10.1609/aaai.v37i9.26286}, abstractNote={Offline reinforcement learning (RL) enables the agent to effectively learn from logged data, which significantly extends 
the applicability of RL algorithms in real-world scenarios where exploration can be expensive or unsafe. Previous works have shown that extracting primitive skills from the recurring and temporally extended structures in the logged data yields better learning. However, these methods suffer greatly when the primitives have limited representation ability to recover the original policy space, especially in offline settings. In this paper, we give a quantitative characterization of the performance of offline hierarchical learning and highlight the importance of learning lossless primitives. To this end, we propose to use a flow-based structure as the representation for low-level policies. This allows us to represent the behaviors in the dataset faithfully while keeping the expression ability to recover the whole policy space. We show that such lossless primitives can drastically improve the performance of hierarchical policies. The experimental results and extensive ablation studies on the standard D4RL benchmark show that our method has a good representation ability for policies and achieves superior performance in most tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Yiqin and Hu, Hao and Li, Wenzhe and Li, Siyuan and Yang, Jun and Zhao, Qianchuan and Zhang, Chongjie}, year={2023}, month={Jun.}, pages={10843-10851} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26286/26058", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26286", + "pdf_size": 633393, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17849154721048090206&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;0;0;0", + 
"aff_unique_norm": "Tsinghua University;Harbin Institute of Technology", + "aff_unique_dep": "Department of Automation;", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "THU;HIT", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25633", + "title": "Flow-Based Robust Watermarking with Invertible Noise Layer for Black-Box Distortions", + "track": "main", + "status": "Technical", + "abstract": "Deep learning-based digital watermarking frameworks have been widely studied recently. Most existing methods adopt an ``encoder-noise layer-decoder''-based architecture where the embedding and extraction processes are accomplished separately by the encoder and the decoder. However, one potential drawback of such a framework is that the encoder and the decoder may not be well coupled, resulting in the fact that the encoder may embed some redundant features into the host image thus influencing the invisibility and robustness of the whole algorithm. To address this limitation, this paper proposes a flow-based robust watermarking framework. The basic component of such framework is an invertible up-down-sampling neural block that can realize the embedding and extraction simultaneously. As a consequence, the encoded feature could keep high consistency with the feature that the decoder needed, which effectively avoids the embedding of redundant features. In addition, to ensure the robustness of black-box distortion, an invertible noise layer (INL) is designed to simulate the distortion and is served as a noise layer in the training stage. Benefiting from its reversibility, INL is also applied as a preprocessing before extraction to eliminate the distortion, which further improves the robustness of the algorithm. 
Extensive experiments demonstrate the superiority of the proposed framework in terms of visual quality and robustness. Compared with the state-of-the-art architecture, the visual quality (measured by PSNR) of the proposed framework improves by 2dB and the extraction accuracy after JPEG compression (QF=50) improves by more than 4%. Besides, the robustness against black-box distortions can be greatly achieved with more than 95% extraction accuracy.", + "primary_area": "domain s of application", + "author": "Han Fang; Yupeng Qiu; Kejiang Chen; Jiyi Zhang; Weiming Zhang; Ee-Chien Chang", + "authorids": "", + "aff": "National University of Singapore; National University of Singapore; University of Science and Technology of China; National University of Singapore; University of Science and Technology of China; National University of Singapore", + "bibtex": "@article{Fang_Qiu_Chen_Zhang_Zhang_Chang_2023, title={Flow-Based Robust Watermarking with Invertible Noise Layer for Black-Box Distortions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25633}, DOI={10.1609/aaai.v37i4.25633}, abstractNote={Deep learning-based digital watermarking frameworks have been widely studied recently. Most existing methods adopt an ``encoder-noise layer-decoder\u2019\u2019-based architecture where the embedding and extraction processes are accomplished separately by the encoder and the decoder. However, one potential drawback of such a framework is that the encoder and the decoder may not be well coupled, resulting in the fact that the encoder may embed some redundant features into the host image thus influencing the invisibility and robustness of the whole algorithm. To address this limitation, this paper proposes a flow-based robust watermarking framework. The basic component of such framework is an invertible up-down-sampling neural block that can realize the embedding and extraction simultaneously. 
As a consequence, the encoded feature could keep high consistency with the feature that the decoder needed, which effectively avoids the embedding of redundant features. In addition, to ensure the robustness of black-box distortion, an invertible noise layer (INL) is designed to simulate the distortion and is served as a noise layer in the training stage. Benefiting from its reversibility, INL is also applied as a preprocessing before extraction to eliminate the distortion, which further improves the robustness of the algorithm. Extensive experiments demonstrate the superiority of the proposed framework in terms of visual quality and robustness. Compared with the state-of-the-art architecture, the visual quality (measured by PSNR) of the proposed framework improves by 2dB and the extraction accuracy after JPEG compression (QF=50) improves by more than 4%. Besides, the robustness against black-box distortions can be greatly achieved with more than 95% extraction accuracy.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Han and Qiu, Yupeng and Chen, Kejiang and Zhang, Jiyi and Zhang, Weiming and Chang, Ee-Chien}, year={2023}, month={Jun.}, pages={5054-5061} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25633/25405", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25633", + "pdf_size": 18999152, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11379041603925557966&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "nus.edu.sg;u.nus.edu;ustc.edu.cn;u.nus.edu;ustc.edu.cn;comp.nus.edu.sg", + "email": "nus.edu.sg;u.nus.edu;ustc.edu.cn;u.nus.edu;ustc.edu.cn;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;1;0", + "aff_unique_norm": "National University of Singapore;University of Science and Technology of China", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.nus.edu.sg;http://www.ustc.edu.cn", + "aff_unique_abbr": "NUS;USTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;1;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25444", + "title": "FlowFace: Semantic Flow-Guided Shape-Aware Face Swapping", + "track": "main", + "status": "Technical", + "abstract": "In this work, we propose a semantic flow-guided two-stage framework for shape-aware face swapping, namely FlowFace. Unlike most previous methods that focus on transferring the source inner facial features but neglect facial contours, our FlowFace can transfer both of them to a target face, thus leading to more realistic face swapping. Concretely, our FlowFace consists of a face reshaping network and a face swapping network. The face reshaping network addresses the shape outline differences between the source and target faces. It first estimates a semantic flow (i.e. face shape differences) between the source and the target face, and then explicitly warps the target face shape with the estimated semantic flow. After reshaping, the face swapping network generates inner facial features that exhibit the identity of the source face. We employ a pre-trained face masked autoencoder (MAE) to extract facial features from both the source face and the target face. In contrast to previous methods that use identity embedding to preserve identity information, the features extracted by our encoder can better capture facial appearances and identity information. Then, we develop a cross-attention fusion module to adaptively fuse inner facial features from the source face with the target facial attributes, thus leading to better identity preservation. 
Extensive quantitative and qualitative experiments on in-the-wild faces demonstrate that our FlowFace outperforms the state-of-the-art significantly.", + "primary_area": "computer vision iii", + "author": "Hao Zeng; Wei Zhang; Changjie Fan; Tangjie Lv; Suzhen Wang; Zhimeng Zhang; Bowen Ma; Lincheng Li; Yu Ding; Xin Yu", + "authorids": "", + "aff": "Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab+Zhejiang University; University of Technology Sydney", + "bibtex": "@article{Zeng_Zhang_Fan_Lv_Wang_Zhang_Ma_Li_Ding_Yu_2023, title={FlowFace: Semantic Flow-Guided Shape-Aware Face Swapping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25444}, DOI={10.1609/aaai.v37i3.25444}, abstractNote={In this work, we propose a semantic flow-guided two-stage framework for shape-aware face swapping, namely FlowFace. Unlike most previous methods that focus on transferring the source inner facial features but neglect facial contours, our FlowFace can transfer both of them to a target face, thus leading to more realistic face swapping. Concretely, our FlowFace consists of a face reshaping network and a face swapping network. The face reshaping network addresses the shape outline differences between the source and target faces. It first estimates a semantic flow (i.e. face shape differences) between the source and the target face, and then explicitly warps the target face shape with the estimated semantic flow. After reshaping, the face swapping network generates inner facial features that exhibit the identity of the source face. 
We employ a pre-trained face masked autoencoder (MAE) to extract facial features from both the source face and the target face. In contrast to previous methods that use identity embedding to preserve identity information, the features extracted by our encoder can better capture facial appearances and identity information. Then, we develop a cross-attention fusion module to adaptively fuse inner facial features from the source face with the target facial attributes, thus leading to better identity preservation. Extensive quantitative and qualitative experiments on in-the-wild faces demonstrate that our FlowFace outperforms the state-of-the-art significantly.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Hao and Zhang, Wei and Fan, Changjie and Lv, Tangjie and Wang, Suzhen and Zhang, Zhimeng and Ma, Bowen and Li, Lincheng and Ding, Yu and Yu, Xin}, year={2023}, month={Jun.}, pages={3367-3375} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25444/25216", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25444", + "pdf_size": 1578661, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1174086680210285798&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;uts.edu.au", + "email": "gmail.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;uts.edu.au", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;0;0;0+1;2", + "aff_unique_norm": "Netease Fuxi AI Lab;Zhejiang University;University of Technology Sydney", + "aff_unique_dep": "Virtual Human Group;;", + "aff_unique_url": "https://www.netease.com;https://www.zju.edu.cn;https://www.uts.edu.au", + "aff_unique_abbr": 
"Netease;ZJU;UTS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25303", + "title": "FoPro: Few-Shot Guided Robust Webly-Supervised Prototypical Learning", + "track": "main", + "status": "Technical", + "abstract": "Recently, webly supervised learning (WSL) has been studied to leverage numerous and accessible data from the Internet. Most existing methods focus on learning noise-robust models from web images while neglecting the performance drop caused by the differences between web domain and real-world domain. However, only by tackling the performance gap above can we fully exploit the practical value of web datasets. To this end, we propose a Few-shot guided Prototypical (FoPro) representation learning method, which only needs a few labeled examples from reality and can significantly improve the performance in the real-world domain. Specifically, we initialize each class center with few-shot real-world data as the ``realistic\" prototype. Then, the intra-class distance between web instances and ``realistic\" prototypes is narrowed by contrastive learning. Finally, we measure image-prototype distance with a learnable metric. Prototypes are polished by adjacent high-quality web images and involved in removing distant out-of-distribution samples. In experiments, FoPro is trained on web datasets with a few real-world examples guided and evaluated on real-world datasets. Our method achieves the state-of-the-art performance on three fine-grained datasets and two large-scale datasets. Compared with existing WSL methods under the same few-shot settings, FoPro still excels in real-world generalization. 
Code is available at https://github.com/yuleiqin/fopro.", + "primary_area": "computer vision ii", + "author": "Yulei Qin; Xingyu Chen; Chao Chen; Yunhang Shen; Bo Ren; Yun Gu; Jie Yang; Chunhua Shen", + "authorids": "", + "aff": "Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Zhejiang University", + "bibtex": "@article{Qin_Chen_Chen_Shen_Ren_Gu_Yang_Shen_2023, title={FoPro: Few-Shot Guided Robust Webly-Supervised Prototypical Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25303}, DOI={10.1609/aaai.v37i2.25303}, abstractNote={Recently, webly supervised learning (WSL) has been studied to leverage numerous and accessible data from the Internet. Most existing methods focus on learning noise-robust models from web images while neglecting the performance drop caused by the differences between web domain and real-world domain. However, only by tackling the performance gap above can we fully exploit the practical value of web datasets. To this end, we propose a Few-shot guided Prototypical (FoPro) representation learning method, which only needs a few labeled examples from reality and can significantly improve the performance in the real-world domain. Specifically, we initialize each class center with few-shot real-world data as the ``realistic" prototype. Then, the intra-class distance between web instances and ``realistic" prototypes is narrowed by contrastive learning. Finally, we measure image-prototype distance with a learnable metric. Prototypes are polished by adjacent high-quality web images and involved in removing distant out-of-distribution samples. In experiments, FoPro is trained on web datasets with a few real-world examples guided and evaluated on real-world datasets. Our method achieves the state-of-the-art performance on three fine-grained datasets and two large-scale datasets. 
Compared with existing WSL methods under the same few-shot settings, FoPro still excels in real-world generalization. Code is available at https://github.com/yuleiqin/fopro.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Yulei and Chen, Xingyu and Chen, Chao and Shen, Yunhang and Ren, Bo and Gu, Yun and Yang, Jie and Shen, Chunhua}, year={2023}, month={Jun.}, pages={2101-2109} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25303/25075", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25303", + "pdf_size": 4749649, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17318007469495939032&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;ieee.org;sjtu.edu.cn;zju.edu.cn", + "email": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;ieee.org;sjtu.edu.cn;zju.edu.cn", + "github": "https://github.com/yuleiqin/fopro", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;1;1;2", + "aff_unique_norm": "Tencent;Shanghai Jiao Tong University;Zhejiang University", + "aff_unique_dep": "YouTu Lab;;", + "aff_unique_url": "https://www.tencent.com;https://www.sjtu.edu.cn;https://www.zju.edu.cn", + "aff_unique_abbr": "Tencent;SJTU;ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26808", + "title": "Food Information Engineering: A Systematic Literature Review", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "In recent years, the research on food information gave rise to the food information engineering domain. 
The goal of this paper is to provide to the research community with a systematic literature review of methodologies, methods and tools used in this domain.", + "primary_area": "", + "author": "Azanzi Jiomekong", + "authorids": "", + "aff": "Department of Computer Science, University of Yaounde I", + "bibtex": "@article{Jiomekong_2024, title={Food Information Engineering: A Systematic Literature Review}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26808}, DOI={10.1609/aaai.v37i13.26808}, abstractNote={In recent years, the research on food information gave rise to the food information engineering domain. The goal of this paper is to provide to the research community with a systematic literature review of methodologies, methods and tools used in this domain.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiomekong, Azanzi}, year={2024}, month={Jul.}, pages={15441-15441} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26808/26580", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26808", + "pdf_size": 37665, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3611911700975576&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "facsciences-uy1.cm", + "email": "facsciences-uy1.cm", + "github": "", + "project": "https://orkg.org/author/R138055", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Yaounde I", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "http://www.u1.cm", + "aff_unique_abbr": "UY1", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Yaounde", + "aff_country_unique_index": "0", + "aff_country_unique": "Cameroon" + }, + { + "id": "article-26688", + "title": "For the Underrepresented in Gender Bias Research: Chinese Name Gender Prediction with Heterogeneous Graph Attention Network", + "track": "aaai special track", + "status": "Technical", + 
"abstract": "Achieving gender equality is an important pillar for humankind\u2019s sustainable future. Pioneering data-driven gender bias research is based on large-scale public records such as scientific papers, patents, and company registrations, covering female researchers, inventors and entrepreneurs, and so on. Since gender information is often missing in relevant datasets, studies rely on tools to infer genders from names. However, available open-sourced Chinese gender-guessing tools are not yet suitable for scientific purposes, which may be partially responsible for female Chinese being underrepresented in mainstream gender bias research and affect their universality. Specifically, these tools focus on character-level information while overlooking the fact that the combinations of Chinese characters in multi-character names, as well as the components and pronunciations of characters, convey important messages. As a first effort, we design a Chinese Heterogeneous Graph Attention (CHGAT) model to capture the heterogeneity in component relationships and incorporate the pronunciations of characters. Our model largely surpasses current tools and also outperforms the state-of-the-art algorithm. Last but not least, the most popular Chinese name-gender dataset is single-character based with far less female coverage from an unreliable source, naturally hindering relevant studies. 
We open-source a more balanced multi-character dataset from an official source together with our code, hoping to help future research promoting gender equality.", + "primary_area": "ai for social impact", + "author": "Zihao Pan; Kai Peng; Shuai Ling; Haipeng Zhang", + "authorids": "", + "aff": "ShanghaiTech University; ShanghaiTech University; ShanghaiTech University; ShanghaiTech University", + "bibtex": "@article{Pan_Peng_Ling_Zhang_2023, title={For the Underrepresented in Gender Bias Research: Chinese Name Gender Prediction with Heterogeneous Graph Attention Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26688}, DOI={10.1609/aaai.v37i12.26688}, abstractNote={Achieving gender equality is an important pillar for humankind\u2019s sustainable future. Pioneering data-driven gender bias research is based on large-scale public records such as scientific papers, patents, and company registrations, covering female researchers, inventors and entrepreneurs, and so on. Since gender information is often missing in relevant datasets, studies rely on tools to infer genders from names. However, available open-sourced Chinese gender-guessing tools are not yet suitable for scientific purposes, which may be partially responsible for female Chinese being underrepresented in mainstream gender bias research and affect their universality. Specifically, these tools focus on character-level information while overlooking the fact that the combinations of Chinese characters in multi-character names, as well as the components and pronunciations of characters, convey important messages. As a first effort, we design a Chinese Heterogeneous Graph Attention (CHGAT) model to capture the heterogeneity in component relationships and incorporate the pronunciations of characters. Our model largely surpasses current tools and also outperforms the state-of-the-art algorithm. 
Last but not least, the most popular Chinese name-gender dataset is single-character based with far less female coverage from an unreliable source, naturally hindering relevant studies. We open-source a more balanced multi-character dataset from an official source together with our code, hoping to help future research promoting gender equality.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Zihao and Peng, Kai and Ling, Shuai and Zhang, Haipeng}, year={2023}, month={Jun.}, pages={14436-14443} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26688/26460", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26688", + "pdf_size": 893130, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15313925886967026699&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "github": "https://github.com/observerss/ngender", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "ShanghaiTech University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.shanghaitech.edu.cn", + "aff_unique_abbr": "ShanghaiTech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26154", + "title": "Forecasting with Sparse but Informative Variables: A Case Study in Predicting Blood Glucose", + "track": "main", + "status": "Technical", + "abstract": "In time-series forecasting, future target values may be affected by both intrinsic and extrinsic effects. When forecasting blood glucose, for example, intrinsic effects can be inferred from the history of the target signal alone (i.e. 
blood glucose), but accurately modeling the impact of extrinsic effects requires auxiliary signals, like the amount of carbohydrates ingested. Standard forecasting techniques often assume that extrinsic and intrinsic effects vary at similar rates. However, when auxiliary signals are generated at a much lower frequency than the target variable (e.g., blood glucose measurements are made every 5 minutes, while meals occur once every few hours), even well-known extrinsic effects (e.g., carbohydrates increase blood glucose) may prove difficult to learn. To better utilize these sparse but informative variables (SIVs), we introduce a novel encoder/decoder forecasting approach that accurately learns the per-timepoint effect of the SIV, by (i) isolating it from intrinsic effects and (ii) restricting its learned effect based on domain knowledge. On a simulated dataset pertaining to the task of blood glucose forecasting, when the SIV is accurately recorded our approach outperforms baseline approaches in terms of rMSE (13.07 [95% CI: 11.77,14.16] vs. 14.14 [12.69,15.27]). In the presence of a corrupted SIV, the proposed approach can still result in lower error compared to the baseline but the advantage is reduced as noise increases. 
By isolating their effects and incorporating domain knowledge, our approach makes it possible to better utilize SIVs in forecasting.", + "primary_area": "machine learning iii", + "author": "Harry Rubin-Falcone; Joyce Lee; Jenna Wiens", + "authorids": "", + "aff": "Division of Computer Science and Engineering, University of Michigan; Division of Pediatric Endocrinology, University of Michigan; Division of Computer Science and Engineering, University of Michigan", + "bibtex": "@article{Rubin-Falcone_Lee_Wiens_2023, title={Forecasting with Sparse but Informative Variables: A Case Study in Predicting Blood Glucose}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26154}, DOI={10.1609/aaai.v37i8.26154}, abstractNote={In time-series forecasting, future target values may be affected by both intrinsic and extrinsic effects. When forecasting blood glucose, for example, intrinsic effects can be inferred from the history of the target signal alone (i.e. blood glucose), but accurately modeling the impact of extrinsic effects requires auxiliary signals, like the amount of carbohydrates ingested. Standard forecasting techniques often assume that extrinsic and intrinsic effects vary at similar rates. However, when auxiliary signals are generated at a much lower frequency than the target variable (e.g., blood glucose measurements are made every 5 minutes, while meals occur once every few hours), even well-known extrinsic effects (e.g., carbohydrates increase blood glucose) may prove difficult to learn. To better utilize these sparse but informative variables (SIVs), we introduce a novel encoder/decoder forecasting approach that accurately learns the per-timepoint effect of the SIV, by (i) isolating it from intrinsic effects and (ii) restricting its learned effect based on domain knowledge. 
On a simulated dataset pertaining to the task of blood glucose forecasting, when the SIV is accurately recorded our approach outperforms baseline approaches in terms of rMSE (13.07 [95% CI: 11.77,14.16] vs. 14.14 [12.69,15.27]). In the presence of a corrupted SIV, the proposed approach can still result in lower error compared to the baseline but the advantage is reduced as noise increases. By isolating their effects and incorporating domain knowledge, our approach makes it possible to better utilize SIVs in forecasting.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rubin-Falcone, Harry and Lee, Joyce and Wiens, Jenna}, year={2023}, month={Jun.}, pages={9650-9657} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26154/25926", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26154", + "pdf_size": 1639034, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17968722147717431033&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "; ; ", + "email": "; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Michigan", + "aff_unique_dep": "Division of Computer Science and Engineering", + "aff_unique_url": "https://www.umich.edu", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Ann Arbor;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26320", + "title": "Foresee What You Will Learn: Data Augmentation for Domain Generalization in Non-stationary Environment", + "track": "main", + "status": "Technical", + "abstract": "Existing domain generalization aims to learn a generalizable model to perform well even on unseen domains. For many real-world machine learning applications, the data distribution often shifts gradually along domain indices. 
For example, a self-driving car with a vision system drives from dawn to dusk, with the sky gradually darkening. Therefore, the system must be able to adapt to changes in ambient illuminations and continue to drive safely on the road. In this paper, we formulate such problems as Evolving Domain Generalization, where a model aims to generalize well on a target domain by discovering and leveraging the evolving pattern of the environment. We then propose Directional Domain Augmentation (DDA), which simulates the unseen target features by mapping source data as augmentations through a domain transformer. Specifically, we formulate DDA as a bi-level optimization problem and solve it through a novel meta-learning approach in the representation space. We evaluate the proposed method on both synthetic datasets and real-world datasets, and empirical results show that our approach can outperform other existing methods.", + "primary_area": "machine learning iv", + "author": "Qiuhao Zeng; Wei Wang; Fan Zhou; Charles Ling; Boyu Wang", + "authorids": "", + "aff": "University of Western Ontario, Canada; University of Western Ontario, Canada; Beihang University, China; University of Western Ontario, Canada; University of Western Ontario, Canada", + "bibtex": "@article{Zeng_Wang_Zhou_Ling_Wang_2023, title={Foresee What You Will Learn: Data Augmentation for Domain Generalization in Non-stationary Environment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26320}, DOI={10.1609/aaai.v37i9.26320}, abstractNote={Existing domain generalization aims to learn a generalizable model to perform well even on unseen domains. For many real-world machine learning applications, the data distribution often shifts gradually along domain indices. For example, a self-driving car with a vision system drives from dawn to dusk, with the sky gradually darkening. Therefore, the system must be able to adapt to changes in ambient illuminations and continue to drive safely on the road. 
In this paper, we formulate such problems as Evolving Domain Generalization, where a model aims to generalize well on a target domain by discovering and leveraging the evolving pattern of the environment. We then propose Directional Domain Augmentation (DDA), which simulates the unseen target features by mapping source data as augmentations through a domain transformer. Specifically, we formulate DDA as a bi-level optimization problem and solve it through a novel meta-learning approach in the representation space. We evaluate the proposed method on both synthetic datasets and real-world datasets, and empirical results show that our approach can outperform other existing methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Qiuhao and Wang, Wei and Zhou, Fan and Ling, Charles and Wang, Boyu}, year={2023}, month={Jun.}, pages={11147-11155} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26320/26092", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26320", + "pdf_size": 982401, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "University of Western Ontario;Beihang University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uwo.ca;http://www.buaa.edu.cn", + "aff_unique_abbr": "UWO;BUAA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "Canada;China" + }, + { + "id": "article-26373", + "title": "Formal Verification of Bayesian Mechanisms", + "track": "main", + "status": "Technical", + "abstract": "In this paper, for the first time, we study the formal verification of Bayesian mechanisms through strategic reasoning. 
We rely on the framework of Probabilistic Strategy Logic (PSL), which is well-suited for representing and verifying multi-agent systems with incomplete information. We take advantage of the recent results on the decidability of PSL model checking under memoryless strategies, and reduce the problem of formally verifying Bayesian mechanisms to PSL model checking. We show how to encode Bayesian-Nash equilibrium and economical properties, and illustrate our approach with different kinds of mechanisms.", + "primary_area": "multiagent systems", + "author": "Munyque Mittelmann; Bastien Maubert; Aniello Murano; Laurent Perrussel", + "authorids": "", + "aff": "Universit\u00e0 degli Studi di Napoli \u201cFederico II\u201d, Italy; Universit\u00e0 degli Studi di Napoli \u201cFederico II\u201d, Italy; Universit\u00e0 degli Studi di Napoli \u201cFederico II\u201d, Italy; IRIT - Universit\u00e9 Toulouse Capitole, France", + "bibtex": "@article{Mittelmann_Maubert_Murano_Perrussel_2023, title={Formal Verification of Bayesian Mechanisms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26373}, DOI={10.1609/aaai.v37i10.26373}, abstractNote={In this paper, for the first time, we study the formal verification of Bayesian mechanisms through strategic reasoning. We rely on the framework of Probabilistic Strategy Logic (PSL), which is well-suited for representing and verifying multi-agent systems with incomplete information. We take advantage of the recent results on the decidability of PSL model checking under memoryless strategies, and reduce the problem of formally verifying Bayesian mechanisms to PSL model checking. 
We show how to encode Bayesian-Nash equilibrium and economical properties, and illustrate our approach with different kinds of mechanisms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mittelmann, Munyque and Maubert, Bastien and Murano, Aniello and Perrussel, Laurent}, year={2023}, month={Jun.}, pages={11621-11629} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26373/26145", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26373", + "pdf_size": 183025, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2077137182523836063&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ut-capitole.fr;gmail.com;gmail.com;ut-capitole.fr", + "email": "ut-capitole.fr;gmail.com;gmail.com;ut-capitole.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Universit\u00e0 degli Studi di Napoli \u201cFederico II\u201d;Universit\u00e9 Toulouse Capitole", + "aff_unique_dep": ";IRIT", + "aff_unique_url": "https://www.unina.it;https://www.univ-toulouse.fr", + "aff_unique_abbr": "UNINA;UT1", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Toulouse", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "Italy;France" + }, + { + "id": "article-26740", + "title": "Formalising the Robustness of Counterfactual Explanations for Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "The use of counterfactual explanations (CFXs) is an increasingly popular explanation strategy for machine learning models. However, recent studies have shown that these explanations may not be robust to changes in the underlying model (e.g., following retraining), which raises questions about their reliability in real-world applications. 
Existing attempts towards solving this problem are heuristic, and the robustness to model changes of the resulting CFXs is evaluated with only a small number of retrained models, failing to provide exhaustive guarantees. To remedy this, we propose \u2206-robustness, the first notion to formally and deterministically assess the robustness (to model changes) of CFXs for neural networks. We introduce an abstraction framework based on interval neural networks \nto verify the \u2206-robustness of CFXs against a possibly infinite set of changes to the model parameters, i.e., weights and biases. We then demonstrate the utility of this approach in two distinct ways. First, we analyse the \u2206-robustness of a number of CFX generation methods from the literature and show that they unanimously host significant deficiencies in this regard. Second, we demonstrate how embedding \u2206-robustness within existing methods can provide CFXs which are provably robust.", + "primary_area": "safe and robust ai", + "author": "Junqi Jiang; Francesco Leofante; Antonio Rago; Francesca Toni", + "authorids": "", + "aff": "Department of Computing, Imperial College London, UK; Department of Computing, Imperial College London, UK; Department of Computing, Imperial College London, UK; Department of Computing, Imperial College London, UK", + "bibtex": "@article{Jiang_Leofante_Rago_Toni_2023, title={Formalising the Robustness of Counterfactual Explanations for Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26740}, DOI={10.1609/aaai.v37i12.26740}, abstractNote={The use of counterfactual explanations (CFXs) is an increasingly popular explanation strategy for machine learning models. However, recent studies have shown that these explanations may not be robust to changes in the underlying model (e.g., following retraining), which raises questions about their reliability in real-world applications. 
Existing attempts towards solving this problem are heuristic, and the robustness to model changes of the resulting CFXs is evaluated with only a small number of retrained models, failing to provide exhaustive guarantees. To remedy this, we propose \u2206-robustness, the first notion to formally and deterministically assess the robustness (to model changes) of CFXs for neural networks. We introduce an abstraction framework based on interval neural networks to verify the \u2206-robustness of CFXs against a possibly infinite set of changes to the model parameters, i.e., weights and biases. We then demonstrate the utility of this approach in two distinct ways. First, we analyse the \u2206-robustness of a number of CFX generation methods from the literature and show that they unanimously host significant deficiencies in this regard. Second, we demonstrate how embedding \u2206-robustness within existing methods can provide CFXs which are provably robust.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Junqi and Leofante, Francesco and Rago, Antonio and Toni, Francesca}, year={2023}, month={Jun.}, pages={14901-14909} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26740/26512", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26740", + "pdf_size": 1804202, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16277942782103352481&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "imperial.ac.uk;imperial.ac.uk;imperial.ac.uk;imperial.ac.uk", + "email": "imperial.ac.uk;imperial.ac.uk;imperial.ac.uk;imperial.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Imperial College London", + "aff_unique_dep": "Department of Computing", + "aff_unique_url": "https://www.imperial.ac.uk", + "aff_unique_abbr": "Imperial", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": 
"London", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26714", + "title": "Formally Verified SAT-Based AI Planning", + "track": "aaai special track", + "status": "Technical", + "abstract": "We present an executable formally verified SAT encoding of ground classical AI planning problems. We use the theorem prover Isabelle/HOL to perform the verification. We experimentally test the verified encoding and show that it can be used for reasonably sized standard planning benchmarks. We also use it as a reference to test a state-of-the-art SAT-based\nplanner, showing that it sometimes falsely claims that problems have no solutions of certain lengths.", + "primary_area": "safe and robust ai", + "author": "Mohammad Abdulaziz; Friedrich Kurz", + "authorids": "", + "aff": "King\u2019s College London + Techniche Universit \u00a8at M \u00a8unchen; Techniche Universit \u00a8at M \u00a8unchen", + "bibtex": "@article{Abdulaziz_Kurz_2023, title={Formally Verified SAT-Based AI Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26714}, DOI={10.1609/aaai.v37i12.26714}, abstractNote={We present an executable formally verified SAT encoding of ground classical AI planning problems. We use the theorem prover Isabelle/HOL to perform the verification. We experimentally test the verified encoding and show that it can be used for reasonably sized standard planning benchmarks. 
We also use it as a reference to test a state-of-the-art SAT-based\nplanner, showing that it sometimes falsely claims that problems have no solutions of certain lengths.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Abdulaziz, Mohammad and Kurz, Friedrich}, year={2023}, month={Jun.}, pages={14665-14673} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26714/26486", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26714", + "pdf_size": 154212, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8654376895953815821&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "King's College London;Technische Universit\u00e4t M\u00fcnchen", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.kcl.ac.uk;https://www.tum.de", + "aff_unique_abbr": "KCL;TUM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "United Kingdom;Germany" + }, + { + "id": "article-26759", + "title": "Formally Verified Solution Methods for Markov Decision Processes", + "track": "aaai special track", + "status": "Technical", + "abstract": "We formally verify executable algorithms for solving Markov decision processes (MDPs) in the interactive theorem prover Isabelle/HOL. We build on existing formalizations of probability theory to analyze the expected total reward criterion on finite and infinite-horizon problems. Our developments formalize the Bellman equation and give conditions under which optimal policies exist. Based on this analysis, we verify dynamic programming algorithms to solve tabular MDPs. 
We evaluate the formally verified implementations experimentally on standard problems, compare them with state-of-the-art systems, and show that they are practical.", + "primary_area": "safe and robust ai", + "author": "Maximilian Sch\u00e4ffeler; Mohammad Abdulaziz", + "authorids": "", + "aff": "Technische Universit\u00e4t M\u00fcnchen, Germany; Technische Universit\u00e4t M\u00fcnchen, Germany + King's College London, United Kingdom", + "bibtex": "@article{Sch\u00e4ffeler_Abdulaziz_2023, title={Formally Verified Solution Methods for Markov Decision Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26759}, DOI={10.1609/aaai.v37i12.26759}, abstractNote={We formally verify executable algorithms for solving Markov decision processes (MDPs) in the interactive theorem prover Isabelle/HOL. We build on existing formalizations of probability theory to analyze the expected total reward criterion on finite and infinite-horizon problems. Our developments formalize the Bellman equation and give conditions under which optimal policies exist. Based on this analysis, we verify dynamic programming algorithms to solve tabular MDPs. 
We evaluate the formally verified implementations experimentally on standard problems, compare them with state-of-the-art systems, and show that they are practical.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sch\u00e4ffeler, Maximilian and Abdulaziz, Mohammad}, year={2023}, month={Jun.}, pages={15073-15081} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26759/26531", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26759", + "pdf_size": 135782, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8604170005983330542&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "tum.de;kcl.ac.uk", + "email": "tum.de;kcl.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Technische Universit\u00e4t M\u00fcnchen;King's College London", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tum.de;https://www.kcl.ac.uk", + "aff_unique_abbr": "TUM;KCL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": "Germany;United Kingdom" + }, + { + "id": "article-26793", + "title": "Foundation Model for Material Science", + "track": "senior member presentation blue sky papers", + "status": "Technical", + "abstract": "Foundation models (FMs) are achieving remarkable successes to realize complex downstream tasks in domains including natural language and visions. In this paper, we propose building an FM for material science, which is trained with massive data across a wide variety of material domains and data modalities. Nowadays machine learning models play key roles in material discovery, particularly for property prediction and structure generation. However, those models have been independently developed to address only specific tasks without sharing more global knowledge. 
Development of an FM for material science will enable overarching modeling across material domains and data modalities by sharing their feature representations. We discuss fundamental challenges and required technologies to build an FM from the aspects of data preparation, model development, and downstream tasks.", + "primary_area": "", + "author": "Seiji Takeda; Akihiro Kishimoto; Lisa Hamada; Daiju Nakano; John R. Smith", + "authorids": "", + "aff": "IBM Research - Tokyo; IBM Research - Tokyo; IBM Research - Tokyo; IBM Research - Tokyo; IBM Thomas J. Watson Research Center", + "bibtex": "@article{Takeda_Kishimoto_Hamada_Nakano_Smith_2024, title={Foundation Model for Material Science}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26793}, DOI={10.1609/aaai.v37i13.26793}, abstractNote={Foundation models (FMs) are achieving remarkable successes to realize complex downstream tasks in domains including natural language and visions. In this paper, we propose building an FM for material science, which is trained with massive data across a wide variety of material domains and data modalities. Nowadays machine learning models play key roles in material discovery, particularly for property prediction and structure generation. However, those models have been independently developed to address only specific tasks without sharing more global knowledge. Development of an FM for material science will enable overarching modeling across material domains and data modalities by sharing their feature representations. 
We discuss fundamental challenges and required technologies to build an FM from the aspects of data preparation, model development, and downstream tasks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Takeda, Seiji and Kishimoto, Akihiro and Hamada, Lisa and Nakano, Daiju and Smith, John R.}, year={2024}, month={Jul.}, pages={15376-15383} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26793/26565", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26793", + "pdf_size": 406917, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11932418045752621919&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "jp.ibm.com;ibm.com;ibm.com;jp.ibm.com;us.ibm.com", + "email": "jp.ibm.com;ibm.com;ibm.com;jp.ibm.com;us.ibm.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "IBM Research;IBM", + "aff_unique_dep": "Research;Research", + "aff_unique_url": "https://www.ibm.com/research;https://www.ibm.com/research", + "aff_unique_abbr": "IBM;IBM", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Tokyo;Yorktown Heights", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "article-26791", + "title": "Foundations of Cooperative AI", + "track": "senior member presentation blue sky papers", + "status": "Technical", + "abstract": "AI systems can interact in unexpected ways, sometimes with disastrous consequences. As AI gets to control more of our world, these interactions will become more common and have higher stakes. As AI becomes more advanced, these interactions will become more sophisticated, and game theory will provide the tools for analyzing these interactions. However, AI agents are in some ways unlike the agents traditionally studied in game theory, introducing new challenges as well as opportunities. 
We propose a research agenda to develop the game theory of highly advanced AI agents, with a focus on achieving cooperation.", + "primary_area": "", + "author": "Vincent Conitzer; Caspar Oesterheld", + "authorids": "", + "aff": "Computer Science Department, Carnegie Mellon University; Computer Science Department, Carnegie Mellon University", + "bibtex": "@article{Conitzer_Oesterheld_2024, title={Foundations of Cooperative AI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26791}, DOI={10.1609/aaai.v37i13.26791}, abstractNote={AI systems can interact in unexpected ways, sometimes with disastrous consequences. As AI gets to control more of our world, these interactions will become more common and have higher stakes. As AI becomes more advanced, these interactions will become more sophisticated, and game theory will provide the tools for analyzing these interactions. However, AI agents are in some ways unlike the agents traditionally studied in game theory, introducing new challenges as well as opportunities. 
We propose a research agenda to develop the game theory of highly advanced AI agents, with a focus on achieving cooperation.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Conitzer, Vincent and Oesterheld, Caspar}, year={2024}, month={Jul.}, pages={15359-15367} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26791/26563", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26791", + "pdf_size": 125753, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9931669586396643695&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.cmu.edu;cmu.edu", + "email": "cs.cmu.edu;cmu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25182", + "title": "Fourier-Net: Fast Image Registration with Band-Limited Deformation", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised image registration commonly adopts U-Net style networks to predict dense displacement fields in the full-resolution spatial domain. For high-resolution volumetric image data, this process is however resource-intensive and time-consuming. To tackle this problem, we propose the Fourier-Net, replacing the expansive path in a U-Net style network with a parameter-free model-driven decoder. Specifically, instead of our Fourier-Net learning to output a full-resolution displacement field in the spatial domain, we learn its low-dimensional representation in a band-limited Fourier domain. 
This representation is then decoded by our devised model-driven decoder (consisting of a zero padding layer and an inverse discrete Fourier transform layer) to the dense, full-resolution displacement field in the spatial domain. These changes allow our unsupervised Fourier-Net to contain fewer parameters and computational operations, resulting in faster inference speeds. Fourier-Net is then evaluated on two public 3D brain datasets against various state-of-the-art approaches. For example, when compared to a recent transformer-based method, named TransMorph, our Fourier-Net, which only uses 2.2% of its parameters and 6.66% of the multiply-add operations, achieves a 0.5% higher Dice score and an 11.48 times faster inference speed. Code is available at https://github.com/xi-jia/Fourier-Net.", + "primary_area": "computer vision i", + "author": "Xi Jia; Joseph Bartlett; Wei Chen; Siyang Song; Tianyang Zhang; Xinxing Cheng; Wenqi Lu; Zhaowen Qiu; Jinming Duan", + "authorids": "", + "aff": "School of Computer Science, University of Birmingham, UK; School of Computer Science, University of Birmingham, UK+Department of Biomedical Engineering, University of Melbourne, Australia; School of Computer Science, University of Birmingham, UK; Department of Computer Science and Technology, University of Cambridge, UK; School of Computer Science, University of Birmingham, UK; School of Computer Science, University of Birmingham, UK; Department of Computer Science, University of Warwick, UK; Institute of Information Computer Engineering, Northeast Forestry University, China; School of Computer Science, University of Birmingham, UK+Alan Turing Institute, UK", + "bibtex": "@article{Jia_Bartlett_Chen_Song_Zhang_Cheng_Lu_Qiu_Duan_2023, title={Fourier-Net: Fast Image Registration with Band-Limited Deformation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25182}, DOI={10.1609/aaai.v37i1.25182}, abstractNote={Unsupervised image registration commonly adopts U-Net style 
networks to predict dense displacement fields in the full-resolution spatial domain. For high-resolution volumetric image data, this process is however resource-intensive and time-consuming. To tackle this problem, we propose the Fourier-Net, replacing the expansive path in a U-Net style network with a parameter-free model-driven decoder. Specifically, instead of our Fourier-Net learning to output a full-resolution displacement field in the spatial domain, we learn its low-dimensional representation in a band-limited Fourier domain. This representation is then decoded by our devised model-driven decoder (consisting of a zero padding layer and an inverse discrete Fourier transform layer) to the dense, full-resolution displacement field in the spatial domain. These changes allow our unsupervised Fourier-Net to contain fewer parameters and computational operations, resulting in faster inference speeds. Fourier-Net is then evaluated on two public 3D brain datasets against various state-of-the-art approaches. For example, when compared to a recent transformer-based method, named TransMorph, our Fourier-Net, which only uses 2.2% of its parameters and 6.66% of the multiply-add operations, achieves a 0.5% higher Dice score and an 11.48 times faster inference speed. 
Code is available at https://github.com/xi-jia/Fourier-Net.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Xi and Bartlett, Joseph and Chen, Wei and Song, Siyang and Zhang, Tianyang and Cheng, Xinxing and Lu, Wenqi and Qiu, Zhaowen and Duan, Jinming}, year={2023}, month={Jun.}, pages={1015-1023} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25182/24954", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25182", + "pdf_size": 1338707, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17311714941991110723&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bham.ac.uk;bham.ac.uk;bham.ac.uk;cl.cam.ac.uk;bham.ac.uk;bham.ac.uk;warwick.ac.uk;nefu.edu.cn;bham.ac.uk", + "email": "bham.ac.uk;bham.ac.uk;bham.ac.uk;cl.cam.ac.uk;bham.ac.uk;bham.ac.uk;warwick.ac.uk;nefu.edu.cn;bham.ac.uk", + "github": "https://github.com/xi-jia/Fourier-Net", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0+1;0;2;0;0;3;4;0+5", + "aff_unique_norm": "University of Birmingham;University of Melbourne;University of Cambridge;University of Warwick;Northeast Forestry University;Alan Turing Institute", + "aff_unique_dep": "School of Computer Science;Department of Biomedical Engineering;Department of Computer Science and Technology;Department of Computer Science;Institute of Information Computer Engineering;", + "aff_unique_url": "https://www.birmingham.ac.uk;https://www.unimelb.edu.au;https://www.cam.ac.uk;https://warwick.ac.uk;http://www.nefu.edu.cn;https://www.turing.ac.uk", + "aff_unique_abbr": "UoB;UniMelb;Cambridge;Warwick;;ATI", + "aff_campus_unique_index": "0;0;0;2;0;0;0", + "aff_campus_unique": "Birmingham;;Cambridge", + "aff_country_unique_index": "0;0+1;0;0;0;0;0;2;0+0", + "aff_country_unique": "United Kingdom;Australia;China" + }, + { + "id": "article-25439", + "title": "Frame-Level Label Refinement for Skeleton-Based Weakly-Supervised Action 
Recognition", + "track": "main", + "status": "Technical", + "abstract": "In recent years, skeleton-based action recognition has achieved remarkable performance in understanding human motion from sequences of skeleton data, which is an important medium for synthesizing realistic human movement in various applications. However, existing methods assume that each action clip is manually trimmed to contain one specific action, which requires a significant amount of effort for annotation. To solve this problem, we consider a novel problem of skeleton-based weakly-supervised temporal action localization (S-WTAL), where we need to recognize and localize human action segments in untrimmed skeleton videos given only the video-level labels. Although this task is challenging due to the sparsity of skeleton data and the lack of contextual clues from interaction with other objects and the environment, we present a frame-level label refinement framework based on a spatio-temporal graph convolutional network (ST-GCN) to overcome these difficulties. We use multiple instance learning (MIL) with video-level labels to generate the frame-level predictions. Inspired by advances in handling the noisy label problem, we introduce a label cleaning strategy of the frame-level pseudo labels to guide the learning process. The network parameters and the frame-level predictions are alternately updated to obtain the final results. We extensively evaluate the effectiveness of our learning approach on skeleton-based action recognition benchmarks. 
The state-of-the-art experimental results demonstrate that the proposed method can recognize and localize action segments of the skeleton data.", + "primary_area": "computer vision iii", + "author": "Qing Yu; Kent Fujiwara", + "authorids": "", + "aff": "The University of Tokyo, Japan; LINE Corporation, Japan", + "bibtex": "@article{Yu_Fujiwara_2023, title={Frame-Level Label Refinement for Skeleton-Based Weakly-Supervised Action Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25439}, DOI={10.1609/aaai.v37i3.25439}, abstractNote={In recent years, skeleton-based action recognition has achieved remarkable performance in understanding human motion from sequences of skeleton data, which is an important medium for synthesizing realistic human movement in various applications. However, existing methods assume that each action clip is manually trimmed to contain one specific action, which requires a significant amount of effort for annotation. To solve this problem, we consider a novel problem of skeleton-based weakly-supervised temporal action localization (S-WTAL), where we need to recognize and localize human action segments in untrimmed skeleton videos given only the video-level labels. Although this task is challenging due to the sparsity of skeleton data and the lack of contextual clues from interaction with other objects and the environment, we present a frame-level label refinement framework based on a spatio-temporal graph convolutional network (ST-GCN) to overcome these difficulties. We use multiple instance learning (MIL) with video-level labels to generate the frame-level predictions. Inspired by advances in handling the noisy label problem, we introduce a label cleaning strategy of the frame-level pseudo labels to guide the learning process. The network parameters and the frame-level predictions are alternately updated to obtain the final results. 
We extensively evaluate the effectiveness of our learning approach on skeleton-based action recognition benchmarks. The state-of-the-art experimental results demonstrate that the proposed method can recognize and localize action segments of the skeleton data.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Qing and Fujiwara, Kent}, year={2023}, month={Jun.}, pages={3322-3330} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25439/25211", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25439", + "pdf_size": 3230825, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2712229527761369637&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "hal.t.u-tokyo.ac.jp;linecorp.com", + "email": "hal.t.u-tokyo.ac.jp;linecorp.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "The University of Tokyo;LINE Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.linecorp.com", + "aff_unique_abbr": "UTokyo;LINE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-27005", + "title": "Fraud\u2019s Bargain Attacks to Textual Classifiers via Metropolis-Hasting Sampling (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Recent studies on adversarial examples expose vulnerabilities of natural language processing (NLP) models. Existing techniques for generating adversarial examples are typically driven by deterministic heuristic rules that are agnostic to the optimal adversarial examples, a strategy that often results in attack failures. 
To this end, this research proposes Fraud's Bargain Attack (FBA), which utilizes a novel randomization mechanism to enlarge the searching space and enables high-quality adversarial examples to be generated with high probabilities. FBA applies the Metropolis-Hasting algorithm to enhance the selection of adversarial examples from all candidates proposed by a customized Word Manipulation Process (WMP). WMP perturbs one word at a time via insertion, removal, or substitution in a contextual-aware manner. Extensive experiments demonstrate that FBA outperforms the baselines in terms of attack success rate and imperceptibility.", + "primary_area": "", + "author": "Mingze Ni; Zhensu Sun; Wei Liu", + "authorids": "", + "aff": "University of Technology Sydney; ShanghaiTech University; University of Technology Sydney", + "bibtex": "@article{Ni_Sun_Liu_2024, title={Fraud\u2019s Bargain Attacks to Textual Classifiers via Metropolis-Hasting Sampling (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27005}, DOI={10.1609/aaai.v37i13.27005}, abstractNote={Recent studies on adversarial examples expose vulnerabilities of natural language processing (NLP) models. Existing techniques for generating adversarial examples are typically driven by deterministic heuristic rules that are agnostic to the optimal adversarial examples, a strategy that often results in attack failures. To this end, this research proposes Fraud\u2019s Bargain Attack (FBA), which utilizes a novel randomization mechanism to enlarge the searching space and enables high-quality adversarial examples to be generated with high probabilities. FBA applies the Metropolis-Hasting algorithm to enhance the selection of adversarial examples from all candidates proposed by a customized Word Manipulation Process (WMP). WMP perturbs one word at a time via insertion, removal, or substitution in a contextual-aware manner. 
Extensive experiments demonstrate that FBA outperforms the baselines in terms of attack success rate and imperceptibility.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ni, Mingze and Sun, Zhensu and Liu, Wei}, year={2024}, month={Jul.}, pages={16290-16291} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27005/26777", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27005", + "pdf_size": 74523, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14379315591236561454&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "student.uts.edu.au;shanghaitech.edu.cn;uts.edu.au", + "email": "student.uts.edu.au;shanghaitech.edu.cn;uts.edu.au", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Technology Sydney;ShanghaiTech University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uts.edu.au;https://www.shanghaitech.edu.cn", + "aff_unique_abbr": "UTS;ShanghaiTech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-25176", + "title": "FreeEnricher: Enriching Face Landmarks without Additional Cost", + "track": "main", + "status": "Technical", + "abstract": "Recent years have witnessed significant growth of face alignment. Though dense facial landmark is highly demanded in various scenarios, e.g., cosmetic medicine and facial beautification, most works only consider sparse face alignment. To address this problem, we present a framework that can enrich landmark density by existing sparse landmark datasets, e.g., 300W with 68 points and WFLW with 98 points. Firstly, we observe that the local patches along each semantic contour are highly similar in appearance. 
Then, we propose a weakly-supervised idea of learning the refinement ability on original sparse landmarks and adapting this ability to enriched dense landmarks. Meanwhile, several operators are devised and organized together to implement the idea. Finally, the trained model is applied as a plug-and-play module to the existing face alignment networks. To evaluate our method, we manually label the dense landmarks on 300W testset. Our method yields state-of-the-art accuracy not only in newly-constructed dense 300W testset but also in the original sparse 300W and WFLW testsets without additional cost.", + "primary_area": "computer vision i", + "author": "Yangyu Huang; Xi Chen; Jongyoo Kim; Hao Yang; Chong Li; Jiaolong Yang; Dong Chen", + "authorids": "", + "aff": "Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia", + "bibtex": "@article{Huang_Chen_Kim_Yang_Li_Yang_Chen_2023, title={FreeEnricher: Enriching Face Landmarks without Additional Cost}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25176}, DOI={10.1609/aaai.v37i1.25176}, abstractNote={Recent years have witnessed significant growth of face alignment. Though dense facial landmark is highly demanded in various scenarios, e.g., cosmetic medicine and facial beautification, most works only consider sparse face alignment. To address this problem, we present a framework that can enrich landmark density by existing sparse landmark datasets, e.g., 300W with 68 points and WFLW with 98 points. Firstly, we observe that the local patches along each semantic contour are highly similar in appearance. Then, we propose a weakly-supervised idea of learning the refinement ability on original sparse landmarks and adapting this ability to enriched dense landmarks. Meanwhile, several operators are devised and organized together to implement the idea. 
Finally, the trained model is applied as a plug-and-play module to the existing face alignment networks. To evaluate our method, we manually label the dense landmarks on 300W testset. Our method yields state-of-the-art accuracy not only in newly-constructed dense 300W testset but also in the original sparse 300W and WFLW testsets without additional cost.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Yangyu and Chen, Xi and Kim, Jongyoo and Yang, Hao and Li, Chong and Yang, Jiaolong and Chen, Dong}, year={2023}, month={Jun.}, pages={962-970} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25176/24948", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25176", + "pdf_size": 1223126, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15899415069526582264&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Microsoft Research", + "aff_unique_dep": "Research", + "aff_unique_url": "https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "MSR Asia", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25212", + "title": "Frequency Domain Disentanglement for Arbitrary Neural Style Transfer", + "track": "main", + "status": "Technical", + "abstract": "Arbitrary neural style transfer has been a popular research topic due to its rich application scenarios. Effective disentanglement of content and style is the critical factor for synthesizing an image with arbitrary style. 
The existing methods focus on disentangling feature representations of content and style in the spatial domain where the content and style components are innately entangled and difficult to be disentangled clearly. Therefore, these methods always suffer from low-quality results because of the sub-optimal disentanglement. To address such a challenge, this paper proposes the frequency mixer (FreMixer) module that disentangles and re-entangles the frequency spectrum of content and style components in the frequency domain. Since content and style components have different frequency-domain characteristics (frequency bands and frequency patterns), the FreMixer could well disentangle these two components. Based on the FreMixer module, we design a novel Frequency Domain Disentanglement (FDD) framework for arbitrary neural style transfer. Qualitative and quantitative experiments verify that the proposed method can render better stylized results compared to the state-of-the-art methods.", + "primary_area": "computer vision i", + "author": "Dongyang Li; Hao Luo; Pichao Wang; Zhibin Wang; Shang Liu; Fan Wang", + "authorids": "", + "aff": "Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group", + "bibtex": "@article{Li_Luo_Wang_Wang_Liu_Wang_2023, title={Frequency Domain Disentanglement for Arbitrary Neural Style Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25212}, DOI={10.1609/aaai.v37i1.25212}, abstractNote={Arbitrary neural style transfer has been a popular research topic due to its rich application scenarios. Effective disentanglement of content and style is the critical factor for synthesizing an image with arbitrary style. The existing methods focus on disentangling feature representations of content and style in the spatial domain where the content and style components are innately entangled and difficult to be disentangled clearly. 
Therefore, these methods always suffer from low-quality results because of the sub-optimal disentanglement. To address such a challenge, this paper proposes the frequency mixer (FreMixer) module that disentangles and re-entangles the frequency spectrum of content and style components in the frequency domain. Since content and style components have different frequency-domain characteristics (frequency bands and frequency patterns), the FreMixer could well disentangle these two components. Based on the FreMixer module, we design a novel Frequency Domain Disentanglement (FDD) framework for arbitrary neural style transfer. Qualitative and quantitative experiments verify that the proposed method can render better stylized results compared to the state-of-the-art methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Dongyang and Luo, Hao and Wang, Pichao and Wang, Zhibin and Liu, Shang and Wang, Fan}, year={2023}, month={Jun.}, pages={1287-1295} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25212/24984", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25212", + "pdf_size": 5436005, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4818826708833739744&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25194", + "title": "Frequency Selective 
Augmentation for Video Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Recent self-supervised video representation learning methods focus on maximizing the similarity between multiple augmented views from the same video and largely rely on the quality of generated views. However, most existing methods lack a mechanism to prevent representation learning from bias towards static information in the video. In this paper, we propose frequency augmentation (FreqAug), a spatio-temporal data augmentation method in the frequency domain\nfor video representation learning. FreqAug stochastically removes specific frequency components from the video so that learned representation captures essential features more from the remaining information for various downstream tasks. Specifically, FreqAug pushes the model to focus more on dynamic features rather than static features in the video via dropping spatial or temporal low-frequency components. To verify the generality of the proposed method, we experiment with FreqAug on multiple self-supervised learning frameworks along with standard augmentations. 
Transferring the improved representation to five video action recognition and two temporal action localization downstream tasks shows consistent improvements over baselines.", + "primary_area": "computer vision i", + "author": "Jinhyung Kim; Taeoh Kim; Minho Shim; Dongyoon Han; Dongyoon Wee; Junmo Kim", + "authorids": "", + "aff": "LG AI Research; NA VER CLOV A Video; NA VER CLOV A Video; NA VER AI Lab; NA VER CLOV A Video; KAIST", + "bibtex": "@article{Kim_Kim_Shim_Han_Wee_Kim_2023, title={Frequency Selective Augmentation for Video Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25194}, DOI={10.1609/aaai.v37i1.25194}, abstractNote={Recent self-supervised video representation learning methods focus on maximizing the similarity between multiple augmented views from the same video and largely rely on the quality of generated views. However, most existing methods lack a mechanism to prevent representation learning from bias towards static information in the video. In this paper, we propose frequency augmentation (FreqAug), a spatio-temporal data augmentation method in the frequency domain\nfor video representation learning. FreqAug stochastically removes specific frequency components from the video so that learned representation captures essential features more from the remaining information for various downstream tasks. Specifically, FreqAug pushes the model to focus more on dynamic features rather than static features in the video via dropping spatial or temporal low-frequency components. To verify the generality of the proposed method, we experiment with FreqAug on multiple self-supervised learning frameworks along with standard augmentations. 
Transferring the improved representation to five video action recognition and two temporal action localization downstream tasks shows consistent improvements over baselines.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Jinhyung and Kim, Taeoh and Shim, Minho and Han, Dongyoon and Wee, Dongyoon and Kim, Junmo}, year={2023}, month={Jun.}, pages={1124-1132} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25194/24966", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25194", + "pdf_size": 2360333, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17113622489889304995&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;2;3", + "aff_unique_norm": "LG AI Research;;NAVER Corporation;Korea Advanced Institute of Science and Technology", + "aff_unique_dep": ";;AI Lab;", + "aff_unique_url": "https://www.lgaires.com;;https://www.naver.com;https://www.kaist.ac.kr", + "aff_unique_abbr": "LG AI;;NAVER;KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "article-25133", + "title": "Frido: Feature Pyramid Diffusion for Complex Scene Image Synthesis", + "track": "main", + "status": "Technical", + "abstract": "Diffusion models (DMs) have shown great potential for high-quality image synthesis. However, when it comes to producing images with complex scenes, how to properly describe both image global structures and object details remains a challenging task. In this paper, we present Frido, a Feature Pyramid Diffusion model performing a multi-scale coarse-to-fine denoising process for image synthesis. 
Our model decomposes an input image into scale-dependent vector quantized features, followed by a coarse-to-fine gating for producing image output. During the above multi-scale representation learning stage, additional input conditions like text, scene graph, or image layout can be further exploited. Thus, Frido can be also applied for conditional or cross-modality image synthesis. We conduct extensive experiments over various unconditioned and conditional image generation tasks, ranging from text-to-image synthesis, layout-to-image, scene-graph-to-image, to label-to-image. More specifically, we achieved state-of-the-art FID scores on five benchmarks, namely layout-to-image on COCO and OpenImages, scene-graph-to-image on COCO and Visual Genome, and label-to-image on COCO.", + "primary_area": "computer vision i", + "author": "Wan-Cyuan Fan; Yen-Chun Chen; DongDong Chen; Yu Cheng; Lu Yuan; Yu-Chiang Frank Wang", + "authorids": "", + "aff": "National Taiwan University*; Microsoft Corporation\u2020; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; National Taiwan University+NVIDIA", + "bibtex": "@article{Fan_Chen_Chen_Cheng_Yuan_Wang_2023, title={Frido: Feature Pyramid Diffusion for Complex Scene Image Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25133}, DOI={10.1609/aaai.v37i1.25133}, abstractNote={Diffusion models (DMs) have shown great potential for high-quality image synthesis. However, when it comes to producing images with complex scenes, how to properly describe both image global structures and object details remains a challenging task. In this paper, we present Frido, a Feature Pyramid Diffusion model performing a multi-scale coarse-to-fine denoising process for image synthesis. Our model decomposes an input image into scale-dependent vector quantized features, followed by a coarse-to-fine gating for producing image output. 
During the above multi-scale representation learning stage, additional input conditions like text, scene graph, or image layout can be further exploited. Thus, Frido can be also applied for conditional or cross-modality image synthesis. We conduct extensive experiments over various unconditioned and conditional image generation tasks, ranging from text-to-image synthesis, layout-to-image, scene-graph-to-image, to label-to-image. More specifically, we achieved state-of-the-art FID scores on five benchmarks, namely layout-to-image on COCO and OpenImages, scene-graph-to-image on COCO and Visual Genome, and label-to-image on COCO.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fan, Wan-Cyuan and Chen, Yen-Chun and Chen, DongDong and Cheng, Yu and Yuan, Lu and Wang, Yu-Chiang Frank}, year={2023}, month={Jun.}, pages={579-587} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25133/24905", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25133", + "pdf_size": 4340029, + "gs_citation": 90, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7003619032927580215&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ntu.edu.tw; ; ; ; ; ", + "email": "ntu.edu.tw; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;0+2", + "aff_unique_norm": "National Taiwan University;Microsoft Corporation;NVIDIA Corporation", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ntu.edu.tw;https://www.microsoft.com;https://www.nvidia.com", + "aff_unique_abbr": "NTU;Microsoft;NVIDIA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;0+1", + "aff_country_unique": "Taiwan, China;United States" + }, + { + "id": "article-25254", + "title": "From Coarse to Fine: Hierarchical Pixel Integration for Lightweight Image Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Image 
super-resolution (SR) serves as a fundamental tool for the processing and transmission of multimedia data. Recently, Transformer-based models have achieved competitive performances in image SR. They divide images into fixed-size patches and apply self-attention on these patches to model long-range dependencies among pixels. However, this architecture design is originated for high-level vision tasks, which lacks design guideline from SR knowledge. In this paper, we aim to design a new attention block whose insights are from the interpretation of Local Attribution Map (LAM) for SR networks. Specifically, LAM presents a hierarchical importance map where the most important pixels are located in a fine area of a patch and some less important pixels are spread in a coarse area of the whole image. To access pixels in the coarse area, instead of using a very large patch size, we propose a lightweight Global Pixel Access (GPA) module that applies cross-attention with the most similar patch in an image. In the fine area, we use an Intra-Patch Self-Attention (IPSA) module to model long-range pixel dependencies in a local patch, and then a spatial convolution is applied to process the finest details. In addition, a Cascaded Patch Division (CPD) strategy is proposed to enhance perceptual quality of recovered images. Extensive experiments suggest that our method outperforms state-of-the-art lightweight SR methods by a large margin. 
Code is available at https://github.com/passerer/HPINet.", + "primary_area": "computer vision ii", + "author": "Jie Liu; Chao Chen; Jie Tang; Gangshan Wu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Liu_Chen_Tang_Wu_2023, title={From Coarse to Fine: Hierarchical Pixel Integration for Lightweight Image Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25254}, DOI={10.1609/aaai.v37i2.25254}, abstractNote={Image super-resolution (SR) serves as a fundamental tool for the processing and transmission of multimedia data. Recently, Transformer-based models have achieved competitive performances in image SR. They divide images into fixed-size patches and apply self-attention on these patches to model long-range dependencies among pixels. However, this architecture design is originated for high-level vision tasks, which lacks design guideline from SR knowledge. In this paper, we aim to design a new attention block whose insights are from the interpretation of Local Attribution Map (LAM) for SR networks. Specifically, LAM presents a hierarchical importance map where the most important pixels are located in a fine area of a patch and some less important pixels are spread in a coarse area of the whole image. To access pixels in the coarse area, instead of using a very large patch size, we propose a lightweight Global Pixel Access (GPA) module that applies cross-attention with the most similar patch in an image. In the fine area, we use an Intra-Patch Self-Attention (IPSA) module to model long-range pixel dependencies in a local patch, and then a spatial convolution is applied to process the finest details. 
In addition, a Cascaded Patch Division (CPD) strategy is proposed to enhance perceptual quality of recovered images. Extensive experiments suggest that our method outperforms state-of-the-art lightweight SR methods by a large margin. Code is available at https://github.com/passerer/HPINet.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Jie and Chen, Chao and Tang, Jie and Wu, Gangshan}, year={2023}, month={Jun.}, pages={1666-1674} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25254/25026", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25254", + "pdf_size": 2741786, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13354065424045603912&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "https://github.com/passerer/HPINet", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25696", + "title": "From Monopoly to Competition: Optimal Contests Prevail", + "track": "main", + "status": "Technical", + "abstract": "We study competition among contests in a general model that allows for an arbitrary and heterogeneous space of contest design and symmetric contestants. The goal of the contest designers is to maximize the contestants' sum of efforts. 
Our main result shows that optimal contests in the monopolistic setting (i.e., those that maximize the sum of efforts in a model with a single contest) form an equilibrium in the model with competition among contests. Under a very natural assumption these contests are in fact dominant, and the equilibria that they form are unique. Moreover, equilibria with the optimal contests are Pareto-optimal even in cases where other equilibria emerge. In many natural cases, they also maximize the social welfare.", + "primary_area": "game theory and economic paradigms", + "author": "Xiaotie Deng; Yotam Gafni; Ron Lavi; Tao Lin; Hongyi Ling", + "authorids": "", + "aff": "Center on Frontiers of Computing Studies, Department of Computer Science, Peking University; Technion - Israel Institute of Technology; University of Bath, UK; School of Engineering and Applied Sciences, Harvard University; ETH Zurich", + "bibtex": "@article{Deng_Gafni_Lavi_Lin_Ling_2023, title={From Monopoly to Competition: Optimal Contests Prevail}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25696}, DOI={10.1609/aaai.v37i5.25696}, abstractNote={We study competition among contests in a general model that allows for an arbitrary and heterogeneous space of contest design and symmetric contestants. The goal of the contest designers is to maximize the contestants\u2019 sum of efforts. Our main result shows that optimal contests in the monopolistic setting (i.e., those that maximize the sum of efforts in a model with a single contest) form an equilibrium in the model with competition among contests. Under a very natural assumption these contests are in fact dominant, and the equilibria that they form are unique. Moreover, equilibria with the optimal contests are Pareto-optimal even in cases where other equilibria emerge. 
In many natural cases, they also maximize the social welfare.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Xiaotie and Gafni, Yotam and Lavi, Ron and Lin, Tao and Ling, Hongyi}, year={2023}, month={Jun.}, pages={5608-5615} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25696/25468", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25696", + "pdf_size": 169752, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3727006592070955182&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 14, + "aff_domain": "pku.edu.cn;campus.technion.ac.il;gmail.com;g.harvard.edu;pku.edu.cn", + "email": "pku.edu.cn;campus.technion.ac.il;gmail.com;g.harvard.edu;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "Peking University;Technion - Israel Institute of Technology;University of Bath;Harvard University;ETH Zurich", + "aff_unique_dep": "Department of Computer Science;;;School of Engineering and Applied Sciences;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.technion.ac.il/en/;https://www.bath.ac.uk;https://www.harvard.edu;https://www.ethz.ch", + "aff_unique_abbr": "Peking U;Technion;Bath;Harvard;ETHZ", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;1;2;3;4", + "aff_country_unique": "China;Israel;United Kingdom;United States;Switzerland" + }, + { + "id": "article-26462", + "title": "From Understanding the Population Dynamics of the NSGA-II to the First Proven Lower Bounds", + "track": "main", + "status": "Technical", + "abstract": "Due to the more complicated population dynamics of the NSGA-II, none of the existing runtime guarantees for this algorithm is accompanied by a non-trivial lower bound. 
Via a first mathematical understanding of the population dynamics of the NSGA-II, that is, by estimating the expected number of individuals having a certain objective value, we prove that the NSGA-II with suitable population size needs Omega(Nn log n) function evaluations to find the Pareto front of the OneMinMax problem and Omega(Nn^k) evaluations on the OneJumpZeroJump problem with jump size k. These bounds are asymptotically tight (that is, they match previously shown upper bounds) and show that the NSGA-II here does not even in terms of the parallel runtime (number of iterations) profit from larger population sizes. For the OneJumpZeroJump problem and when the same sorting is used for the computation of the crowding distance contributions of the two objectives, we even obtain a runtime estimate that is tight including the leading constant.", + "primary_area": "search and optimization", + "author": "Benjamin Doerr; Zhongdi Qu", + "authorids": "", + "aff": "Laboratoire d\u2019Informatique (LIX), Ecole Polytechnique, CNRS, Institut Polytechnique de Paris, Palaiseau, France; Laboratoire d\u2019Informatique (LIX), Ecole Polytechnique, CNRS, Institut Polytechnique de Paris, Palaiseau, France", + "bibtex": "@article{Doerr_Qu_2023, title={From Understanding the Population Dynamics of the NSGA-II to the First Proven Lower Bounds}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26462}, DOI={10.1609/aaai.v37i10.26462}, abstractNote={Due to the more complicated population dynamics of the NSGA-II, none of the existing runtime guarantees for this algorithm is accompanied by a non-trivial lower bound. 
Via a first mathematical understanding of the population dynamics of the NSGA-II, that is, by estimating the expected number of individuals having a certain objective value, we prove that the NSGA-II with suitable population size needs Omega(Nn log n) function evaluations to find the Pareto front of the OneMinMax problem and Omega(Nn^k) evaluations on the OneJumpZeroJump problem with jump size k. These bounds are asymptotically tight (that is, they match previously shown upper bounds) and show that the NSGA-II here does not even in terms of the parallel runtime (number of iterations) profit from larger population sizes. For the OneJumpZeroJump problem and when the same sorting is used for the computation of the crowding distance contributions of the two objectives, we even obtain a runtime estimate that is tight including the leading constant.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Doerr, Benjamin and Qu, Zhongdi}, year={2023}, month={Jun.}, pages={12408-12416} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26462/26234", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26462", + "pdf_size": 242810, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4972173761144340489&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "lix.polytechnique.fr;gmail.com", + "email": "lix.polytechnique.fr;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Ecole Polytechnique", + "aff_unique_dep": "Laboratoire d\u2019Informatique (LIX)", + "aff_unique_url": "https://www.polytechnique.edu", + "aff_unique_abbr": "Polytechnique", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Palaiseau", + "aff_country_unique_index": "0;0", + "aff_country_unique": "France" + }, + { + "id": "article-25775", + "title": "From Width-Based Model Checking to Width-Based Automated Theorem Proving", + 
"track": "main", + "status": "Technical", + "abstract": "In the field of parameterized complexity theory, the study of graph width measures has been intimately connected with the development of width-based model checking algorithms for combinatorial properties on graphs. In this work, we introduce a general framework to convert a large class of width-based model-checking algorithms into algorithms that can be used to test the validity of graph-theoretic conjectures on classes of graphs of bounded width. Our framework is modular and can be applied with respect to several well-studied width measures for graphs, including treewidth and cliquewidth.\n\nAs a quantitative application of our framework, we prove analytically that for several long-standing graph-theoretic conjectures, there exists an algorithm that takes a number k as input and correctly determines in time double-exponential in a polynomial of k whether the conjecture is valid on all graphs of treewidth at most k. These upper bounds, which may be regarded as upper-bounds on the size of proofs/disproofs for these conjectures on the class of graphs of treewidth at most k, improve significantly on theoretical upper bounds obtained using previously available techniques.", + "primary_area": "knowledge representation and reasoning", + "author": "Mateus de Oliveira Oliveira; Farhad Vadiee", + "authorids": "", + "aff": "Department of Computer and System Sciences, Stockholm University, Stockholm, Sweden + Department of Informatics, University of Bergen, Bergen, Norway; Department of Informatics, University of Bergen, Bergen, Norway", + "bibtex": "@article{de Oliveira Oliveira_Vadiee_2023, title={From Width-Based Model Checking to Width-Based Automated Theorem Proving}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25775}, DOI={10.1609/aaai.v37i5.25775}, abstractNote={In the field of parameterized complexity theory, the study of graph width measures has been intimately connected with the 
development of width-based model checking algorithms for combinatorial properties on graphs. In this work, we introduce a general framework to convert a large class of width-based model-checking algorithms into algorithms that can be used to test the validity of graph-theoretic conjectures on classes of graphs of bounded width. Our framework is modular and can be applied with respect to several well-studied width measures for graphs, including treewidth and cliquewidth. As a quantitative application of our framework, we prove analytically that for several long-standing graph-theoretic conjectures, there exists an algorithm that takes a number k as input and correctly determines in time double-exponential in a polynomial of k whether the conjecture is valid on all graphs of treewidth at most k. These upper bounds, which may be regarded as upper-bounds on the size of proofs/disproofs for these conjectures on the class of graphs of treewidth at most k, improve significantly on theoretical upper bounds obtained using previously available techniques.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={de Oliveira Oliveira, Mateus and Vadiee, Farhad}, year={2023}, month={Jun.}, pages={6297-6304} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25775/25547", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25775", + "pdf_size": 229668, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2592958254902308690&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "dsv.su.se;uib.no", + "email": "dsv.su.se;uib.no", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Stockholm University;University of Bergen", + "aff_unique_dep": "Department of Computer and System Sciences;Department of Informatics", + "aff_unique_url": "https://www.su.se;https://www.uib.no", + "aff_unique_abbr": "SU;uib", + 
"aff_campus_unique_index": "0+1;1", + "aff_campus_unique": "Stockholm;Bergen", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "Sweden;Norway" + }, + { + "id": "article-25750", + "title": "Frustratingly Easy Truth Discovery", + "track": "main", + "status": "Technical", + "abstract": "Truth discovery is a general name for a broad range of statistical methods aimed to extract the correct answers to questions, based on multiple answers coming from noisy sources. For example, workers in a crowdsourcing platform.\nIn this paper, we consider an extremely simple heuristic for estimating workers' competence using average proximity to other workers. We prove that this estimates well the actual competence level and enables separating high and low quality workers in a wide spectrum of domains and statistical models. Under Gaussian noise, this simple estimate is the unique solution to the MLE with a constant regularization factor. \n\nFinally, weighing workers according to their average proximity in a crowdsourcing setting, results in substantial improvement over unweighted aggregation and other truth discovery algorithms in practice.", + "primary_area": "humans and ai", + "author": "Reshef Meir; Ofra Amir; Omer Ben-Porat; Tsviel Ben Shabat; Gal Cohensius; Lirong Xia", + "authorids": "", + "aff": "Technion\u2014Israel Institute of Technology; Technion\u2014Israel Institute of Technology; Technion\u2014Israel Institute of Technology; Technion\u2014Israel Institute of Technology; Technion\u2014Israel Institute of Technology; Rensselaer Polytechnic Institute (RPI)", + "bibtex": "@article{Meir_Amir_Ben-Porat_Ben Shabat_Cohensius_Xia_2023, title={Frustratingly Easy Truth Discovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25750}, DOI={10.1609/aaai.v37i5.25750}, abstractNote={Truth discovery is a general name for a broad range of statistical methods aimed to extract the correct answers to questions, based on multiple answers coming from 
noisy sources. For example, workers in a crowdsourcing platform.\nIn this paper, we consider an extremely simple heuristic for estimating workers\u2019 competence using average proximity to other workers. We prove that this estimates well the actual competence level and enables separating high and low quality workers in a wide spectrum of domains and statistical models. Under Gaussian noise, this simple estimate is the unique solution to the MLE with a constant regularization factor. Finally, weighing workers according to their average proximity in a crowdsourcing setting, results in substantial improvement over unweighted aggregation and other truth discovery algorithms in practice.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Meir, Reshef and Amir, Ofra and Ben-Porat, Omer and Ben Shabat, Tsviel and Cohensius, Gal and Xia, Lirong}, year={2023}, month={Jun.}, pages={6074-6083} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25750/25522", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25750", + "pdf_size": 1269973, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14782895277271594052&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ie.technion.ac.il;ie.technion.ac.il;ie.technion.ac.il;ie.technion.ac.il;gmail.com;cs.rpi.edu", + "email": "ie.technion.ac.il;ie.technion.ac.il;ie.technion.ac.il;ie.technion.ac.il;gmail.com;cs.rpi.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "Technion\u2014Israel Institute of Technology;Rensselaer Polytechnic Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.technion.ac.il/en/;https://www.rpi.edu", + "aff_unique_abbr": "Technion;RPI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1", + "aff_country_unique": "Israel;United States" + }, + { + "id": 
"article-26470", + "title": "Fully Computer-Assisted Proofs in Extremal Combinatorics", + "track": "main", + "status": "Technical", + "abstract": "We present a fully computer-assisted proof system for solving a particular family of problems in Extremal Combinatorics. Existing techniques using Flag Algebras have proven powerful in the past, but have so far lacked a computational counterpart to derive matching constructive bounds. We demonstrate that common search heuristics are capable of finding constructions far beyond the reach of human intuition. Additionally, the most obvious downside of such heuristics, namely a missing guarantee of global optimality, can often be fully eliminated in this case through lower bounds and stability results coming from the Flag Algebra approach.\n\nTo illustrate the potential of this approach, we study two related and well-known problems in Extremal Graph Theory that go back to questions of Erd\u0151s from the 60s.\nMost notably, we present the first major improvement in the upper bound of the Ramsey multiplicity of K_4 in 25 years, precisely determine the first off-diagonal Ramsey multiplicity number, and settle the minimum number of independent sets of size four in graphs with clique number strictly less than five.", + "primary_area": "search and optimization", + "author": "Olaf Parczyk; Sebastian Pokutta; Christoph Spiegel; Tibor Szab\u00f3", + "authorids": "", + "aff": "Freie Universit\u00e4t Berlin, Institute of Mathematics, Berlin; Technische Universit\u00e4t Berlin, Institute of Mathematics, Berlin + Zuse Institute Berlin, Department for AI in Society, Science, and Technology, Germany; Zuse Institute Berlin, Department for AI in Society, Science, and Technology, Germany; Freie Universit\u00e4t Berlin, Institute of Mathematics, Berlin", + "bibtex": "@article{Parczyk_Pokutta_Spiegel_Szab\u00f3_2023, title={Fully Computer-Assisted Proofs in Extremal Combinatorics}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/26470}, DOI={10.1609/aaai.v37i10.26470}, abstractNote={We present a fully computer-assisted proof system for solving a particular family of problems in Extremal Combinatorics. Existing techniques using Flag Algebras have proven powerful in the past, but have so far lacked a computational counterpart to derive matching constructive bounds. We demonstrate that common search heuristics are capable of finding constructions far beyond the reach of human intuition. Additionally, the most obvious downside of such heuristics, namely a missing guarantee of global optimality, can often be fully eliminated in this case through lower bounds and stability results coming from the Flag Algebra approach. To illustrate the potential of this approach, we study two related and well-known problems in Extremal Graph Theory that go back to questions of Erd\u0151s from the 60s.\nMost notably, we present the first major improvement in the upper bound of the Ramsey multiplicity of K_4 in 25 years, precisely determine the first off-diagonal Ramsey multiplicity number, and settle the minimum number of independent sets of size four in graphs with clique number strictly less than five.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Parczyk, Olaf and Pokutta, Sebastian and Spiegel, Christoph and Szab\u00f3, Tibor}, year={2023}, month={Jun.}, pages={12482-12490} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26470/26242", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26470", + "pdf_size": 155645, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6622086118273220824&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "mi.fu-berlin.de;zib.de;zib.de;mi.fu-berlin.de", + "email": "mi.fu-berlin.de;zib.de;zib.de;mi.fu-berlin.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;2;0", + 
"aff_unique_norm": "Freie Universit\u00e4t Berlin;Technische Universit\u00e4t Berlin;Zuse Institute Berlin", + "aff_unique_dep": "Institute of Mathematics;Institute of Mathematics;Department for AI in Society, Science, and Technology", + "aff_unique_url": "https://www.fu-berlin.de;https://www.tu-berlin.de;https://www.zib.de", + "aff_unique_abbr": "FU Berlin;TU Berlin;ZIB", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Berlin;", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25821", + "title": "Fully Dynamic Online Selection through Online Contention Resolution Schemes", + "track": "main", + "status": "Technical", + "abstract": "We study fully dynamic online selection problems in an adversarial/stochastic setting that includes Bayesian online selection, prophet inequalities, posted price mechanisms, and stochastic probing problems subject to combinatorial constraints. In the classical ``incremental'' version of the problem, selected elements remain active until the end of the input sequence. On the other hand, in the fully dynamic version of the problem, elements stay active for a limited time interval, and then leave. This models, for example, the online matching of tasks to workers with task/worker-dependent working times, and sequential posted pricing of perishable goods. A successful approach to online selection problems in the adversarial setting is given by the notion of Online Contention Resolution Scheme (OCRS), that uses a priori information to formulate a linear relaxation of the underlying optimization problem, whose optimal fractional solution is rounded online for any adversarial order of the input sequence. Our main contribution is providing a general method for constructing an OCRS for fully dynamic online selection problems. 
Then, we show how to employ such OCRS to construct no-regret algorithms in a partial information model with semi-bandit feedback and adversarial inputs.", + "primary_area": "machine learning i", + "author": "Vashist Avadhanula; Andrea Celli; Riccardo Colini-Baldeschi; Stefano Leonardi; Matteo Russo", + "authorids": "", + "aff": "Core Data Science, Meta, London, UK; Department of Computing Sciences, Bocconi University, Milan, Italy; Core Data Science, Meta, London, UK + Department of Computer, Control and Management Engineering Antonio Ruberti, Sapienza University, Rome, Italy; Department of Computer, Control and Management Engineering Antonio Ruberti, Sapienza University, Rome, Italy; Department of Computer, Control and Management Engineering Antonio Ruberti, Sapienza University, Rome, Italy", + "bibtex": "@article{Avadhanula_Celli_Colini-Baldeschi_Leonardi_Russo_2023, title={Fully Dynamic Online Selection through Online Contention Resolution Schemes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25821}, DOI={10.1609/aaai.v37i6.25821}, abstractNote={We study fully dynamic online selection problems in an adversarial/stochastic setting that includes Bayesian online selection, prophet inequalities, posted price mechanisms, and stochastic probing problems subject to combinatorial constraints. In the classical ``incremental\u2019\u2019 version of the problem, selected elements remain active until the end of the input sequence. On the other hand, in the fully dynamic version of the problem, elements stay active for a limited time interval, and then leave. This models, for example, the online matching of tasks to workers with task/worker-dependent working times, and sequential posted pricing of perishable goods. 
A successful approach to online selection problems in the adversarial setting is given by the notion of Online Contention Resolution Scheme (OCRS), that uses a priori information to formulate a linear relaxation of the underlying optimization problem, whose optimal fractional solution is rounded online for any adversarial order of the input sequence. Our main contribution is providing a general method for constructing an OCRS for fully dynamic online selection problems. Then, we show how to employ such OCRS to construct no-regret algorithms in a partial information model with semi-bandit feedback and adversarial inputs.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Avadhanula, Vashist and Celli, Andrea and Colini-Baldeschi, Riccardo and Leonardi, Stefano and Russo, Matteo}, year={2023}, month={Jun.}, pages={6693-6700} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25821/25593", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25821", + "pdf_size": 170553, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15977081382601381920&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;unibocconi.it;fb.com;diag.uniroma1.it;diag.uniroma1.it", + "email": "gmail.com;unibocconi.it;fb.com;diag.uniroma1.it;diag.uniroma1.it", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0+2;2;2", + "aff_unique_norm": "Meta;Bocconi University;Sapienza University", + "aff_unique_dep": "Core Data Science;Department of Computing Sciences;Department of Computer, Control and Management Engineering Antonio Ruberti", + "aff_unique_url": "https://meta.com;https://www.bocconi.edu;https://www.sapienza.uniroma.it", + "aff_unique_abbr": "Meta;Bocconi;Sapienza", + "aff_campus_unique_index": "0;1;0+2;2;2", + "aff_campus_unique": "London;Milan;Rome", + "aff_country_unique_index": "0;1;0+1;1;1", + "aff_country_unique": "United Kingdom;Italy" + 
}, + { + "id": "article-26417", + "title": "Fully Online Matching with Stochastic Arrivals and Departures", + "track": "main", + "status": "Technical", + "abstract": "We study a fully online matching problem with stochastic arrivals and departures. In this model, each online arrival follows a known identical and independent distribution over a fixed set of agent types. Its sojourn time is unknown in advance and follows type-specific distributions with known expectations. The goal is to maximize the weighted reward from successful matches. To solve this problem, we first propose a linear program (LP)-based algorithm whose competitive ratio is lower bounded by 0.155 under mild conditions. We further achieve better ratios in some special cases. To demonstrate the challenges of the problem, we further establish several hardness results. In particular, we show that no online algorithm can achieve a competitive ratio better than 2/3 in this model and there is no LP-based algorithm (with respect to our proposed LP) with a competitive ratio better than 1/3. Finally, we demonstrate the effectiveness and efficiency of our algorithm numerically.", + "primary_area": "planning routing and scheduling", + "author": "Zihao Li; Hao Wang; Zhenzhen Yan", + "authorids": "", + "aff": "Nanyang Technological University; Nanyang Technological University; Nanyang Technological University", + "bibtex": "@article{Li_Wang_Yan_2023, title={Fully Online Matching with Stochastic Arrivals and Departures}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26417}, DOI={10.1609/aaai.v37i10.26417}, abstractNote={We study a fully online matching problem with stochastic arrivals and departures. In this model, each online arrival follows a known identical and independent distribution over a fixed set of agent types. Its sojourn time is unknown in advance and follows type-specific distributions with known expectations. The goal is to maximize the weighted reward from successful matches. 
To solve this problem, we first propose a linear program (LP)-based algorithm whose competitive ratio is lower bounded by 0.155 under mild conditions. We further achieve better ratios in some special cases. To demonstrate the challenges of the problem, we further establish several hardness results. In particular, we show that no online algorithm can achieve a competitive ratio better than 2/3 in this model and there is no LP-based algorithm (with respect to our proposed LP) with a competitive ratio better than 1/3. Finally, we demonstrate the effectiveness and efficiency of our algorithm numerically.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zihao and Wang, Hao and Yan, Zhenzhen}, year={2023}, month={Jun.}, pages={12014-12021} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26417/26189", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26417", + "pdf_size": 227571, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11040067854156403583&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "e.ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "email": "e.ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanyang Technological University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ntu.edu.sg", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25838", + "title": "Fully-Dynamic Decision Trees", + "track": "main", + "status": "Technical", + "abstract": "We develop the first fully dynamic algorithm that maintains a decision tree over an arbitrary sequence of insertions and deletions of labeled examples. 
Given \u03b5>0 our algorithm guarantees that, at every point in time, every node of the decision tree uses a split with Gini gain within an additive \u03b5 of the optimum. For real-valued features the algorithm has an amortized running time per insertion/deletion of O((d\u00b7log\u00b3n)/\u03b5\u00b2), which improves to O((d\u00b7log\u00b2n)/\u03b5) for binary or categorical features, while it uses space O(n\u00b7d), where n is the maximum number of examples at any point in time and d is the number of features. Our algorithm is nearly optimal, as we show that any algorithm with similar guarantees requires amortized running time \u03a9(d) and space \u03a9(n\u00b7d/polylog(nd)). We complement our theoretical results with an extensive experimental evaluation on real-world data, showing the effectiveness of our algorithm.", + "primary_area": "machine learning i", + "author": "Marco Bressan; Gabriel Damay; Mauro Sozio", + "authorids": "", + "aff": "University of Milan; Institut Polytechnique de Paris + T\u00e9l\u00e9com Paris; Institut Polytechnique de Paris + T\u00e9l\u00e9com Paris", + "bibtex": "@article{Bressan_Damay_Sozio_2023, title={Fully-Dynamic Decision Trees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25838}, DOI={10.1609/aaai.v37i6.25838}, abstractNote={We develop the first fully dynamic algorithm that maintains a decision tree over an arbitrary sequence of insertions and deletions of labeled examples. Given \u03b5>0 our algorithm guarantees that, at every point in time, every node of the decision tree uses a split with Gini gain within an additive \u03b5 of the optimum. For real-valued features the algorithm has an amortized running time per insertion/deletion of O((d\u00b7log\u00b3n)/\u03b5\u00b2), which improves to O((d\u00b7log\u00b2n)/\u03b5) for binary or categorical features, while it uses space O(n\u00b7d), where n is the maximum number of examples at any point in time and d is the number of features. 
Our algorithm is nearly optimal, as we show that any algorithm with similar guarantees requires amortized running time \u03a9(d) and space \u03a9(n\u00b7d/polylog(nd)). We complement our theoretical results with an extensive experimental evaluation on real-world data, showing the effectiveness of our algorithm.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bressan, Marco and Damay, Gabriel and Sozio, Mauro}, year={2023}, month={Jun.}, pages={6842-6849} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25838/25610", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25838", + "pdf_size": 3999483, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16963239874775042207&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "unimi.it;telecom-paris.fr;telecom-paris.fr", + "email": "unimi.it;telecom-paris.fr;telecom-paris.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;1+2", + "aff_unique_norm": "University of Milan;Institut Polytechnique de Paris;T\u00e9l\u00e9com Paris", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.unimi.it;https://www.ipparis.fr;https://www.telecom-paris.fr", + "aff_unique_abbr": "UniMi;IP Paris;T\u00e9l\u00e9com Paris", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+1;1+1", + "aff_country_unique": "Italy;France" + }, + { + "id": "article-25715", + "title": "Function Approximation for Solving Stackelberg Equilibrium in Large Perfect Information Games", + "track": "main", + "status": "Technical", + "abstract": "Function approximation (FA) has been a critical component in solving large zero-sum games. Yet, little attention has been given towards FA in solving general-sum extensive-form games, despite them being widely regarded as being computationally more challenging than their fully competitive or cooperative counterparts. 
A key challenge is that for many equilibria in general-sum games, no simple analogue to the state value function used in Markov Decision Processes and zero-sum games exists. In this paper, we propose learning the Enforceable Payoff Frontier (EPF)---a generalization of the state value function for general-sum games. We approximate the optimal Stackelberg extensive-form correlated equilibrium by representing EPFs with neural networks and training them by using appropriate backup operations and loss functions. This is the first method that applies FA to the Stackelberg setting, allowing us to scale to much larger games while still enjoying performance guarantees based on FA error. Additionally, our proposed method guarantees incentive compatibility and is easy to evaluate without having to depend on self-play or approximate best-response oracles.", + "primary_area": "game theory and economic paradigms", + "author": "Chun Kai Ling; J. Zico Kolter; Fei Fang", + "authorids": "", + "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", + "bibtex": "@article{Ling_Kolter_Fang_2023, title={Function Approximation for Solving Stackelberg Equilibrium in Large Perfect Information Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25715}, DOI={10.1609/aaai.v37i5.25715}, abstractNote={Function approximation (FA) has been a critical component in solving large zero-sum games. Yet, little attention has been given towards FA in solving general-sum extensive-form games, despite them being widely regarded as being computationally more challenging than their fully competitive or cooperative counterparts. A key challenge is that for many equilibria in general-sum games, no simple analogue to the state value function used in Markov Decision Processes and zero-sum games exists. 
In this paper, we propose learning the Enforceable Payoff Frontier (EPF)---a generalization of the state value function for general-sum games. We approximate the optimal Stackelberg extensive-form correlated equilibrium by representing EPFs with neural networks and training them by using appropriate backup operations and loss functions. This is the first method that applies FA to the Stackelberg setting, allowing us to scale to much larger games while still enjoying performance guarantees based on FA error. Additionally, our proposed method guarantees incentive compatibility and is easy to evaluate without having to depend on self-play or approximate best-response oracles.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ling, Chun Kai and Kolter, J. Zico and Fang, Fei}, year={2023}, month={Jun.}, pages={5764-5772} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25715/25487", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25715", + "pdf_size": 288601, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14810204256469687626&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26106", + "title": "Fundamentals of Task-Agnostic Data Valuation", + "track": "main", + "status": "Technical", + "abstract": "We study valuing the data of a data owner/seller for a data seeker/buyer. 
Data valuation is often carried out for a specific task assuming a particular utility metric, such as test accuracy on a validation set, that may not exist in practice. In this work, we focus on task-agnostic data valuation without any validation requirements. The data buyer has access to a limited amount of data (which could be publicly available) and seeks more data samples from a data seller. We formulate the problem as estimating the differences in the statistical properties of the data at the seller with respect to the baseline data available at the buyer. We capture these statistical differences through second moment by measuring diversity and relevance of the seller\u2019s data for the buyer; we estimate these measures through queries to the seller without requesting the raw data. We design the queries with the proposed approach so that the seller is blind to the buyer\u2019s raw data and has no knowledge to fabricate responses to the queries to obtain a desired outcome of the diversity and relevance trade-off. We will show through extensive experiments on real tabular and image datasets that the proposed estimates capture the diversity and relevance of the seller\u2019s data for the buyer.", + "primary_area": "machine learning iii", + "author": "Mohammad Mohammadi Amiri; Frederic Berdoz; Ramesh Raskar", + "authorids": "", + "aff": "MIT, Media Lab, 75 Amherst St, Cambridge, MA 02139, USA; EPFL, Lausanne, Switzerland; MIT, Media Lab, 75 Amherst St, Cambridge, MA 02139, USA", + "bibtex": "@article{Mohammadi Amiri_Berdoz_Raskar_2023, title={Fundamentals of Task-Agnostic Data Valuation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26106}, DOI={10.1609/aaai.v37i8.26106}, abstractNote={We study valuing the data of a data owner/seller for a data seeker/buyer. Data valuation is often carried out for a specific task assuming a particular utility metric, such as test accuracy on a validation set, that may not exist in practice. 
In this work, we focus on task-agnostic data valuation without any validation requirements. The data buyer has access to a limited amount of data (which could be publicly available) and seeks more data samples from a data seller. We formulate the problem as estimating the differences in the statistical properties of the data at the seller with respect to the baseline data available at the buyer. We capture these statistical differences through second moment by measuring diversity and relevance of the seller\u2019s data for the buyer; we estimate these measures through queries to the seller without requesting the raw data. We design the queries with the proposed approach so that the seller is blind to the buyer\u2019s raw data and has no knowledge to fabricate responses to the queries to obtain a desired outcome of the diversity and relevance trade-off. We will show through extensive experiments on real tabular and image datasets that the proposed estimates capture the diversity and relevance of the seller\u2019s data for the buyer.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mohammadi Amiri, Mohammad and Berdoz, Frederic and Raskar, Ramesh}, year={2023}, month={Jun.}, pages={9226-9234} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26106/25878", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26106", + "pdf_size": 322574, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16087886184952351927&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mit.edu;epfl.ch;mit.edu", + "email": "mit.edu;epfl.ch;mit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne", + "aff_unique_dep": "Media Lab;", + "aff_unique_url": "https://www.mit.edu;https://www.epfl.ch", + "aff_unique_abbr": "MIT;EPFL", + 
"aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Cambridge;Lausanne", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;Switzerland" + }, + { + "id": "article-26710", + "title": "Future Aware Pricing and Matching for Sustainable On-Demand Ride Pooling", + "track": "aaai special track", + "status": "Technical", + "abstract": "The popularity of on-demand ride pooling is owing to the benefits offered to customers (lower prices), taxi drivers (higher revenue), environment (lower carbon footprint due to fewer vehicles) and aggregation companies like Uber (higher revenue). To achieve these benefits, two key interlinked challenges have to be solved effectively: (a) pricing -- setting prices to customer requests for taxis; and (b) matching -- assignment of customers (that accepted the prices) to taxis/cars. Traditionally, both these challenges have been studied individually and using myopic approaches (considering only current requests), without considering the impact of current matching on addressing future requests. In this paper, we develop a novel framework that handles the pricing and matching problems together, while also considering the future impact of the pricing and matching decisions. In our experimental results on a real-world taxi dataset, we demonstrate that our framework can significantly improve revenue (up to 17% and on average 6.4%) in a sustainable manner by reducing the number of vehicles (up to 14% and on average 10.6%) required to obtain a given fixed revenue and the overall distance travelled by vehicles (up to 11.1% and on average 3.7%). 
That is to say, we are able to provide an ideal win-win scenario for all stakeholders (customers, drivers, aggregator, environment) involved by obtaining higher revenue for customers, drivers, aggregator (ride pooling company) while being good for the environment (due to fewer number of vehicles on the road and lesser fuel consumed).", + "primary_area": "ai for social impact", + "author": "Xianjie Zhang; Pradeep Varakantham; Hao Jiang", + "authorids": "", + "aff": "Key Laboratory for Ubiquitous Network and Service Software of Liaoning Province, School of Software, Dalian University of Technology, China + Singapore Management University, Singapore; Singapore Management University, Singapore; Singapore Management University, Singapore", + "bibtex": "@article{Zhang_Varakantham_Jiang_2023, title={Future Aware Pricing and Matching for Sustainable On-Demand Ride Pooling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26710}, DOI={10.1609/aaai.v37i12.26710}, abstractNote={The popularity of on-demand ride pooling is owing to the benefits offered to customers (lower prices), taxi drivers (higher revenue), environment (lower carbon footprint due to fewer vehicles) and aggregation companies like Uber (higher revenue). To achieve these benefits, two key interlinked challenges have to be solved effectively: (a) pricing -- setting prices to customer requests for taxis; and (b) matching -- assignment of customers (that accepted the prices) to taxis/cars. Traditionally, both these challenges have been studied individually and using myopic approaches (considering only current requests), without considering the impact of current matching on addressing future requests. In this paper, we develop a novel framework that handles the pricing and matching problems together, while also considering the future impact of the pricing and matching decisions. 
In our experimental results on a real-world taxi dataset, we demonstrate that our framework can significantly improve revenue (up to 17% and on average 6.4%) in a sustainable manner by reducing the number of vehicles (up to 14% and on average 10.6%) required to obtain a given fixed revenue and the overall distance travelled by vehicles (up to 11.1% and on average 3.7%). That is to say, we are able to provide an ideal win-win scenario for all stakeholders (customers, drivers, aggregator, environment) involved by obtaining higher revenue for customers, drivers, aggregator (ride pooling company) while being good for the environment (due to fewer number of vehicles on the road and lesser fuel consumed).}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Xianjie and Varakantham, Pradeep and Jiang, Hao}, year={2023}, month={Jun.}, pages={14628-14636} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26710/26482", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26710", + "pdf_size": 400827, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6871237934747671087&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.dlut.edu.cn;smu.edu.sg;phdcs.smu.edu.sg", + "email": "mail.dlut.edu.cn;smu.edu.sg;phdcs.smu.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;1", + "aff_unique_norm": "Dalian University of Technology;Singapore Management University", + "aff_unique_dep": "School of Software;", + "aff_unique_url": "http://www.dlut.edu.cn/;https://www.smu.edu.sg", + "aff_unique_abbr": "DUT;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-27015", + "title": "Fuzzy C-means: Differences on Clustering Behavior between High Dimensional and Functional Data (Student Abstract)", + "track": 
"aaai student abstract and poster program", + "status": "Technical", + "abstract": "Fuzzy c-means (FCM) is a generalization of the classical k-means clustering algorithm to the case where an observation can belong to several clusters at the same time.\nThe algorithm was previously observed to have initialization problems when the number of desired clusters or the number of dimensions of the data are high.\nWe have tested FCM against clustering problems with functional data, generated from stationary Gaussian processes, and thus in principle infinite-dimensional.\nWe observed that when the data is more functional in nature, which can be obtained by tuning the length-scale parameter of the Gaussian process, the aforementioned problems do not appear.\nThis not only indicates that FCM is suitable as a clustering method for functional data, but also illustrates how functional data differs from traditional multivariate data.\nIn addition this seems to suggest a qualitative way to measure the latent dimensionality of the functional distribution itself.", + "primary_area": "", + "author": "Carlos Ramos-Carre\u00f1o", + "authorids": "", + "aff": "Universidad Aut\u00f3noma de Madrid", + "bibtex": "@article{Ramos-Carre\u00f1o_2024, title={Fuzzy C-means: Differences on Clustering Behavior between High Dimensional and Functional Data (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27015}, DOI={10.1609/aaai.v37i13.27015}, abstractNote={Fuzzy c-means (FCM) is a generalization of the classical k-means clustering algorithm to the case where an observation can belong to several clusters at the same time.\nThe algorithm was previously observed to have initialization problems when the number of desired clusters or the number of dimensions of the data are high.\nWe have tested FCM against clustering problems with functional data, generated from stationary Gaussian processes, and thus in principle infinite-dimensional.\nWe observed that when the 
data is more functional in nature, which can be obtained by tuning the length-scale parameter of the Gaussian process, the aforementioned problems do not appear.\nThis not only indicates that FCM is suitable as a clustering method for functional data, but also illustrates how functional data differs from traditional multivariate data.\nIn addition this seems to suggest a qualitative way to measure the latent dimensionality of the functional distribution itself.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ramos-Carre\u00f1o, Carlos}, year={2024}, month={Jul.}, pages={16310-16311} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27015/26787", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27015", + "pdf_size": 91462, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:BlzW-vJSymkJ:scholar.google.com/&scioq=Fuzzy+C-means:+Differences+on+Clustering+Behavior+between+High+Dimensional+and+Functional+Data+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff_domain": "uam.es", + "email": "uam.es", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Universidad Aut\u00f3noma de Madrid", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uam.es", + "aff_unique_abbr": "UAM", + "aff_country_unique_index": "0", + "aff_country_unique": "Spain" + }, + { + "id": "article-27079", + "title": "GAAMA 2.0: An Integrated System That Answers Boolean and Extractive Questions", + "track": "demonstrations", + "status": "Technical", + "abstract": "Recent machine reading comprehension datasets include extractive and boolean questions but current approaches do not offer integrated support for answering both question types. We present a front-end demo to a multilingual machine reading comprehension system that handles boolean and extractive questions. 
It provides a yes/no answer and highlights the supporting evidence for boolean questions. It provides an answer for extractive questions and highlights the answer in the passage. Our system, GAAMA 2.0, achieved first place on the TyDi QA leaderboard at the time of submission. We contrast two different implementations of our approach: including multiple transformer models for easy deployment, and a shared transformer model utilizing adapters to reduce GPU memory footprint for a resource-constrained environment.", + "primary_area": "", + "author": "Scott McCarley; Mihaela Bornea; Sara Rosenthal; Anthony Ferritto; Md Arafat Sultan; Avirup Sil; Radu Florian", + "authorids": "", + "aff": "IBM Research AI; IBM Research AI; IBM Research AI; AWS AI Labs + IBM Research AI; IBM Research AI; IBM Research AI; IBM Research AI", + "bibtex": "@article{McCarley_Bornea_Rosenthal_Ferritto_Sultan_Sil_Florian_2024, title={GAAMA 2.0: An Integrated System That Answers Boolean and Extractive Questions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27079}, DOI={10.1609/aaai.v37i13.27079}, abstractNote={Recent machine reading comprehension datasets include extractive and boolean questions but current approaches do not offer integrated support for answering both question types. We present a front-end demo to a multilingual machine reading comprehension system that handles boolean and extractive questions. It provides a yes/no answer and highlights the supporting evidence for boolean questions. It provides an answer for extractive questions and highlights the answer in the passage. Our system, GAAMA 2.0, achieved first place on the TyDi QA leaderboard at the time of submission. 
We contrast two different implementations of our approach: including multiple transformer models for easy deployment, and a shared transformer model utilizing adapters to reduce GPU memory footprint for a resource-constrained environment.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={McCarley, Scott and Bornea, Mihaela and Rosenthal, Sara and Ferritto, Anthony and Sultan, Md Arafat and Sil, Avirup and Florian, Radu}, year={2024}, month={Jul.}, pages={16461-16463} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27079/26851", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27079", + "pdf_size": 196777, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:DxTepsRxBicJ:scholar.google.com/&scioq=GAAMA+2.0:+An+Integrated+System+That+Answers+Boolean+and+Extractive+Questions&hl=en&as_sdt=0,5", + "gs_version_total": 6, + "aff_domain": "us.ibm.com;us.ibm.com;us.ibm.com;amazon.com;us.ibm.com;us.ibm.com;us.ibm.com", + "email": "us.ibm.com;us.ibm.com;us.ibm.com;amazon.com;us.ibm.com;us.ibm.com;us.ibm.com", + "github": "https://github.com/primeqa/primeqa", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1+0;0;0;0", + "aff_unique_norm": "IBM Research;Amazon Web Services", + "aff_unique_dep": "AI;AWS AI Labs", + "aff_unique_url": "https://www.ibm.com/research;https://aws.amazon.com", + "aff_unique_abbr": "IBM;AWS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25162", + "title": "GAM: Gradient Attention Module of Optimization for Point Clouds Analysis", + "track": "main", + "status": "Technical", + "abstract": "In the point cloud analysis task, the existing local feature aggregation descriptors (LFAD) do not fully utilize the neighborhood information of center points. 
Previous methods only use the distance information to constrain the local aggregation process, which is easy to be affected by abnormal points and cannot adequately fit the original geometry of the point cloud. This paper argues that fine-grained geometric information (FGGI) plays an important role in the aggregation of local features. Based on this, we propose a gradient-based local attention module to address the above problem, which is called Gradient Attention Module (GAM). GAM simplifies the process of extracting the gradient information in the neighborhood to explicit representation using the Zenith Angle matrix and Azimuth Angle matrix, which makes the module 35X faster. The comprehensive experiments on the ScanObjectNN dataset, ShapeNet dataset, S3DIS dataset, Modelnet40 dataset, and KITTI dataset demonstrate the effectiveness, efficientness, and generalization of our newly proposed GAM for 3D point cloud analysis. Especially in S3DIS, GAM achieves the highest index in the current point-based model with mIoU/OA/mAcc of 74.4%/90.6%/83.2%.", + "primary_area": "computer vision i", + "author": "Haotian Hu; Fanyi Wang; Zhiwang Zhang; Yaonong Wang; Laifeng Hu; Yanhao Zhang", + "authorids": "", + "aff": "Zhejiang Leapmotor Technology CO., LTD.; OPPO Research Institute; The University of Sydney; Zhejiang Leapmotor Technology CO., LTD.; Zhejiang Leapmotor Technology CO., LTD.; OPPO Research Institute", + "bibtex": "@article{Hu_Wang_Zhang_Wang_Hu_Zhang_2023, title={GAM: Gradient Attention Module of Optimization for Point Clouds Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25162}, DOI={10.1609/aaai.v37i1.25162}, abstractNote={In the point cloud analysis task, the existing local feature aggregation descriptors (LFAD) do not fully utilize the neighborhood information of center points. 
Previous methods only use the distance information to constrain the local aggregation process, which is easy to be affected by abnormal points and cannot adequately fit the original geometry of the point cloud. This paper argues that fine-grained geometric information (FGGI) plays an important role in the aggregation of local features. Based on this, we propose a gradient-based local attention module to address the above problem, which is called Gradient Attention Module (GAM). GAM simplifies the process of extracting the gradient information in the neighborhood to explicit representation using the Zenith Angle matrix and Azimuth Angle matrix, which makes the module 35X faster. The comprehensive experiments on the ScanObjectNN dataset, ShapeNet dataset, S3DIS dataset, Modelnet40 dataset, and KITTI dataset demonstrate the effectiveness, efficientness, and generalization of our newly proposed GAM for 3D point cloud analysis. Especially in S3DIS, GAM achieves the highest index in the current point-based model with mIoU/OA/mAcc of 74.4%/90.6%/83.2%.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Haotian and Wang, Fanyi and Zhang, Zhiwang and Wang, Yaonong and Hu, Laifeng and Zhang, Yanhao}, year={2023}, month={Jun.}, pages={835-843} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25162/24934", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25162", + "pdf_size": 2000638, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9056559796347648243&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "leapmotor.com;oppo.com;sydney.edu.au;leapmotor.com;leapmotor.com;oppo.com", + "email": "leapmotor.com;oppo.com;sydney.edu.au;leapmotor.com;leapmotor.com;oppo.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;0;1", + "aff_unique_norm": "Zhejiang Leapmotor Technology CO., LTD.;OPPO Research Institute;University 
of Sydney", + "aff_unique_dep": ";;", + "aff_unique_url": ";https://www.oppo.com/en;https://www.sydney.edu.au", + "aff_unique_abbr": ";OPPO RI;USYD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25372", + "title": "GAN Prior Based Null-Space Learning for Consistent Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Consistency and realness have always been the two critical issues of image super-resolution. While the realness has been dramatically improved with the use of GAN prior, the state-of-the-art methods still suffer inconsistencies in local structures and colors (e.g., tooth and eyes). In this paper, we show that these inconsistencies can be analytically eliminated by learning only the null-space component while fixing the range-space part. Further, we design a pooling-based decomposition (PD), a universal range-null space decomposition for super-resolution tasks, which is concise, fast, and parameter-free. PD can be easily applied to state-of-the-art GAN Prior based SR methods to eliminate their inconsistencies, neither compromise the realness nor bring extra parameters or computational costs. Besides, our ablation studies reveal that PD can replace pixel-wise losses for training and achieve better generalization performance when facing unseen downsamplings or even real-world degradation. 
Experiments show that the use of PD refreshes state-of-the-art SR performance and speeds up the convergence of training up to 2~10 times.", + "primary_area": "computer vision iii", + "author": "Yinhuai Wang; Yujie Hu; Jiwen Yu; Jian Zhang", + "authorids": "", + "aff": "Peking University Shenzhen Graduate School, China; Peking University Shenzhen Graduate School, China; Peking University Shenzhen Graduate School, China; Peking University Shenzhen Graduate School, China + Peng Cheng Laboratory, China", + "bibtex": "@article{Wang_Hu_Yu_Zhang_2023, title={GAN Prior Based Null-Space Learning for Consistent Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25372}, DOI={10.1609/aaai.v37i3.25372}, abstractNote={Consistency and realness have always been the two critical issues of image super-resolution. While the realness has been dramatically improved with the use of GAN prior, the state-of-the-art methods still suffer inconsistencies in local structures and colors (e.g., tooth and eyes). In this paper, we show that these inconsistencies can be analytically eliminated by learning only the null-space component while fixing the range-space part. Further, we design a pooling-based decomposition (PD), a universal range-null space decomposition for super-resolution tasks, which is concise, fast, and parameter-free. PD can be easily applied to state-of-the-art GAN Prior based SR methods to eliminate their inconsistencies, neither compromise the realness nor bring extra parameters or computational costs. Besides, our ablation studies reveal that PD can replace pixel-wise losses for training and achieve better generalization performance when facing unseen downsamplings or even real-world degradation. 
Experiments show that the use of PD refreshes state-of-the-art SR performance and speeds up the convergence of training up to 2~10 times.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yinhuai and Hu, Yujie and Yu, Jiwen and Zhang, Jian}, year={2023}, month={Jun.}, pages={2724-2732} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25372/25144", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25372", + "pdf_size": 3499246, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=682364267703656288&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.pku.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "Peking University;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "PKU;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26663", + "title": "GAN-Based Domain Inference Attack", + "track": "aaai special track", + "status": "Technical", + "abstract": "Model-based attacks can infer training data information from deep neural network models. These attacks heavily depend on the attacker's knowledge of the application domain, e.g., using it to determine the auxiliary data for model-inversion attacks. However, attackers may not know what the model is used for in practice. We propose a generative adversarial network (GAN) based method to explore likely or similar domains of a target model -- the model domain inference (MDI) attack. 
For a given target (classification) model, we assume that the attacker knows nothing but the input and output formats and can use the model to derive the prediction for any input in the desired form. Our basic idea is to use the target model to affect a GAN training process for a candidate domain's dataset that is easy to obtain. We find that the target model may distort the training procedure less if the domain is more similar to the target domain. We then measure the distortion level with the distance between GAN-generated datasets, which can be used to rank candidate domains for the target model. Our experiments show that the auxiliary dataset from an MDI top-ranked domain can effectively boost the result of model-inversion attacks.", + "primary_area": "ai for social impact", + "author": "Yuechun Gu; Keke Chen", + "authorids": "", + "aff": "Trustworthy and Intelligent Computing Lab, Computer Science Department, Marquette University, Milwaukee, WI; Trustworthy and Intelligent Computing Lab, Computer Science Department, Marquette University, Milwaukee, WI", + "bibtex": "@article{Gu_Chen_2023, title={GAN-Based Domain Inference Attack}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26663}, DOI={10.1609/aaai.v37i12.26663}, abstractNote={Model-based attacks can infer training data information from deep neural network models. These attacks heavily depend on the attacker\u2019s knowledge of the application domain, e.g., using it to determine the auxiliary data for model-inversion attacks. However, attackers may not know what the model is used for in practice. We propose a generative adversarial network (GAN) based method to explore likely or similar domains of a target model -- the model domain inference (MDI) attack. For a given target (classification) model, we assume that the attacker knows nothing but the input and output formats and can use the model to derive the prediction for any input in the desired form. 
Our basic idea is to use the target model to affect a GAN training process for a candidate domain\u2019s dataset that is easy to obtain. We find that the target model may distort the training procedure less if the domain is more similar to the target domain. We then measure the distortion level with the distance between GAN-generated datasets, which can be used to rank candidate domains for the target model. Our experiments show that the auxiliary dataset from an MDI top-ranked domain can effectively boost the result of model-inversion attacks.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gu, Yuechun and Chen, Keke}, year={2023}, month={Jun.}, pages={14214-14222} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26663/26435", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26663", + "pdf_size": 169791, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=203120652269810514&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 10, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Marquette University", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.marquette.edu", + "aff_unique_abbr": "MU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Milwaukee", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25785", + "title": "GANTEE: Generative Adversarial Network for Taxonomy Enterance Evaluation", + "track": "main", + "status": "Technical", + "abstract": "Taxonomy is formulated as directed acyclic graphs or trees of concepts that support many downstream tasks.\nMany new coming concepts need to be added to an existing taxonomy.\nThe traditional taxonomy expansion task aims only at finding the best position for new coming concepts in the existing taxonomy. 
\nHowever, they have two drawbacks when being applied to the real-scenarios.\nThe previous methods suffer from low-efficiency since they waste much time when most of the new coming concepts are indeed noisy concepts. They also suffer from low-effectiveness since they collect training samples only from the existing taxonomy, which limits the ability of the model to mine more hypernym-hyponym relationships among real concepts.\nThis paper proposes a pluggable framework called Generative Adversarial Network for Taxonomy Entering Evaluation (GANTEE) to alleviate these drawbacks.\nA generative adversarial network is designed in this framework by discriminative models to alleviate the first drawback and the generative model to alleviate the second drawback.\nTwo discriminators are used in GANTEE to provide long-term and short-term rewards, respectively.\nMoreover, to further improve the efficiency, pre-trained language models are used to retrieve the representation of the concepts quickly.\nThe experiments on three real-world large-scale datasets with two different languages show that GANTEE improves the performance of the existing taxonomy expansion methods in both effectiveness and efficiency.", + "primary_area": "knowledge representation and reasoning", + "author": "Zhouhong Gu; Sihang Jiang; Jingping Liu; Yanghua Xiao; Hongwei Feng; Zhixu Li; Jiaqing Liang; Zhong Jian", + "authorids": "", + "aff": "Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, China; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, China; School of Information Science and Engineering, East China University of Science and Technology; Fudan-Aishu Cognitive Intelligence Joint Research Center; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, China; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, China; School of Data Science, Fudan University; HUAWEI 
CBG Edu AI Lab", + "bibtex": "@article{Gu_Jiang_Liu_Xiao_Feng_Li_Liang_Jian_2023, title={GANTEE: Generative Adversarial Network for Taxonomy Enterance Evaluation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25785}, DOI={10.1609/aaai.v37i5.25785}, abstractNote={Taxonomy is formulated as directed acyclic graphs or trees of concepts that support many downstream tasks.\nMany new coming concepts need to be added to an existing taxonomy.\nThe traditional taxonomy expansion task aims only at finding the best position for new coming concepts in the existing taxonomy. However, they have two drawbacks when being applied to the real-scenarios.\nThe previous methods suffer from low-efficiency since they waste much time when most of the new coming concepts are indeed noisy concepts. They also suffer from low-effectiveness since they collect training samples only from the existing taxonomy, which limits the ability of the model to mine more hypernym-hyponym relationships among real concepts.\nThis paper proposes a pluggable framework called Generative Adversarial Network for Taxonomy Entering Evaluation (GANTEE) to alleviate these drawbacks.\nA generative adversarial network is designed in this framework by discriminative models to alleviate the first drawback and the generative model to alleviate the second drawback.\nTwo discriminators are used in GANTEE to provide long-term and short-term rewards, respectively.\nMoreover, to further improve the efficiency, pre-trained language models are used to retrieve the representation of the concepts quickly.\nThe experiments on three real-world large-scale datasets with two different languages show that GANTEE improves the performance of the existing taxonomy expansion methods in both effectiveness and efficiency.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gu, Zhouhong and Jiang, Sihang and Liu, Jingping and Xiao, Yanghua and Feng, Hongwei and Li, Zhixu and 
Liang, Jiaqing and Jian, Zhong}, year={2023}, month={Jun.}, pages={6380-6388} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25785/25557", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25785", + "pdf_size": 2420289, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=974628033557857393&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "fudan.edu.cn;gmail.com;ecust.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;huawei.com", + "email": "fudan.edu.cn;gmail.com;ecust.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;huawei.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;0;0;0;0;2", + "aff_unique_norm": "Fudan University;East China University of Science and Technology;HUAWEI", + "aff_unique_dep": "School of Computer Science;School of Information Science and Engineering;Edu AI Lab", + "aff_unique_url": "https://www.fudan.edu.cn;http://www.ecust.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "Fudan;ECUST;HUAWEI", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26102", + "title": "GENNAPE: Towards Generalized Neural Architecture Performance Estimators", + "track": "main", + "status": "Technical", + "abstract": "Predicting neural architecture performance is a challenging task and is crucial to neural architecture design and search. Existing approaches either rely on neural performance predictors which are limited to modeling architectures in a predefined design space involving specific sets of operators and connection rules, and cannot generalize to unseen architectures, or resort to Zero-Cost Proxies which are not always accurate. 
In this paper, we propose GENNAPE, a Generalized Neural Architecture Performance Estimator, which is pretrained on open neural architecture benchmarks, and aims to generalize to completely unseen architectures through combined innovations in network representation, contrastive pretraining, and a fuzzy clustering-based predictor ensemble. Specifically, GENNAPE represents a given neural network as a Computation Graph (CG) of atomic operations which can model an arbitrary architecture. It first learns a graph encoder via Contrastive Learning to encourage network separation by topological features, and then trains multiple predictor heads, which are soft-aggregated according to the fuzzy membership of a neural network. Experiments show that GENNAPE pretrained on NAS-Bench-101 can achieve superior transferability to 5 different public neural network benchmarks, including NAS-Bench-201, NAS-Bench-301, MobileNet and ResNet families under no or minimum fine-tuning. We further introduce 3 challenging newly labelled neural network benchmarks: HiAML, Inception and Two-Path, which can concentrate in narrow accuracy ranges. Extensive experiments show that GENNAPE can correctly discern high-performance architectures in these families. Finally, when paired with a search algorithm, GENNAPE can find architectures that improve accuracy while reducing FLOPs on three families.", + "primary_area": "machine learning iii", + "author": "Keith G. Mills; Fred X. 
Han; Jialin Zhang; Fabian Chudak; Ali Safari Mamaghani; Mohammad Salameh; Wei Lu; Shangling Jui; Di Niu", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, University of Alberta + Huawei Technologies, Edmonton, Alberta, Canada; Huawei Technologies, Edmonton, Alberta, Canada; Huawei Kirin Solution, Shanghai, China; Huawei Technologies, Edmonton, Alberta, Canada; Department of Electrical and Computer Engineering, University of Alberta + Huawei Technologies, Edmonton, Alberta, Canada; Huawei Technologies, Edmonton, Alberta, Canada; Huawei Technologies, Edmonton, Alberta, Canada; Huawei Kirin Solution, Shanghai, China; Department of Electrical and Computer Engineering, University of Alberta", + "bibtex": "@article{Mills_Han_Zhang_Chudak_Safari Mamaghani_Salameh_Lu_Jui_Niu_2023, title={GENNAPE: Towards Generalized Neural Architecture Performance Estimators}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26102}, DOI={10.1609/aaai.v37i8.26102}, abstractNote={Predicting neural architecture performance is a challenging task and is crucial to neural architecture design and search. Existing approaches either rely on neural performance predictors which are limited to modeling architectures in a predefined design space involving specific sets of operators and connection rules, and cannot generalize to unseen architectures, or resort to Zero-Cost Proxies which are not always accurate. In this paper, we propose GENNAPE, a Generalized Neural Architecture Performance Estimator, which is pretrained on open neural architecture benchmarks, and aims to generalize to completely unseen architectures through combined innovations in network representation, contrastive pretraining, and a fuzzy clustering-based predictor ensemble. Specifically, GENNAPE represents a given neural network as a Computation Graph (CG) of atomic operations which can model an arbitrary architecture. 
It first learns a graph encoder via Contrastive Learning to encourage network separation by topological features, and then trains multiple predictor heads, which are soft-aggregated according to the fuzzy membership of a neural network. Experiments show that GENNAPE pretrained on NAS-Bench-101 can achieve superior transferability to 5 different public neural network benchmarks, including NAS-Bench-201, NAS-Bench-301, MobileNet and ResNet families under no or minimum fine-tuning. We further introduce 3 challenging newly labelled neural network benchmarks: HiAML, Inception and Two-Path, which can concentrate in narrow accuracy ranges. Extensive experiments show that GENNAPE can correctly discern high-performance architectures in these families. Finally, when paired with a search algorithm, GENNAPE can find architectures that improve accuracy while reducing FLOPs on three families.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mills, Keith G. and Han, Fred X. 
and Zhang, Jialin and Chudak, Fabian and Safari Mamaghani, Ali and Salameh, Mohammad and Lu, Wei and Jui, Shangling and Niu, Di}, year={2023}, month={Jun.}, pages={9190-9199} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26102/25874", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26102", + "pdf_size": 1149058, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9205154061983145083&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "ualberta.ca;huawei.com;hisilicon.com;huawei.com;ualberta.ca;huawei.com;hisilicon.com;huawei.com;ualberta.ca", + "email": "ualberta.ca;huawei.com;hisilicon.com;huawei.com;ualberta.ca;huawei.com;hisilicon.com;huawei.com;ualberta.ca", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;1;2;1;0+1;1;1;2;0", + "aff_unique_norm": "University of Alberta;Huawei Technologies;Huawei", + "aff_unique_dep": "Department of Electrical and Computer Engineering;;Kirin Solution", + "aff_unique_url": "https://www.ualberta.ca;https://www.huawei.com;https://www.huawei.com", + "aff_unique_abbr": "UAlberta;Huawei;Huawei", + "aff_campus_unique_index": "1;1;2;1;1;1;1;2", + "aff_campus_unique": ";Edmonton;Shanghai", + "aff_country_unique_index": "0+0;0;1;0;0+0;0;0;1;0", + "aff_country_unique": "Canada;China" + }, + { + "id": "article-25559", + "title": "GLCC: A General Framework for Graph-Level Clustering", + "track": "main", + "status": "Technical", + "abstract": "This paper studies the problem of graph-level clustering, which is a novel yet challenging task. This problem is critical in a variety of real-world applications such as protein clustering and genome analysis in bioinformatics. Recent years have witnessed the success of deep clustering coupled with graph neural networks (GNNs). However, existing methods focus on clustering among nodes given a single graph, while exploring clustering on multiple graphs is still under-explored. 
In this paper, we propose a general graph-level clustering framework named Graph-Level Contrastive Clustering (GLCC) given multiple graphs. Specifically, GLCC first constructs an adaptive affinity graph to explore instance- and cluster-level contrastive learning (CL). Instance-level CL leverages graph Laplacian based contrastive loss to learn clustering-friendly representations while cluster-level CL captures discriminative cluster representations incorporating neighbor information of each sample. Moreover, we utilize neighbor-aware pseudo-labels to reward the optimization of representation learning. The two steps can be alternatively trained to collaborate and benefit each other. Experiments on a range of well-known datasets demonstrate the superiority of our proposed GLCC over competitive baselines.", + "primary_area": "data mining and knowledge management", + "author": "Wei Ju; Yiyang Gu; Binqi Chen; Gongbo Sun; Yifang Qin; Xingyuming Liu; Xiao Luo; Ming Zhang", + "authorids": "", + "aff": "School of Computer Science, Peking University, China; School of Computer Science, Peking University, China; School of EECS, Peking University, China; Beijing National Day School, China; School of EECS, Peking University, China; School of EECS, Peking University, China; Department of Computer Science, University of California Los Angeles, USA; School of Computer Science, Peking University, China", + "bibtex": "@article{Ju_Gu_Chen_Sun_Qin_Liu_Luo_Zhang_2023, title={GLCC: A General Framework for Graph-Level Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25559}, DOI={10.1609/aaai.v37i4.25559}, abstractNote={This paper studies the problem of graph-level clustering, which is a novel yet challenging task. This problem is critical in a variety of real-world applications such as protein clustering and genome analysis in bioinformatics. Recent years have witnessed the success of deep clustering coupled with graph neural networks (GNNs). 
However, existing methods focus on clustering among nodes given a single graph, while exploring clustering on multiple graphs is still under-explored. In this paper, we propose a general graph-level clustering framework named Graph-Level Contrastive Clustering (GLCC) given multiple graphs. Specifically, GLCC first constructs an adaptive affinity graph to explore instance- and cluster-level contrastive learning (CL). Instance-level CL leverages graph Laplacian based contrastive loss to learn clustering-friendly representations while cluster-level CL captures discriminative cluster representations incorporating neighbor information of each sample. Moreover, we utilize neighbor-aware pseudo-labels to reward the optimization of representation learning. The two steps can be alternatively trained to collaborate and benefit each other. Experiments on a range of well-known datasets demonstrate the superiority of our proposed GLCC over competitive baselines.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ju, Wei and Gu, Yiyang and Chen, Binqi and Sun, Gongbo and Qin, Yifang and Liu, Xingyuming and Luo, Xiao and Zhang, Ming}, year={2023}, month={Jun.}, pages={4391-4399} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25559/25331", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25559", + "pdf_size": 396256, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5570570228084123768&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 9, + "aff_domain": "pku.edu.cn;pku.edu.cn;gmail.com;163.com;pku.edu.cn;pku.edu.cn;cs.ucla.edu;cs.pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;gmail.com;163.com;pku.edu.cn;pku.edu.cn;cs.ucla.edu;cs.pku.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;0;0;2;0", + "aff_unique_norm": "Peking University;Beijing National Day School;University of California, Los Angeles", + "aff_unique_dep": "School of 
Computer Science;;Department of Computer Science", + "aff_unique_url": "http://www.pku.edu.cn;;https://www.ucla.edu", + "aff_unique_abbr": "Peking U;;UCLA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25287", + "title": "GLT-T: Global-Local Transformer Voting for 3D Single Object Tracking in Point Clouds", + "track": "main", + "status": "Technical", + "abstract": "Current 3D single object tracking methods are typically based on VoteNet, a 3D region proposal network. Despite the success, using a single seed point feature as the cue for offset learning in VoteNet prevents high-quality 3D proposals from being generated. Moreover, seed points with different importance are treated equally in the voting process, aggravating this defect. To address these issues, we propose a novel global-local transformer voting scheme to provide more informative cues and guide the model pay more attention on potential seed points, promoting the generation of high-quality 3D proposals. Technically, a global-local transformer (GLT) module is employed to integrate object- and patch-aware prior into seed point features to effectively form strong feature representation for geometric positions of the seed points, thus providing more robust and accurate cues for offset learning. Subsequently, a simple yet effective training strategy is designed to train the GLT module. We develop an importance prediction branch to learn the potential importance of the seed points and treat the output weights vector as a training constraint term. By incorporating the above components together, we exhibit a superior tracking method GLT-T. Extensive experiments on challenging KITTI and NuScenes benchmarks demonstrate that GLT-T achieves state-of-the-art performance in the 3D single object tracking task. 
Besides, further ablation studies show the advantages of the proposed global-local transformer voting scheme over the original VoteNet. Code and models will be available at https://github.com/haooozi/GLT-T.", + "primary_area": "computer vision ii", + "author": "Jiahao Nie; Zhiwei He; Yuxiang Yang; Mingyu Gao; Jing Zhang", + "authorids": "", + "aff": "School of Electronics and Information, Hangzhou Dianzi University, China; School of Electronics and Information, Hangzhou Dianzi University, China; School of Electronics and Information, Hangzhou Dianzi University, China; School of Electronics and Information, Hangzhou Dianzi University, China; School of Computer Science, The University of Sydney, Australia", + "bibtex": "@article{Nie_He_Yang_Gao_Zhang_2023, title={GLT-T: Global-Local Transformer Voting for 3D Single Object Tracking in Point Clouds}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25287}, DOI={10.1609/aaai.v37i2.25287}, abstractNote={Current 3D single object tracking methods are typically based on VoteNet, a 3D region proposal network. Despite the success, using a single seed point feature as the cue for offset learning in VoteNet prevents high-quality 3D proposals from being generated. Moreover, seed points with different importance are treated equally in the voting process, aggravating this defect. To address these issues, we propose a novel global-local transformer voting scheme to provide more informative cues and guide the model pay more attention on potential seed points, promoting the generation of high-quality 3D proposals. Technically, a global-local transformer (GLT) module is employed to integrate object- and patch-aware prior into seed point features to effectively form strong feature representation for geometric positions of the seed points, thus providing more robust and accurate cues for offset learning. Subsequently, a simple yet effective training strategy is designed to train the GLT module. 
We develop an importance prediction branch to learn the potential importance of the seed points and treat the output weights vector as a training constraint term. By incorporating the above components together, we exhibit a superior tracking method GLT-T. Extensive experiments on challenging KITTI and NuScenes benchmarks demonstrate that GLT-T achieves state-of-the-art performance in the 3D single object tracking task. Besides, further ablation studies show the advantages of the proposed global-local transformer voting scheme over the original VoteNet. Code and models will be available at https://github.com/haooozi/GLT-T.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nie, Jiahao and He, Zhiwei and Yang, Yuxiang and Gao, Mingyu and Zhang, Jing}, year={2023}, month={Jun.}, pages={1957-1965} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25287/25059", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25287", + "pdf_size": 874194, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8154832060820736083&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;sydney.edu.au", + "email": "hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;sydney.edu.au", + "github": "https://github.com/haooozi/GLT-T", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Hangzhou Dianzi University;The University of Sydney", + "aff_unique_dep": "School of Electronics and Information;School of Computer Science", + "aff_unique_url": "http://www.hdu.edu.cn/;https://www.sydney.edu.au", + "aff_unique_abbr": ";USYD", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Sydney", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26143", + "title": "GLUECons: A Generic Benchmark for Learning under Constraints", + "track": "main", + 
"status": "Technical", + "abstract": "Recent research has shown that integrating domain knowledge into deep learning architectures is effective; It helps reduce the amount of required data, improves the accuracy of the models' decisions, and improves the interpretability of models. However, the research community lacks a convened benchmark for systematically evaluating knowledge integration methods.\nIn this work, we create a benchmark that is a collection of nine tasks in the domains of natural language processing and computer vision. In all cases, we model external knowledge as constraints, specify the sources of the constraints for each task, and implement various models that use these constraints.\nWe report the results of these models using a new set of extended evaluation criteria in addition to the task performances for a more in-depth analysis. This effort provides a framework for a more comprehensive and systematic comparison of constraint integration techniques and for identifying related research challenges. 
It will facilitate further research for alleviating some problems of state-of-the-art neural models.", + "primary_area": "machine learning iii", + "author": "Hossein Rajaby Faghihi; Aliakbar Nafar; Chen Zheng; Roshanak Mirzaee; Yue Zhang; Andrzej Uszok; Alexander Wan; Tanawan Premsri; Dan Roth; Parisa Kordjamshidi", + "authorids": "", + "aff": "Michigan State University; Michigan State University; Michigan State University; Michigan State University; Michigan State University; Florida Institute for Human and Machine Cognition; University of California Berkeley+Michigan State University; Michigan State University; University of Pennsylvania; Michigan State University", + "bibtex": "@article{Rajaby Faghihi_Nafar_Zheng_Mirzaee_Zhang_Uszok_Wan_Premsri_Roth_Kordjamshidi_2023, title={GLUECons: A Generic Benchmark for Learning under Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26143}, DOI={10.1609/aaai.v37i8.26143}, abstractNote={Recent research has shown that integrating domain knowledge into deep learning architectures is effective; It helps reduce the amount of required data, improves the accuracy of the models\u2019 decisions, and improves the interpretability of models. However, the research community lacks a convened benchmark for systematically evaluating knowledge integration methods.\nIn this work, we create a benchmark that is a collection of nine tasks in the domains of natural language processing and computer vision. In all cases, we model external knowledge as constraints, specify the sources of the constraints for each task, and implement various models that use these constraints.\nWe report the results of these models using a new set of extended evaluation criteria in addition to the task performances for a more in-depth analysis. This effort provides a framework for a more comprehensive and systematic comparison of constraint integration techniques and for identifying related research challenges. 
It will facilitate further research for alleviating some problems of state-of-the-art neural models.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rajaby Faghihi, Hossein and Nafar, Aliakbar and Zheng, Chen and Mirzaee, Roshanak and Zhang, Yue and Uszok, Andrzej and Wan, Alexander and Premsri, Tanawan and Roth, Dan and Kordjamshidi, Parisa}, year={2023}, month={Jun.}, pages={9552-9561} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26143/25915", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26143", + "pdf_size": 165342, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8346605199463444518&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 11, + "aff_domain": "msu.com;msu.com;msu.com;msu.com;msu.com;ihmc.org;berkeley.edu;msu.com;seas.upenn.edu;msu.com", + "email": "msu.com;msu.com;msu.com;msu.com;msu.com;ihmc.org;berkeley.edu;msu.com;seas.upenn.edu;msu.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;1;2+0;0;3;0", + "aff_unique_norm": "Michigan State University;Florida Institute for Human and Machine Cognition;University of California, Berkeley;University of Pennsylvania", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.msu.edu;https://www.ihmc.us;https://www.berkeley.edu;https://www.upenn.edu", + "aff_unique_abbr": "MSU;IHMC;UC Berkeley;UPenn", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Berkeley", + "aff_country_unique_index": "0;0;0;0;0;0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25578", + "title": "GMDNet: A Graph-Based Mixture Density Network for Estimating Packages\u2019 Multimodal Travel Time Distribution", + "track": "main", + "status": "Technical", + "abstract": "", + "primary_area": "data mining and knowledge management", + "author": "Xiaowei Mao; Huaiyu Wan; Haomin Wen; Fan Wu; Jianbin Zheng; Yuting Qiang; Shengnan Guo; Lixia Wu; 
Haoyuan Hu; Youfang Lin", + "authorids": "", + "aff": ";;;;;;;;;", + "bibtex": "@article{Mao_Wan_Wen_Wu_Zheng_Qiang_Guo_Wu_Hu_Lin_2024, title={GMDNet: A Graph-Based Mixture Density Network for Estimating Packages\u2019 Multimodal Travel Time Distribution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25578}, DOI={10.1609/aaai.v37i4.25578}, abstractNote={<p>In the logistics network, accurately estimating packages\u2019 Travel Time Distribution (TTD) given the routes greatly benefits both consumers and platforms. Although recent works perform well in predicting an expected time or a time distribution in a road network, they could not be well applied to estimate TTD in logistics networks. Because TTD prediction in the logistics network requires modeling packages\u2019 multimodal TTD (MTTD, i.e., there can be more than one likely output with a given input) while leveraging the complex correlations in the logistics network. To this end, this work opens appealing research opportunities in studying MTTD learning conditioned on graph-structure data by investigating packages\u2019 travel time distribution in the logistics network. We propose a Graph-based Mixture Density Network, named GMDNet, which takes the benefits of both graph neural network and mixture density network for estimating MTTD conditioned on graph-structure data (i.e., the logistics network). Furthermore, we adopt the Expectation-Maximization (EM) framework in the training process to guarantee local convergence and thus obtain more stable results than gradient descent. Extensive experiments on two real-world datasets demonstrate the superiority of our proposed model.</p>\n<p><strong>Corrigendum Notice</strong></p>\n<p>In the initial publication of this article, the authors\u00a0<span style="margin: 0px; padding: 0px;">(Mao et al. 2023) acknowledged that although it referred to an earlier paper already presented and published in ICML-21 (Errica et al. 
2021), it insufficiently acknowledged the extent to which it incorporated and made extensive use of techniques therein. We are providing a\u00a0<em>Corrigendum Note</em>, "PDF (2024-09-25),"</span>\u00a0alongside the original published version. The <em>Corrigendum Note</em> summarizes the main novel contributions of this paper.</p>\n<p>Errica, F.; Bacciu, D.; and Micheli, A. 2021. Graph Mixture Density Networks. In Proceedings of the 38th International Conference on Machine Learning (PMLR-28), 3025\u20133035. PMLR.<br /><br />Mao, X.; Wan, H.; Wen, H.; Wu, F.; Zheng, J.; Qiang, Y.; Guo, S.; Wu, L.; Hu, H.; and Lin, Y. 2023. GMDNet: A Graph-Based Mixture Density Network for Estimating Packages\u2019 Multimodal Travel Time Distribution. In Proceedings of the 37th AAAI Conference on Artificial Intelligence.</p>}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Xiaowei and Wan, Huaiyu and Wen, Haomin and Wu, Fan and Zheng, Jianbin and Qiang, Yuting and Guo, Shengnan and Wu, Lixia and Hu, Haoyuan and Lin, Youfang}, year={2024}, month={Sep.}, pages={4561-4568} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25578/33778", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25578", + "pdf_size": 939000, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8085837359985723619&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10 + }, + { + "id": "article-26298", + "title": "GOHSP: A Unified Framework of Graph and Optimization-Based Heterogeneous Structured Pruning for Vision Transformer", + "track": "main", + "status": "Technical", + "abstract": "The recently proposed Vision transformers (ViTs) have shown\nvery impressive empirical performance in various computer vision tasks,\nand they are viewed as an important type of foundation model. 
However, ViTs are typically constructed with large-scale sizes, which then\nseverely hinder their potential deployment in many practical resources constrained applications. \nTo mitigate this challenging problem, structured pruning is a promising solution to compress model size and enable\npractical efficiency. However, unlike its current popularity for CNNs and\nRNNs, structured pruning for ViT models is little explored.\nIn this paper, we propose GOHSP, a unified framework of Graph and\nOptimization-based Structured Pruning for ViT models. We first develop\na graph-based ranking for measuring the importance of attention heads,\nand the extracted importance information is further integrated to an\noptimization-based procedure to impose the heterogeneous structured\nsparsity patterns on the ViT models. Experimental results show that\nour proposed GOHSP demonstrates excellent compression performance.\nOn CIFAR-10 dataset, our approach can bring 40% parameters reduction\nwith no accuracy loss for ViT-Small model. On ImageNet dataset, with\n30% and 35% sparsity ratio for DeiT-Tiny and DeiT-Small models, our\napproach achieves 1.65% and 0.76% accuracy increase over the existing\nstructured pruning methods, respectively.", + "primary_area": "machine learning iv", + "author": "Miao Yin; Burak Uzkent; Yilin Shen; Hongxia Jin; Bo Yuan", + "authorids": "", + "aff": "Rutgers University; Samsung Research America; Samsung Research America; Samsung Research America; Rutgers University", + "bibtex": "@article{Yin_Uzkent_Shen_Jin_Yuan_2023, title={GOHSP: A Unified Framework of Graph and Optimization-Based Heterogeneous Structured Pruning for Vision Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26298}, DOI={10.1609/aaai.v37i9.26298}, abstractNote={The recently proposed Vision transformers (ViTs) have shown\nvery impressive empirical performance in various computer vision tasks,\nand they are viewed as an important type of foundation model. 
However, ViTs are typically constructed with large-scale sizes, which then\nseverely hinder their potential deployment in many practical resources constrained applications. To mitigate this challenging problem, structured pruning is a promising solution to compress model size and enable\npractical efficiency. However, unlike its current popularity for CNNs and\nRNNs, structured pruning for ViT models is little explored.\nIn this paper, we propose GOHSP, a unified framework of Graph and\nOptimization-based Structured Pruning for ViT models. We first develop\na graph-based ranking for measuring the importance of attention heads,\nand the extracted importance information is further integrated to an\noptimization-based procedure to impose the heterogeneous structured\nsparsity patterns on the ViT models. Experimental results show that\nour proposed GOHSP demonstrates excellent compression performance.\nOn CIFAR-10 dataset, our approach can bring 40% parameters reduction\nwith no accuracy loss for ViT-Small model. 
On ImageNet dataset, with\n30% and 35% sparsity ratio for DeiT-Tiny and DeiT-Small models, our\napproach achieves 1.65% and 0.76% accuracy increase over the existing\nstructured pruning methods, respectively.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yin, Miao and Uzkent, Burak and Shen, Yilin and Jin, Hongxia and Yuan, Bo}, year={2023}, month={Jun.}, pages={10954-10962} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26298/26070", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26298", + "pdf_size": 353808, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17825768624622331238&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "rutgers.edu;samsung.com;samsung.com;samsung.com;rutgers.edu", + "email": "rutgers.edu;samsung.com;samsung.com;samsung.com;rutgers.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "Rutgers University;Samsung Research America", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.rutgers.edu;https://www.samsung.com/us/careers/research/", + "aff_unique_abbr": "Rutgers;SRA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25169", + "title": "GPTR: Gestalt-Perception Transformer for Diagram Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Diagram object detection is the key basis of practical applications such as textbook question answering. Because the diagram mainly consists of simple lines and color blocks, its visual features are sparser than those of natural images. In addition, diagrams usually express diverse knowledge, in which there are many low-frequency object categories in diagrams. These lead to the fact that traditional data-driven detection model is not suitable for diagrams. 
In this work, we propose a gestalt-perception transformer model for diagram object detection, which is based on an encoder-decoder architecture. Gestalt perception contains a series of laws to explain human perception, that the human visual system tends to perceive patches in an image that are similar, close or connected without abrupt directional changes as a perceptual whole object. Inspired by these thoughts, we build a gestalt-perception graph in transformer encoder, which is composed of diagram patches as nodes and the relationships between patches as edges. This graph aims to group these patches into objects via laws of similarity, proximity, and smoothness implied in these edges, so that the meaningful objects can be effectively detected. The experimental results demonstrate that the proposed GPTR achieves the best results in the diagram object detection task. Our model also obtains comparable results over the competitors in natural image object detection.", + "primary_area": "computer vision i", + "author": "Xin Hu; Lingling Zhang; Jun Liu; Jinfu Fan; Yang You; Yaqiang Wu", + "authorids": "", + "aff": "Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering, School of Computer Science and Technology, Xi\u2019an Jiaotong University, China+National Engineering Lab for Big Data Analytics, Xi\u2019an Jiaotong University, China; Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering, School of Computer Science and Technology, Xi\u2019an Jiaotong University, China+National Engineering Lab for Big Data Analytics, Xi\u2019an Jiaotong University, China; Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering, School of Computer Science and Technology, Xi\u2019an Jiaotong University, China+National Engineering Lab for Big Data Analytics, Xi\u2019an Jiaotong University, China; Department of Control Science and Engineering, Tongji University, Shanghai, China; Department of Computer Science, National University of Singapore, 
Singapore; Lenovo Research, Beijing, China", + "bibtex": "@article{Hu_Zhang_Liu_Fan_You_Wu_2023, title={GPTR: Gestalt-Perception Transformer for Diagram Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25169}, DOI={10.1609/aaai.v37i1.25169}, abstractNote={Diagram object detection is the key basis of practical applications such as textbook question answering. Because the diagram mainly consists of simple lines and color blocks, its visual features are sparser than those of natural images. In addition, diagrams usually express diverse knowledge, in which there are many low-frequency object categories in diagrams. These lead to the fact that traditional data-driven detection model is not suitable for diagrams. In this work, we propose a gestalt-perception transformer model for diagram object detection, which is based on an encoder-decoder architecture. Gestalt perception contains a series of laws to explain human perception, that the human visual system tends to perceive patches in an image that are similar, close or connected without abrupt directional changes as a perceptual whole object. Inspired by these thoughts, we build a gestalt-perception graph in transformer encoder, which is composed of diagram patches as nodes and the relationships between patches as edges. This graph aims to group these patches into objects via laws of similarity, proximity, and smoothness implied in these edges, so that the meaningful objects can be effectively detected. The experimental results demonstrate that the proposed GPTR achieves the best results in the diagram object detection task. 
Our model also obtains comparable results over the competitors in natural image object detection.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Xin and Zhang, Lingling and Liu, Jun and Fan, Jinfu and You, Yang and Wu, Yaqiang}, year={2023}, month={Jun.}, pages={899-907} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25169/24941", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25169", + "pdf_size": 5409323, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1002854295808455483&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "foxmail.com; fzhanglling;xjtu.edu.cn;tongji.edu.cn;comp.nus.edu.sg;lenovo.com", + "email": "foxmail.com; fzhanglling;xjtu.edu.cn;tongji.edu.cn;comp.nus.edu.sg;lenovo.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;1;2;3", + "aff_unique_norm": "Xi'an Jiaotong University;Tongji University;National University of Singapore;Lenovo Research", + "aff_unique_dep": "School of Computer Science and Technology;Department of Control Science and Engineering;Department of Computer Science;", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.tongji.edu.cn;https://www.nus.edu.sg;https://www.lenovo.com", + "aff_unique_abbr": "XJTU;Tongji;NUS;Lenovo", + "aff_campus_unique_index": "0;0;0;2;3", + "aff_campus_unique": "Xi'an;;Shanghai;Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26457", + "title": "GRASMOS: Graph Signage Model Selection for Gene Regulatory Networks", + "track": "main", + "status": "Technical", + "abstract": "Signed networks (networks with positive and negative edges) commonly arise in various domains from molecular biology to social media. 
\nThe edge signs -- i.e., the graph signage -- represent the interaction pattern between the vertices and can provide insights into the underlying system formation process. Generative models considering signage formation are essential for testing hypotheses about the emergence of interactions and for creating synthetic datasets for algorithm benchmarking (especially in areas where obtaining real-world datasets is difficult).\n\nIn this work, we pose a novel Maximum-Likelihood-based optimization problem for modeling signages given their topology and showcase it in the context of gene regulation. Regulatory interactions of genes play a key role in the process of organism development, and when broken can lead to serious organism abnormalities and diseases. Our contributions are threefold: First, we design a new class of signage models for a given topology, and, based on the parameter setting, we discuss its biological interpretations for gene regulatory networks (GRNs). Second, we design algorithms computing the Maximum Likelihood -- depending on the parameter setting, our algorithms range from closed-form expressions to MCMC sampling. Third, we evaluated the results of our algorithms on synthetic datasets and real-world large GRNs. 
Our work can lead to the prediction of unknown gene regulations, novel biological hypotheses, and realistic benchmark datasets in the realm of gene regulation.", + "primary_area": "search and optimization", + "author": "Angelina Brilliantova; Hannah Miller; Ivona Bez\u00e1kov\u00e1", + "authorids": "", + "aff": "Rochester Institute of Technology; Rochester Institute of Technology; Rochester Institute of Technology", + "bibtex": "@article{Brilliantova_Miller_Bez\u00e1kov\u00e1_2023, title={GRASMOS: Graph Signage Model Selection for Gene Regulatory Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26457}, DOI={10.1609/aaai.v37i10.26457}, abstractNote={Signed networks (networks with positive and negative edges) commonly arise in various domains from molecular biology to social media. The edge signs -- i.e., the graph signage -- represent the interaction pattern between the vertices and can provide insights into the underlying system formation process. Generative models considering signage formation are essential for testing hypotheses about the emergence of interactions and for creating synthetic datasets for algorithm benchmarking (especially in areas where obtaining real-world datasets is difficult). In this work, we pose a novel Maximum-Likelihood-based optimization problem for modeling signages given their topology and showcase it in the context of gene regulation. Regulatory interactions of genes play a key role in the process of organism development, and when broken can lead to serious organism abnormalities and diseases. Our contributions are threefold: First, we design a new class of signage models for a given topology, and, based on the parameter setting, we discuss its biological interpretations for gene regulatory networks (GRNs). Second, we design algorithms computing the Maximum Likelihood -- depending on the parameter setting, our algorithms range from closed-form expressions to MCMC sampling. 
Third, we evaluated the results of our algorithms on synthetic datasets and real-world large GRNs. Our work can lead to the prediction of unknown gene regulations, novel biological hypotheses, and realistic benchmark datasets in the realm of gene regulation.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brilliantova, Angelina and Miller, Hannah and Bez\u00e1kov\u00e1, Ivona}, year={2023}, month={Jun.}, pages={12364-12372} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26457/26229", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26457", + "pdf_size": 743407, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:4aG05KQHmrMJ:scholar.google.com/&scioq=GRASMOS:+Graph+Signage+Model+Selection+for+Gene+Regulatory+Networks&hl=en&as_sdt=0,5", + "gs_version_total": 5, + "aff_domain": "rit.edu;mail.rit.edu;cs.rit.edu", + "email": "rit.edu;mail.rit.edu;cs.rit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Rochester Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rit.edu", + "aff_unique_abbr": "RIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25645", + "title": "GRIP: Graph Representation of Immune Repertoire Using Graph Neural Network and Transformer", + "track": "main", + "status": "Technical", + "abstract": "The immune repertoire is a collection of immune recep-tors that has emerged as an important biomarker for both diagnostic and therapeutic of cancer patients. In terms of deep learning, analyzing immune repertoire is a challeng-ing multiple-instance learning problem in which the im-mune repertoire of an individual is a bag, and the immune receptor is an instance. 
Although several deep learning methods for immune repertoire analysis are introduced, they consider the immune repertoire as a set-like struc-ture that doesn\u2019t take account of the nature of the im-mune response. When the immune response occurs, mu-tations are introduced to the immune receptor sequence sequentially to optimize the immune response against the pathogens that enter our body. As a result, immune receptors for the specific pathogen have the lineage of evolution; thus, immune repertoire is better represented as a graph-like structure. In this work, we present our novel method graph representation of immune repertoire (GRIP), which analyzes the immune repertoire as a hier-archical graph structure and utilize the collection of graph neural network followed by graph pooling and transformer to efficiently represents the immune reper-toire as an embedding vector. We show that GRIP predict the survival probability of cancer patients better than the set-based methods and graph-based structure is critical for performance. 
Also, GRIP provides interpretable re-sults, which prove that GRIP adequately use the progno-sis-related immune receptor and give further possibility to use the GRIP as the novel biomarker searching tool", + "primary_area": "domain s of application", + "author": "Yongju Lee; Hyunho Lee; Kyoungseob Shin; Sunghoon Kwon", + "authorids": "", + "aff": "Department of Electronic and Computer Engineering, Seoul National University; Department of Electronic and Computer Engineering, Seoul National University; Department of Electronic and Computer Engineering, Seoul National University; Department of Electronic and Computer Engineering, Seoul National University + Bio-Max Institute, Seoul National University", + "bibtex": "@article{Lee_Lee_Shin_Kwon_2023, title={GRIP: Graph Representation of Immune Repertoire Using Graph Neural Network and Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25645}, DOI={10.1609/aaai.v37i4.25645}, abstractNote={The immune repertoire is a collection of immune recep-tors that has emerged as an important biomarker for both diagnostic and therapeutic of cancer patients. In terms of deep learning, analyzing immune repertoire is a challeng-ing multiple-instance learning problem in which the im-mune repertoire of an individual is a bag, and the immune receptor is an instance. Although several deep learning methods for immune repertoire analysis are introduced, they consider the immune repertoire as a set-like struc-ture that doesn\u2019t take account of the nature of the im-mune response. When the immune response occurs, mu-tations are introduced to the immune receptor sequence sequentially to optimize the immune response against the pathogens that enter our body. As a result, immune receptors for the specific pathogen have the lineage of evolution; thus, immune repertoire is better represented as a graph-like structure. 
In this work, we present our novel method graph representation of immune repertoire (GRIP), which analyzes the immune repertoire as a hier-archical graph structure and utilize the collection of graph neural network followed by graph pooling and transformer to efficiently represents the immune reper-toire as an embedding vector. We show that GRIP predict the survival probability of cancer patients better than the set-based methods and graph-based structure is critical for performance. Also, GRIP provides interpretable re-sults, which prove that GRIP adequately use the progno-sis-related immune receptor and give further possibility to use the GRIP as the novel biomarker searching tool}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Yongju and Lee, Hyunho and Shin, Kyoungseob and Kwon, Sunghoon}, year={2023}, month={Jun.}, pages={5160-5168} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25645/25417", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25645", + "pdf_size": 1284127, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11807362759504411172&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+0", + "aff_unique_norm": "Seoul National University", + "aff_unique_dep": "Department of Electronic and Computer Engineering", + "aff_unique_url": "https://www.snu.ac.kr", + "aff_unique_abbr": "SNU", + "aff_campus_unique_index": "0;0;0;0+0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25624", + "title": "GRLSTM: Trajectory Similarity Computation with Graph-Based Residual LSTM", + "track": "main", + "status": "Technical", + "abstract": "The computation of trajectory 
similarity is a crucial task in many spatial data analysis applications. However, existing methods have been designed primarily for trajectories in Euclidean space, which overlooks the fact that real-world trajectories are often generated on road networks. This paper addresses this gap by proposing a novel framework, called GRLSTM (Graph-based Residual LSTM). To jointly capture the properties of trajectories and road networks, the proposed framework incorporates knowledge graph embedding (KGE), graph neural network (GNN), and the residual network into the multi-layer LSTM (Residual-LSTM). Specifically, the framework constructs a point knowledge graph to study the multi-relation of points, as points may belong to both the trajectory and the road network. KGE is introduced to learn point embeddings and relation embeddings to build the point fusion graph, while GNN is used to capture the topology structure information of the point fusion graph. Finally, Residual-LSTM is used to learn the trajectory embeddings.To further enhance the accuracy and robustness of the final trajectory embeddings, we introduce two new neighbor-based point loss functions, namely, graph-based point loss function and trajectory-based point loss function. 
The GRLSTM is evaluated using two real-world trajectory datasets, and the experimental results demonstrate that GRLSTM outperforms all the state-of-the-art methods significantly.", + "primary_area": "data mining and knowledge management", + "author": "Silin Zhou; Jing Li; Hao Wang; Shuo Shang; Peng Han", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; Harbin Institute of Technology, Shenzhen, China; School of Computer Science, Wuhan University, China; University of Electronic Science and Technology of China + Sichuan Artificial Intelligence Research Institute, Yibin, 644000, China; University of Electronic Science and Technology of China + Sichuan Artificial Intelligence Research Institute, Yibin, 644000, China", + "bibtex": "@article{Zhou_Li_Wang_Shang_Han_2023, title={GRLSTM: Trajectory Similarity Computation with Graph-Based Residual LSTM}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25624}, DOI={10.1609/aaai.v37i4.25624}, abstractNote={The computation of trajectory similarity is a crucial task in many spatial data analysis applications. However, existing methods have been designed primarily for trajectories in Euclidean space, which overlooks the fact that real-world trajectories are often generated on road networks. This paper addresses this gap by proposing a novel framework, called GRLSTM (Graph-based Residual LSTM). To jointly capture the properties of trajectories and road networks, the proposed framework incorporates knowledge graph embedding (KGE), graph neural network (GNN), and the residual network into the multi-layer LSTM (Residual-LSTM). Specifically, the framework constructs a point knowledge graph to study the multi-relation of points, as points may belong to both the trajectory and the road network. 
KGE is introduced to learn point embeddings and relation embeddings to build the point fusion graph, while GNN is used to capture the topology structure information of the point fusion graph. Finally, Residual-LSTM is used to learn the trajectory embeddings.To further enhance the accuracy and robustness of the final trajectory embeddings, we introduce two new neighbor-based point loss functions, namely, graph-based point loss function and trajectory-based point loss function. The GRLSTM is evaluated using two real-world trajectory datasets, and the experimental results demonstrate that GRLSTM outperforms all the state-of-the-art methods significantly.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Silin and Li, Jing and Wang, Hao and Shang, Shuo and Han, Peng}, year={2023}, month={Jun.}, pages={4972-4980} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25624/25396", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25624", + "pdf_size": 278960, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13270412346944445258&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;hotmail.com;gmail.com;gmail.com;hotmail.com", + "email": "gmail.com;hotmail.com;gmail.com;gmail.com;hotmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0+3;0+3", + "aff_unique_norm": "University of Electronic Science and Technology of China;Harbin Institute of Technology;Wuhan University;Sichuan Artificial Intelligence Research Institute", + "aff_unique_dep": ";;School of Computer Science;", + "aff_unique_url": "https://www.uestc.edu.cn;http://en.hhit.edu.cn/;http://www.whu.edu.cn;", + "aff_unique_abbr": "UESTC;HIT;WHU;", + "aff_campus_unique_index": "1;2;3;3", + "aff_campus_unique": ";Shenzhen;Wuhan;Yibin", + "aff_country_unique_index": "0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25690", 
+ "title": "Game Implementation: What Are the Obstructions?", + "track": "main", + "status": "Technical", + "abstract": "In many applications, we want to influence the decisions of independent agents by designing incentives for their actions. We revisit a fundamental problem in this area, called GAME IMPLEMENTATION: Given a game in standard form and a set of desired strategies, can we design a set of payment promises such that if the players take the payment promises into account, then all undominated strategies are desired? Furthermore, we aim to minimize the cost, that is, the worst-case amount of payments.\n\nWe study the tractability of computing such payment promises and determine more closely what obstructions we may have to overcome in doing so. We show that GAME IMPLEMENTATION is NP-hard even for two players, solving in particular a long-standing open question and suggesting more restrictions are necessary to obtain tractability results. We thus study the regime in which players have only a small constant number of strategies and obtain the following. First, this case remains NP-hard even if each player\u2019s utility depends only on three others. Second, we repair a flawed efficient algorithm for the case of both small number of strategies and small number of players. 
Among further results, we characterize sets of desired strategies that can be implemented at zero cost as a generalization of Nash equilibria.", + "primary_area": "game theory and economic paradigms", + "author": "Jiehua Chen; Seyedeh Negar Layegh Khavidaki; Sebastian Vincent Haydn; Sofia Simola; Manuel Sorge", + "authorids": "", + "aff": "TU Wien, Austria; TU Wien, Austria; TU Wien, Austria; TU Wien, Austria; TU Wien, Austria", + "bibtex": "@article{Chen_Layegh Khavidaki_Haydn_Simola_Sorge_2023, title={Game Implementation: What Are the Obstructions?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25690}, DOI={10.1609/aaai.v37i5.25690}, abstractNote={In many applications, we want to influence the decisions of independent agents by designing incentives for their actions. We revisit a fundamental problem in this area, called GAME IMPLEMENTATION: Given a game in standard form and a set of desired strategies, can we design a set of payment promises such that if the players take the payment promises into account, then all undominated strategies are desired? Furthermore, we aim to minimize the cost, that is, the worst-case amount of payments. We study the tractability of computing such payment promises and determine more closely what obstructions we may have to overcome in doing so. We show that GAME IMPLEMENTATION is NP-hard even for two players, solving in particular a long-standing open question and suggesting more restrictions are necessary to obtain tractability results. We thus study the regime in which players have only a small constant number of strategies and obtain the following. First, this case remains NP-hard even if each player\u2019s utility depends only on three others. Second, we repair a flawed efficient algorithm for the case of both small number of strategies and small number of players. 
Among further results, we characterize sets of desired strategies that can be implemented at zero cost as a generalization of Nash equilibria.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jiehua and Layegh Khavidaki, Seyedeh Negar and Haydn, Sebastian Vincent and Simola, Sofia and Sorge, Manuel}, year={2023}, month={Jun.}, pages={5557-5564} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25690/25462", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25690", + "pdf_size": 182808, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:sLmlcC5ZQLUJ:scholar.google.com/&scioq=Game+Implementation:+What+Are+the+Obstructions%3F&hl=en&as_sdt=0,5", + "gs_version_total": 8, + "aff_domain": "tuwien.ac.at;tuwien.ac.at;student.tuwien.ac.at;tuwien.ac.at;tuwien.ac.at", + "email": "tuwien.ac.at;tuwien.ac.at;student.tuwien.ac.at;tuwien.ac.at;tuwien.ac.at", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Technische Universit\u00e4t Wien", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tuwien.ac.at", + "aff_unique_abbr": "TU Wien", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Austria" + }, + { + "id": "article-25784", + "title": "General Acyclicity and Cyclicity Notions for the Disjunctive Skolem Chase", + "track": "main", + "status": "Technical", + "abstract": "The disjunctive skolem chase is a sound, complete, and potentially non-terminating procedure for solving boolean conjunctive query entailment over knowledge bases of disjunctive existential rules. We develop novel acyclicity and cyclicity notions for this procedure; that is, we develop sufficient conditions to determine chase termination and non-termination. 
Our empirical evaluation shows that our novel notions are significantly more general than existing criteria.", + "primary_area": "knowledge representation and reasoning", + "author": "Lukas Gerlach; David Carral", + "authorids": "", + "aff": "Knowledge-Based Systems Group, TU Dresden, Dresden, Germany; LIRMM, Inria, University of Montpellier, CNRS, Montpellier, France", + "bibtex": "@article{Gerlach_Carral_2023, title={General Acyclicity and Cyclicity Notions for the Disjunctive Skolem Chase}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25784}, DOI={10.1609/aaai.v37i5.25784}, abstractNote={The disjunctive skolem chase is a sound, complete, and potentially non-terminating procedure for solving boolean conjunctive query entailment over knowledge bases of disjunctive existential rules. We develop novel acyclicity and cyclicity notions for this procedure; that is, we develop sufficient conditions to determine chase termination and non-termination. Our empirical evaluation shows that our novel notions are significantly more general than existing criteria.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gerlach, Lukas and Carral, David}, year={2023}, month={Jun.}, pages={6372-6379} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25784/25556", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25784", + "pdf_size": 184727, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9434854672962449709&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "tu-dresden.de;inria.fr", + "email": "tu-dresden.de;inria.fr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Technische Universit\u00e4t Dresden;University of Montpellier", + "aff_unique_dep": "Knowledge-Based Systems Group;", + "aff_unique_url": "https://www.tu-dresden.de;https://www.univ-montp.fr", + "aff_unique_abbr": "TU 
Dresden;UM", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Dresden;Montpellier", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Germany;France" + }, + { + "id": "article-26018", + "title": "Generalization Bounds for Inductive Matrix Completion in Low-Noise Settings", + "track": "main", + "status": "Technical", + "abstract": "We study inductive matrix completion (matrix completion with side information) under an i.i.d. subgaussian noise assumption at a low noise regime, with uniform sampling of the entries. We obtain for the first time generalization bounds with the following three properties: \n\t(1) they scale like the standard deviation of the noise and in particular approach zero in the exact recovery case; (2) even in the presence of noise, they converge to zero when the sample size approaches infinity; and (3) for a fixed dimension of the side information, they only have a logarithmic dependence on the size of the matrix. Differently from many works in approximate recovery, we present results both for bounded Lipschitz losses and for the absolute loss, with the latter relying on Talagrand-type inequalities. 
The proofs create a bridge between two approaches to the theoretical analysis of matrix completion, since they consist in a combination of techniques from both the exact recovery literature and the approximate recovery literature.", + "primary_area": "machine learning ii", + "author": "Antoine Ledent; Rodrigo Alves; Yunwen Lei; Yann Guermeur; Marius Kloft", + "authorids": "", + "aff": "Singapore Management University (SMU); Czech Technical University in Prague (CTU); Hong Kong Baptist University (HKBU); Centre National de la Recherche Scientique (CNRS); Technische Universit\u00e4t Kaiserslautern (TUK)", + "bibtex": "@article{Ledent_Alves_Lei_Guermeur_Kloft_2023, title={Generalization Bounds for Inductive Matrix Completion in Low-Noise Settings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26018}, DOI={10.1609/aaai.v37i7.26018}, abstractNote={We study inductive matrix completion (matrix completion with side information) under an i.i.d. subgaussian noise assumption at a low noise regime, with uniform sampling of the entries. We obtain for the first time generalization bounds with the following three properties: (1) they scale like the standard deviation of the noise and in particular approach zero in the exact recovery case; (2) even in the presence of noise, they converge to zero when the sample size approaches infinity; and (3) for a fixed dimension of the side information, they only have a logarithmic dependence on the size of the matrix. Differently from many works in approximate recovery, we present results both for bounded Lipschitz losses and for the absolute loss, with the latter relying on Talagrand-type inequalities. 
The proofs create a bridge between two approaches to the theoretical analysis of matrix completion, since they consist in a combination of techniques from both the exact recovery literature and the approximate recovery literature.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ledent, Antoine and Alves, Rodrigo and Lei, Yunwen and Guermeur, Yann and Kloft, Marius}, year={2023}, month={Jun.}, pages={8447-8455} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26018/25790", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26018", + "pdf_size": 280297, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5928230321908004776&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "smu.edu.sg;fit.cvut.cz;hotmail.com;loria.fr;cs.uni-kl.de", + "email": "smu.edu.sg;fit.cvut.cz;hotmail.com;loria.fr;cs.uni-kl.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "Singapore Management University;Czech Technical University in Prague;Hong Kong Baptist University;Centre National de la Recherche Scientifique;Technische Universit\u00e4t Kaiserslautern", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.smu.edu.sg;https://www.ctu.cz;https://www.hkbu.edu.hk;https://www.cnrs.fr;https://www.tu-kl.de", + "aff_unique_abbr": "SMU;CTU;HKBU;CNRS;TUK", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Prague", + "aff_country_unique_index": "0;1;2;3;4", + "aff_country_unique": "Singapore;Czech Republic;China;France;Germany" + }, + { + "id": "article-26475", + "title": "Generalized Category Discovery with Decoupled Prototypical Network", + "track": "main", + "status": "Technical", + "abstract": "Generalized Category Discovery (GCD) aims to recognize both known and novel categories from a set of unlabeled data, based on another dataset labeled with only known categories. 
Without considering differences between known and novel categories, current methods learn about them in a coupled manner, which can hurt model's generalization and discriminative ability. Furthermore, the coupled training approach prevents these models transferring category-specific knowledge explicitly from labeled data to unlabeled data, which can lose high-level semantic information and impair model performance. To mitigate above limitations, we present a novel model called Decoupled Prototypical Network (DPN). By formulating a bipartite matching problem for category prototypes, DPN can not only decouple known and novel categories to achieve different training targets effectively, but also align known categories in labeled and unlabeled data to transfer category-specific knowledge explicitly and capture high-level semantics. Furthermore, DPN can learn more discriminative features for both known and novel categories through our proposed Semantic-aware Prototypical Learning (SPL). Besides capturing meaningful semantic information, SPL can also alleviate the noise of hard pseudo labels through semantic-weighted soft assignment. Extensive experiments show that DPN outperforms state-of-the-art models by a large margin on all evaluation metrics across multiple benchmark datasets. 
Code and data are available at https://github.com/Lackel/DPN.", + "primary_area": "speech natural language processing", + "author": "Wenbin An; Feng Tian; Qinghua Zheng; Wei Ding; Qianying Wang; Ping Chen", + "authorids": "", + "aff": "School of Automation Science and Engineering, Xi\u2019an Jiaotong University + National Engineering Laboratory for Big Data Analytics; School of Computer Science and Technology, Xi\u2019an Jiaotong University + National Engineering Laboratory for Big Data Analytics; School of Computer Science and Technology, Xi\u2019an Jiaotong University + National Engineering Laboratory for Big Data Analytics; Department of Computer Science, University of Massachusetts Boston; Lenovo Research; Department of Engineering, University of Massachusetts Boston", + "bibtex": "@article{An_Tian_Zheng_Ding_Wang_Chen_2023, title={Generalized Category Discovery with Decoupled Prototypical Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26475}, DOI={10.1609/aaai.v37i11.26475}, abstractNote={Generalized Category Discovery (GCD) aims to recognize both known and novel categories from a set of unlabeled data, based on another dataset labeled with only known categories. Without considering differences between known and novel categories, current methods learn about them in a coupled manner, which can hurt model\u2019s generalization and discriminative ability. Furthermore, the coupled training approach prevents these models transferring category-specific knowledge explicitly from labeled data to unlabeled data, which can lose high-level semantic information and impair model performance. To mitigate above limitations, we present a novel model called Decoupled Prototypical Network (DPN). 
By formulating a bipartite matching problem for category prototypes, DPN can not only decouple known and novel categories to achieve different training targets effectively, but also align known categories in labeled and unlabeled data to transfer category-specific knowledge explicitly and capture high-level semantics. Furthermore, DPN can learn more discriminative features for both known and novel categories through our proposed Semantic-aware Prototypical Learning (SPL). Besides capturing meaningful semantic information, SPL can also alleviate the noise of hard pseudo labels through semantic-weighted soft assignment. Extensive experiments show that DPN outperforms state-of-the-art models by a large margin on all evaluation metrics across multiple benchmark datasets. Code and data are available at https://github.com/Lackel/DPN.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={An, Wenbin and Tian, Feng and Zheng, Qinghua and Ding, Wei and Wang, Qianying and Chen, Ping}, year={2023}, month={Jun.}, pages={12527-12535} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26475/26247", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26475", + "pdf_size": 329932, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11040419122803603627&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn;umb.edu;lenovo.com;umb.edu", + "email": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn;umb.edu;lenovo.com;umb.edu", + "github": "https://github.com/Lackel/DPN", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;2;3;2", + "aff_unique_norm": "Xi'an Jiaotong University;National Engineering Laboratory for Big Data Analytics;University of Massachusetts Boston;Lenovo", + "aff_unique_dep": "School of Automation Science and Engineering;;Department of Computer Science;Research", + "aff_unique_url": 
"http://www.xjtu.edu.cn;;https://www.umb.edu;https://www.lenovo.com", + "aff_unique_abbr": "XJTU;;UMass Boston;Lenovo", + "aff_campus_unique_index": "0;0;0;2;2", + "aff_campus_unique": "Xi'an;;Boston", + "aff_country_unique_index": "0+0;0+0;0+0;1;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25672", + "title": "Generalized Cell Type Annotation and Discovery for Single-Cell RNA-Seq Data", + "track": "main", + "status": "Technical", + "abstract": "The rapid development of single-cell RNA sequencing (scRNA-seq) technology allows us to study gene expression heterogeneity at the cellular level. Cell annotation is the basis for subsequent downstream analysis in single-cell data mining. Existing methods rarely explore the fine-grained semantic knowledge of novel cell types absent from the reference data and usually susceptible to batch effects on the classification of seen cell types.\nTaking into consideration these limitations, this paper proposes a new and practical task called generalized cell type annotation and discovery for scRNA-seq data. In this task, cells of seen cell types are given class labels, while cells of novel cell types are given cluster labels instead of a unified \u201cunassigned\u201d label. To address this problem, we carefully design a comprehensive evaluation benchmark and propose a novel end-to-end algorithm framework called scGAD. Specifically, scGAD first builds the intrinsic correspondence across the reference and target data by retrieving the geometrically and semantically mutual nearest neighbors as anchor pairs. Then we introduce an anchor-based self-supervised learning module with a connectivity-aware attention mechanism to facilitate model prediction capability on unlabeled target data. To enhance the inter-type separation and intra-type compactness, we further propose a confidential prototypical self-supervised learning module to uncover the consensus category structure of the reference and target data. 
Extensive results on massive real datasets demonstrate the superiority of scGAD over various state-of-the-art clustering and annotation methods.", + "primary_area": "domain s of application", + "author": "Yuyao Zhai; Liang Chen; Minghua Deng", + "authorids": "", + "aff": "School of Mathematical Sciences, Peking University + Center for Statistical Science, Peking University + Center for Quantitative Biology, Peking University; Huawei Technologies Co., Ltd.; School of Mathematical Sciences, Peking University + Center for Statistical Science, Peking University + Center for Quantitative Biology, Peking University", + "bibtex": "@article{Zhai_Chen_Deng_2023, title={Generalized Cell Type Annotation and Discovery for Single-Cell RNA-Seq Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25672}, DOI={10.1609/aaai.v37i4.25672}, abstractNote={The rapid development of single-cell RNA sequencing (scRNA-seq) technology allows us to study gene expression heterogeneity at the cellular level. Cell annotation is the basis for subsequent downstream analysis in single-cell data mining. Existing methods rarely explore the fine-grained semantic knowledge of novel cell types absent from the reference data and usually susceptible to batch effects on the classification of seen cell types.\nTaking into consideration these limitations, this paper proposes a new and practical task called generalized cell type annotation and discovery for scRNA-seq data. In this task, cells of seen cell types are given class labels, while cells of novel cell types are given cluster labels instead of a unified \u201cunassigned\u201d label. To address this problem, we carefully design a comprehensive evaluation benchmark and propose a novel end-to-end algorithm framework called scGAD. Specifically, scGAD first builds the intrinsic correspondence across the reference and target data by retrieving the geometrically and semantically mutual nearest neighbors as anchor pairs. 
Then we introduce an anchor-based self-supervised learning module with a connectivity-aware attention mechanism to facilitate model prediction capability on unlabeled target data. To enhance the inter-type separation and intra-type compactness, we further propose a confidential prototypical self-supervised learning module to uncover the consensus category structure of the reference and target data. Extensive results on massive real datasets demonstrate the superiority of scGAD over various state-of-the-art clustering and annotation methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhai, Yuyao and Chen, Liang and Deng, Minghua}, year={2023}, month={Jun.}, pages={5402-5410} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25672/25444", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25672", + "pdf_size": 566712, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6043175232995874809&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.pku.edu.cn;huawei.com;pku.edu.cn", + "email": "stu.pku.edu.cn;huawei.com;pku.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0+0;1;0+0+0", + "aff_unique_norm": "Peking University;Huawei Technologies", + "aff_unique_dep": "School of Mathematical Sciences;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "PKU;Huawei", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25523", + "title": "Generalized Confidence Constraints", + "track": "main", + "status": "Technical", + "abstract": "In robust optimization, finding a solution that solely respects the constraints is not enough.\nUsually, the uncertainty and unknown parameters of the model are represented by random variables.\nIn such conditions, a good 
solution is a solution robust to most-likely assignments of these random variables.\nRecently, the Confidence constraint has been introduced by Mercier-Aubin et al. in order to enforce this type of robustness in constraint programming.\nUnfortunately, it is restricted to a conjunction of binary inequalities\nIn this paper, we generalize the Confidence constraint to any constraint and propose an implementation based on Multi-valued Decision Diagrams (MDDs). \nThe Confidence constraint is defined over a vector of random variables. \nFor a given constraint C, and given a threshold, the Confidence constraint ensures that the probability for C to be satisfied by a sample of the random variables is greater than the threshold.\nWe propose to use MDDs to represent the constraints on the random variables.\nMDDs are an efficient tool for representing combinatorial constraints, thanks to their exponential compression power.\nHere, both random and decision variables are stored in the MDD, \nand propagation rules are proposed for removing values of decision variables\nthat cannot lead to robust solutions.\nFurthermore, for several constraints, we show that decision variables can be omitted from the MDD because\nlighter filtering algorithms are sufficient. 
This leads to gain an exponential factor in the MDD size.\nThe experimental results obtained on a chemical deliveries problem in factories \u2013 where the chemicals consumption are uncertain \u2013 \nshows the efficiency of the proposed approach.", + "primary_area": "constraint satisfaction and optimization", + "author": "Guillaume Perez; Steve Malalel; Gael Glorian; Victor Jung; Alexandre Papadopoulos; Marie Pelleau; Wijnand Suijlen; Jean-Charles R\u00e9gin; Arnaud Lallouet", + "authorids": "", + "aff": "Huawei Technologie, Boulogne-Billancourt, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Sophia Antipolis, France; Huawei Technologie, Boulogne-Billancourt, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Sophia Antipolis, France; Huawei Technologie, Boulogne-Billancourt, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Sophia Antipolis, France; Huawei Technologie, Boulogne-Billancourt, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Sophia Antipolis, France; Huawei Technologie, Boulogne-Billancourt, France", + "bibtex": "@article{Perez_Malalel_Glorian_Jung_Papadopoulos_Pelleau_Suijlen_R\u00e9gin_Lallouet_2023, title={Generalized Confidence Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25523}, DOI={10.1609/aaai.v37i4.25523}, abstractNote={In robust optimization, finding a solution that solely respects the constraints is not enough.\nUsually, the uncertainty and unknown parameters of the model are represented by random variables.\nIn such conditions, a good solution is a solution robust to most-likely assignments of these random variables.\nRecently, the Confidence constraint has been introduced by Mercier-Aubin et al. in order to enforce this type of robustness in constraint programming.\nUnfortunately, it is restricted to a conjunction of binary inequalities\nIn this paper, we generalize the Confidence constraint to any constraint and propose an implementation based on Multi-valued Decision Diagrams (MDDs). 
The Confidence constraint is defined over a vector of random variables. For a given constraint C, and given a threshold, the Confidence constraint ensures that the probability for C to be satisfied by a sample of the random variables is greater than the threshold.\nWe propose to use MDDs to represent the constraints on the random variables.\nMDDs are an efficient tool for representing combinatorial constraints, thanks to their exponential compression power.\nHere, both random and decision variables are stored in the MDD, and propagation rules are proposed for removing values of decision variables\nthat cannot lead to robust solutions.\nFurthermore, for several constraints, we show that decision variables can be omitted from the MDD because\nlighter filtering algorithms are sufficient. This leads to gain an exponential factor in the MDD size.\nThe experimental results obtained on a chemical deliveries problem in factories \u2013 where the chemicals consumption are uncertain \u2013 shows the efficiency of the proposed approach.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Perez, Guillaume and Malalel, Steve and Glorian, Gael and Jung, Victor and Papadopoulos, Alexandre and Pelleau, Marie and Suijlen, Wijnand and R\u00e9gin, Jean-Charles and Lallouet, Arnaud}, year={2023}, month={Jun.}, pages={4078-4086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25523/25295", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25523", + "pdf_size": 212637, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1288571353408120670&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com; ; ; ; ; ; ; ;", + "email": "gmail.com; ; ; ; ; ; ; ;", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;0;1;0;1;0;1;0", + "aff_unique_norm": "Huawei Technologies;Universit\u00e9 C\u00f4te d\u2019Azur", + "aff_unique_dep": ";", + 
"aff_unique_url": "https://www.huawei.com;https://www.univ-cotedazur.fr", + "aff_unique_abbr": "Huawei;UCA", + "aff_campus_unique_index": "0;1;0;1;0;1;0;1;0", + "aff_campus_unique": "Boulogne-Billancourt;Sophia Antipolis", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-26280", + "title": "Generalized Semantic Segmentation by Self-Supervised Source Domain Projection and Multi-Level Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Deep networks trained on the source domain show degraded performance when tested on unseen target domain data. To enhance the model's generalization ability, most existing domain generalization methods learn domain invariant features by suppressing domain sensitive features. Different from them, we propose a Domain Projection and Contrastive Learning (DPCL) approach for generalized semantic segmentation, which includes two modules: Self-supervised Source Domain Projection (SSDP) and Multi-Level Contrastive Learning (MLCL). SSDP aims to reduce domain gap by projecting data to the source domain, while MLCL is a learning scheme to learn discriminative and generalizable features on the projected data. During test time, we first project the target data by SSDP to mitigate domain shift, then generate the segmentation results by the learned segmentation network based on MLCL. At test time, we can update the projected data by minimizing our proposed pixel-to-pixel contrastive loss to obtain better results. 
Extensive experiments for semantic segmentation demonstrate the favorable generalization capability of our method on benchmark datasets.", + "primary_area": "machine learning iv", + "author": "Liwei Yang; Xiang Gu; Jian Sun", + "authorids": "", + "aff": "School of Mathematics and Statistics, Xi\u2019an Jiaotong University, China; School of Mathematics and Statistics, Xi\u2019an Jiaotong University, China; School of Mathematics and Statistics, Xi\u2019an Jiaotong University, China + Pazhou Laboratory (Huangpu), China + Peng Cheng Laboratory, China", + "bibtex": "@article{Yang_Gu_Sun_2023, title={Generalized Semantic Segmentation by Self-Supervised Source Domain Projection and Multi-Level Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26280}, DOI={10.1609/aaai.v37i9.26280}, abstractNote={Deep networks trained on the source domain show degraded performance when tested on unseen target domain data. To enhance the model\u2019s generalization ability, most existing domain generalization methods learn domain invariant features by suppressing domain sensitive features. Different from them, we propose a Domain Projection and Contrastive Learning (DPCL) approach for generalized semantic segmentation, which includes two modules: Self-supervised Source Domain Projection (SSDP) and Multi-Level Contrastive Learning (MLCL). SSDP aims to reduce domain gap by projecting data to the source domain, while MLCL is a learning scheme to learn discriminative and generalizable features on the projected data. During test time, we first project the target data by SSDP to mitigate domain shift, then generate the segmentation results by the learned segmentation network based on MLCL. At test time, we can update the projected data by minimizing our proposed pixel-to-pixel contrastive loss to obtain better results. 
Extensive experiments for semantic segmentation demonstrate the favorable generalization capability of our method on benchmark datasets.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Liwei and Gu, Xiang and Sun, Jian}, year={2023}, month={Jun.}, pages={10789-10797} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26280/26052", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26280", + "pdf_size": 2826542, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7018136807920938153&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.xjtu.edu.cn;stu.xjtu.edu.cn;xjtu.edu.cn", + "email": "stu.xjtu.edu.cn;stu.xjtu.edu.cn;xjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1+2", + "aff_unique_norm": "Xi'an Jiaotong University;Pazhou Laboratory;Peng Cheng Laboratory", + "aff_unique_dep": "School of Mathematics and Statistics;;", + "aff_unique_url": "http://en.xjtu.edu.cn/;;", + "aff_unique_abbr": "XJTU;;", + "aff_campus_unique_index": "0;0;0+1", + "aff_campus_unique": "Xi'an;Huangpu;", + "aff_country_unique_index": "0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25824", + "title": "Generalizing Downsampling from Regular Data to Graphs", + "track": "main", + "status": "Technical", + "abstract": "Downsampling produces coarsened, multi-resolution representations of data and it is used, for example, to produce lossy compression and visualization of large images, reduce computational costs, and boost deep neural representation learning. \nUnfortunately, due to their lack of a regular structure, there is still no consensus on how downsampling should apply to graphs and linked data. 
Indeed reductions in graph data are still needed for the goals described above, but reduction mechanisms do not have the same focus on preserving topological structures and properties, while allowing for resolution-tuning, as is the case in regular data downsampling.\nIn this paper, we take a step in this direction, introducing a unifying interpretation of downsampling in regular and graph data. In particular, we define a graph coarsening mechanism which is a graph-structured counterpart of controllable equispaced coarsening mechanisms in regular data. We prove theoretical guarantees for distortion bounds on path lengths, as well as the ability to preserve key topological properties in the coarsened graphs. We leverage these concepts to define a graph pooling mechanism that we empirically assess in graph classification tasks, providing a greedy algorithm that allows efficient parallel implementation on GPUs, and showing that it compares favorably against pooling methods in literature.", + "primary_area": "machine learning i", + "author": "Davide Bacciu; Alessio Conte; Francesco Landolfi", + "authorids": "", + "aff": "Department of Computer Science, Universit\u00e0 di Pisa; Department of Computer Science, Universit\u00e0 di Pisa; Department of Computer Science, Universit\u00e0 di Pisa", + "bibtex": "@article{Bacciu_Conte_Landolfi_2023, title={Generalizing Downsampling from Regular Data to Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25824}, DOI={10.1609/aaai.v37i6.25824}, abstractNote={Downsampling produces coarsened, multi-resolution representations of data and it is used, for example, to produce lossy compression and visualization of large images, reduce computational costs, and boost deep neural representation learning. Unfortunately, due to their lack of a regular structure, there is still no consensus on how downsampling should apply to graphs and linked data. 
Indeed reductions in graph data are still needed for the goals described above, but reduction mechanisms do not have the same focus on preserving topological structures and properties, while allowing for resolution-tuning, as is the case in regular data downsampling.\nIn this paper, we take a step in this direction, introducing a unifying interpretation of downsampling in regular and graph data. In particular, we define a graph coarsening mechanism which is a graph-structured counterpart of controllable equispaced coarsening mechanisms in regular data. We prove theoretical guarantees for distortion bounds on path lengths, as well as the ability to preserve key topological properties in the coarsened graphs. We leverage these concepts to define a graph pooling mechanism that we empirically assess in graph classification tasks, providing a greedy algorithm that allows efficient parallel implementation on GPUs, and showing that it compares favorably against pooling methods in literature.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bacciu, Davide and Conte, Alessio and Landolfi, Francesco}, year={2023}, month={Jun.}, pages={6718-6727} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25824/25596", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25824", + "pdf_size": 386949, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14586850935046266377&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "unipi.it;unipi.it;phd.unipi.it", + "email": "unipi.it;unipi.it;phd.unipi.it", + "github": "", + "project": "https://arxiv.org/abs/2208.03523", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Universit\u00e0 di Pisa", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.unipi.it", + "aff_unique_abbr": "UniPi", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26548", + "title": "Generalizing Math Word Problem Solvers via Solution Diversification", + "track": "main", + "status": "Technical", + "abstract": "Current math word problem (MWP) solvers are usually Seq2Seq models trained by the (one-problem; one-solution) pairs, each of which is made of a problem description and a solution showing reasoning flow to get the correct answer. However, one MWP problem naturally has multiple solution equations. The training of an MWP solver with (one-problem; one-solution) pairs excludes other correct solutions, and thus limits the generalizability of the MWP solver. One feasible solution to this limitation is to augment multiple solutions to a given problem. However, it is difficult to collect diverse and accurate augment solutions through human efforts. In this paper, we design a new training framework for an MWP solver by introducing a solution buffer and a solution discriminator. The buffer includes solutions generated by an MWP solver to encourage the training data diversity. The discriminator controls the quality of buffered solutions to participate in training. Our framework is flexibly applicable to a wide setting of fully, semi-weakly and weakly supervised training for all Seq2Seq MWP solvers. 
We conduct extensive experiments on a benchmark dataset Math23k and a new dataset named Weak12k, and show that our framework improves the performance of various MWP solvers under different settings by generating correct and diverse solutions.", + "primary_area": "speech natural language processing", + "author": "Zhenwen Liang; Jipeng Zhang; Lei Wang; Yan Wang; Jie Shao; Xiangliang Zhang", + "authorids": "", + "aff": "University of Notre Dame; Hong Kong University of Science and Technology; Singapore Management University; Tencent AI Lab; University of Electronic Science and Technology of China; University of Notre Dame", + "bibtex": "@article{Liang_Zhang_Wang_Wang_Shao_Zhang_2023, title={Generalizing Math Word Problem Solvers via Solution Diversification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26548}, DOI={10.1609/aaai.v37i11.26548}, abstractNote={Current math word problem (MWP) solvers are usually Seq2Seq models trained by the (one-problem; one-solution) pairs, each of which is made of a problem description and a solution showing reasoning flow to get the correct answer. However, one MWP problem naturally has multiple solution equations. The training of an MWP solver with (one-problem; one-solution) pairs excludes other correct solutions, and thus limits the generalizability of the MWP solver. One feasible solution to this limitation is to augment multiple solutions to a given problem. However, it is difficult to collect diverse and accurate augment solutions through human efforts. In this paper, we design a new training framework for an MWP solver by introducing a solution buffer and a solution discriminator. The buffer includes solutions generated by an MWP solver to encourage the training data diversity. The discriminator controls the quality of buffered solutions to participate in training. Our framework is flexibly applicable to a wide setting of fully, semi-weakly and weakly supervised training for all Seq2Seq MWP solvers. 
We conduct extensive experiments on a benchmark dataset Math23k and a new dataset named Weak12k, and show that our framework improves the performance of various MWP solvers under different settings by generating correct and diverse solutions.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Zhenwen and Zhang, Jipeng and Wang, Lei and Wang, Yan and Shao, Jie and Zhang, Xiangliang}, year={2023}, month={Jun.}, pages={13183-13191} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26548/26320", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26548", + "pdf_size": 349118, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14539287149200105645&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 8, + "aff_domain": "nd.edu;conect.ust.hk;phdcs.smu.edu.sg;gmail.com;uestc.edu.cn;nd.edu", + "email": "nd.edu;conect.ust.hk;phdcs.smu.edu.sg;gmail.com;uestc.edu.cn;nd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;0", + "aff_unique_norm": "University of Notre Dame;Hong Kong University of Science and Technology;Singapore Management University;Tencent;University of Electronic Science and Technology of China", + "aff_unique_dep": ";;;Tencent AI Lab;", + "aff_unique_url": "https://www.nd.edu;https://www.ust.hk;https://www.smu.edu.sg;https://ai.tencent.com;https://www.uestc.edu.cn", + "aff_unique_abbr": "Notre Dame;HKUST;SMU;Tencent AI Lab;UESTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;1;1;0", + "aff_country_unique": "United States;China;Singapore" + }, + { + "id": "article-25437", + "title": "Generalizing Multiple Object Tracking to Unseen Domains by Introducing Natural Language Representation", + "track": "main", + "status": "Technical", + "abstract": "Although existing multi-object tracking (MOT) algorithms have obtained competitive performance on various benchmarks, 
almost all of them train and validate models on the same domain. The domain generalization problem of MOT is hardly studied. To bridge this gap, we first draw the observation that the high-level information contained in natural language is domain invariant to different tracking domains. Based on this observation, we propose to introduce natural language representation into visual MOT models for boosting the domain generalization ability. However, it is infeasible to label every tracking target with a textual description. To tackle this problem, we design two modules, namely visual context prompting (VCP) and visual-language mixing (VLM). Specifically, VCP generates visual prompts based on the input frames. VLM joints the information in the generated visual prompts and the textual prompts from a pre-defined Trackbook to obtain instance-level pseudo textual description, which is domain invariant to different tracking scenes. Through training models on MOT17 and validating them on MOT20, we observe that the pseudo textual descriptions generated by our proposed modules improve the generalization performance of query-based trackers by large margins.", + "primary_area": "computer vision iii", + "author": "En Yu; Songtao Liu; Zhuoling Li; Jinrong Yang; Zeming Li; Shoudong Han; Wenbing Tao", + "authorids": "", + "aff": "Huazhong University of Science and Technology; Megvii(Face++) Inc; Tsinghua University; Huazhong University of Science and Technology; Megvii(Face++) Inc; Huazhong University of Science and Technology; Huazhong University of Science and Technology", + "bibtex": "@article{Yu_Liu_Li_Yang_Li_Han_Tao_2023, title={Generalizing Multiple Object Tracking to Unseen Domains by Introducing Natural Language Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25437}, DOI={10.1609/aaai.v37i3.25437}, abstractNote={Although existing multi-object tracking (MOT) algorithms have obtained competitive performance on various benchmarks, almost all 
of them train and validate models on the same domain. The domain generalization problem of MOT is hardly studied. To bridge this gap, we first draw the observation that the high-level information contained in natural language is domain invariant to different tracking domains. Based on this observation, we propose to introduce natural language representation into visual MOT models for boosting the domain generalization ability. However, it is infeasible to label every tracking target with a textual description. To tackle this problem, we design two modules, namely visual context prompting (VCP) and visual-language mixing (VLM). Specifically, VCP generates visual prompts based on the input frames. VLM joints the information in the generated visual prompts and the textual prompts from a pre-defined Trackbook to obtain instance-level pseudo textual description, which is domain invariant to different tracking scenes. Through training models on MOT17 and validating them on MOT20, we observe that the pseudo textual descriptions generated by our proposed modules improve the generalization performance of query-based trackers by large margins.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, En and Liu, Songtao and Li, Zhuoling and Yang, Jinrong and Li, Zeming and Han, Shoudong and Tao, Wenbing}, year={2023}, month={Jun.}, pages={3304-3312} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25437/25209", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25437", + "pdf_size": 1520273, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11955355710108025705&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn; ; ;hust.edu.cn; ;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn; ; ;hust.edu.cn; ;hust.edu.cn;hust.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1+2;3;0;1+2;0;0", + "aff_unique_norm": "Huazhong 
University of Science and Technology;Megvii Technology;Unknown Institution;Tsinghua University", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.hust.edu.cn;https://www.megvii.com;;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "HUST;Megvii;;THU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26509", + "title": "Generating Coherent Narratives by Learning Dynamic and Discrete Entity States with a Contrastive Framework", + "track": "main", + "status": "Technical", + "abstract": "Despite advances in generating fluent texts, existing pretraining models tend to attach incoherent event sequences to involved entities when generating narratives such as stories and news. We conjecture that such issues result from representing entities as static embeddings of superficial words, while neglecting to model their ever-changing states, i.e., the information they carry, as the text unfolds. Therefore, we extend the Transformer model to dynamically conduct entity state updates and sentence realization for narrative generation. We propose a contrastive framework to learn the state representations in a discrete space, and insert additional attention layers into the decoder to better exploit these states. 
Experiments on two narrative datasets show that our model can generate more coherent and diverse narratives than strong baselines with the guidance of meaningful entity states.", + "primary_area": "speech natural language processing", + "author": "Jian Guan; Zhenyu Yang; Rongsheng Zhang; Zhipeng Hu; Minlie Huang", + "authorids": "", + "aff": "The CoAI group, DCST, Institute for Artificial Intelligence, State Key Lab of Intelligent Technology and Systems, Beijing National Research Center for Information Science and Technology, Tsinghua University; Guangdong OPPO Mobile Telecommunications Corp., Ltd.; Fuxi AI Lab, NetEase Inc., Hangzhou, China; Fuxi AI Lab, NetEase Inc., Hangzhou, China; The CoAI group, DCST, Institute for Artificial Intelligence, State Key Lab of Intelligent Technology and Systems, Beijing National Research Center for Information Science and Technology, Tsinghua University", + "bibtex": "@article{Guan_Yang_Zhang_Hu_Huang_2023, title={Generating Coherent Narratives by Learning Dynamic and Discrete Entity States with a Contrastive Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26509}, DOI={10.1609/aaai.v37i11.26509}, abstractNote={Despite advances in generating fluent texts, existing pretraining models tend to attach incoherent event sequences to involved entities when generating narratives such as stories and news. We conjecture that such issues result from representing entities as static embeddings of superficial words, while neglecting to model their ever-changing states, i.e., the information they carry, as the text unfolds. Therefore, we extend the Transformer model to dynamically conduct entity state updates and sentence realization for narrative generation. We propose a contrastive framework to learn the state representations in a discrete space, and insert additional attention layers into the decoder to better exploit these states. 
Experiments on two narrative datasets show that our model can generate more coherent and diverse narratives than strong baselines with the guidance of meaningful entity states.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guan, Jian and Yang, Zhenyu and Zhang, Rongsheng and Hu, Zhipeng and Huang, Minlie}, year={2023}, month={Jun.}, pages={12836-12844} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26509/26281", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26509", + "pdf_size": 1424615, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4793449783767506072&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mails.tsinghua.edu.cn;oppo.com;corp.netease.com;corp.netease.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;oppo.com;corp.netease.com;corp.netease.com;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;0", + "aff_unique_norm": "Tsinghua University;OPPO Mobile Telecommunications Corp., Ltd.;NetEase Inc.", + "aff_unique_dep": "Institute for Artificial Intelligence;;Fuxi AI Lab", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.oppo.com;https://www.163.com", + "aff_unique_abbr": "THU;OPPO;NetEase", + "aff_campus_unique_index": "0;2;2;0", + "aff_campus_unique": "Beijing;;Hangzhou", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27070", + "title": "Generating Reflective Questions for Engaging Gallery Visitors in ArtMuse", + "track": "demonstrations", + "status": "Technical", + "abstract": "Human guides in museums and galleries are professionally trained to stimulate informal \nlearning in visitors by asking low-risk, open-ended reflective questions\nthat enable them to\nfocus on specific features of artifacts, \nrelate to prior experiences, and elicit curiosity as well as further thought. 
We present ArtMuse, our AI-powered chatbot for asking reflective questions in context of paintings. Our reflective question generation model in ArtMuse\nwas trained by applying a\nnovel combination of existing models for\nextractive question answering and open-domain chitchat. User evaluation studies indicate that we are able to generate fluent and specific reflective questions for paintings that are highly-engaging.", + "primary_area": "", + "author": "Sujatha Das Gollapalli; Mingzhe Du; See-Kiong Ng", + "authorids": "", + "aff": "Institute of Data Science, National University of Singapore; Institute of Data Science, National University of Singapore; Institute of Data Science, National University of Singapore", + "bibtex": "@article{Gollapalli_Du_Ng_2024, title={Generating Reflective Questions for Engaging Gallery Visitors in ArtMuse}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27070}, DOI={10.1609/aaai.v37i13.27070}, abstractNote={Human guides in museums and galleries are professionally trained to stimulate informal learning in visitors by asking low-risk, open-ended reflective questions\nthat enable them to\nfocus on specific features of artifacts, relate to prior experiences, and elicit curiosity as well as further thought. We present ArtMuse, our AI-powered chatbot for asking reflective questions in context of paintings. Our reflective question generation model in ArtMuse\nwas trained by applying a\nnovel combination of existing models for\nextractive question answering and open-domain chitchat. 
User evaluation studies indicate that we are able to generate fluent and specific reflective questions for paintings that are highly-engaging.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gollapalli, Sujatha Das and Du, Mingzhe and Ng, See-Kiong}, year={2024}, month={Jul.}, pages={16434-16436} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27070/26842", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27070", + "pdf_size": 885152, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2237287658680093401&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "nus.edu.sg;nus.edu.sg;nus.edu.sg", + "email": "nus.edu.sg;nus.edu.sg;nus.edu.sg", + "github": "", + "project": "https://nlp-platform.online/artmuse", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "Institute of Data Science", + "aff_unique_url": "https://www.nus.edu.sg", + "aff_unique_abbr": "NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25154", + "title": "Generating Transferable 3D Adversarial Point Cloud via Random Perturbation Factorization", + "track": "main", + "status": "Technical", + "abstract": "Recent studies have demonstrated that existing deep neural networks (DNNs) on 3D point clouds are vulnerable to adversarial examples, especially under the white-box settings where the adversaries have access to model parameters. However, adversarial 3D point clouds generated by existing white-box methods have limited transferability across different DNN architectures. They have only minor threats in real-world scenarios under the black-box settings where the adversaries can only query the deployed victim model. In this paper, we revisit the transferability of adversarial 3D point clouds. 
We observe that an adversarial perturbation can be randomly factorized into two sub-perturbations, which are also likely to be adversarial perturbations. It motivates us to consider the effects of the perturbation and its sub-perturbations simultaneously to increase the transferability for sub-perturbations also contain helpful information. In this paper, we propose a simple yet effective attack method to generate more transferable adversarial 3D point clouds. Specifically, rather than simply optimizing the loss of perturbation alone, we combine it with its random factorization. We conduct experiments on benchmark dataset, verifying our method's effectiveness in increasing transferability while preserving high efficiency.", + "primary_area": "computer vision i", + "author": "Bangyan He; Jian Liu; Yiming Li; Siyuan Liang; Jingzhi Li; Xiaojun Jia; Xiaochun Cao", + "authorids": "", + "aff": "SKLOIS, Institute of Information Engineering, CAS, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Ant Group, Beijing, China; Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China; SKLOIS, Institute of Information Engineering, CAS, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; SKLOIS, Institute of Information Engineering, CAS, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; SKLOIS, Institute of Information Engineering, CAS, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; School of Cyber Science and Technology, Shenzhen Campus, Sun Yat-sen University, Shenzhen, China", + "bibtex": "@article{He_Liu_Li_Liang_Li_Jia_Cao_2023, title={Generating Transferable 3D Adversarial Point Cloud via Random Perturbation Factorization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25154}, DOI={10.1609/aaai.v37i1.25154}, abstractNote={Recent 
studies have demonstrated that existing deep neural networks (DNNs) on 3D point clouds are vulnerable to adversarial examples, especially under the white-box settings where the adversaries have access to model parameters. However, adversarial 3D point clouds generated by existing white-box methods have limited transferability across different DNN architectures. They have only minor threats in real-world scenarios under the black-box settings where the adversaries can only query the deployed victim model. In this paper, we revisit the transferability of adversarial 3D point clouds. We observe that an adversarial perturbation can be randomly factorized into two sub-perturbations, which are also likely to be adversarial perturbations. It motivates us to consider the effects of the perturbation and its sub-perturbations simultaneously to increase the transferability for sub-perturbations also contain helpful information. In this paper, we propose a simple yet effective attack method to generate more transferable adversarial 3D point clouds. Specifically, rather than simply optimizing the loss of perturbation alone, we combine it with its random factorization. 
We conduct experiments on benchmark dataset, verifying our method\u2019s effectiveness in increasing transferability while preserving high efficiency.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Bangyan and Liu, Jian and Li, Yiming and Liang, Siyuan and Li, Jingzhi and Jia, Xiaojun and Cao, Xiaochun}, year={2023}, month={Jun.}, pages={764-772} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25154/24926", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25154", + "pdf_size": 557936, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=517029860526695981&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "iie.ac.cn;antgroup.com;mails.tsinghua.edu.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;mail.sysu.edu.cn", + "email": "iie.ac.cn;antgroup.com;mails.tsinghua.edu.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;mail.sysu.edu.cn", + "github": "https://github.com/HeBangYan/PF-Attack", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;3;0+1;0+1;0+1;4", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Ant Group;Tsinghua University;Sun Yat-sen University", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;;International Graduate School;School of Cyber Science and Technology", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.antgroup.com;https://www.tsinghua.edu.cn;http://www.sysu.edu.cn", + "aff_unique_abbr": "CAS;UCAS;Ant Group;THU;SYSU", + "aff_campus_unique_index": "0+0;0;1;0+0;0+0;0+0;1", + "aff_campus_unique": "Beijing;Shenzhen", + "aff_country_unique_index": "0+0;0;0;0+0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26807", + "title": "Generative Decision Making Under Uncertainty", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "In the fields of natural language processing (NLP) and computer vision 
(CV), recent advances in generative modeling have led to powerful machine learning systems that can effectively learn from large labeled and unlabeled datasets. These systems, by and large, apply a uniform pretrain-finetune pipeline on sequential data streams and have achieved state-of-the-art performance across many tasks and benchmarks. In this talk, we will present recent algorithms that extend this paradigm to sequential decision making, by casting it as an inverse problem that can be solved via deep generative models. These generative approaches are stable to train, provide a flexible interface for single- and multi-task inference, and generalize exceedingly well outside their training datasets. We instantiate these algorithms in the context of reinforcement learning and black-box optimization. Empirically, we demonstrate that these approaches perform exceedingly well on high-dimensional benchmarks outperforming the current state-of-the-art approaches based on forward models.", + "primary_area": "", + "author": "Aditya Grover", + "authorids": "", + "aff": "University of California, Los Angeles", + "bibtex": "@article{Grover_2024, title={Generative Decision Making Under Uncertainty}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26807}, DOI={10.1609/aaai.v37i13.26807}, abstractNote={In the fields of natural language processing (NLP) and computer vision (CV), recent advances in generative modeling have led to powerful machine learning systems that can effectively learn from large labeled and unlabeled datasets. These systems, by and large, apply a uniform pretrain-finetune pipeline on sequential data streams and have achieved state-of-the-art-performance across many tasks and benchmarks. In this talk, we will present recent algorithms that extend this paradigm to sequential decision making, by casting it as an inverse problem that can be solved via deep generative models. 
These generative approaches are stable to train, provide a flexible interface for single- and multi-task inference, and generalize exceedingly well outside their training datasets. We instantiate these algorithms in the context of reinforcement learning and black-box optimization. Empirically, we demonstrate that these approaches perform exceedingly well on high-dimensional benchmarks outperforming the current state-of-the-art approaches based on forward models.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Grover, Aditya}, year={2024}, month={Jul.}, pages={15440-15440} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26807/26579", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26807", + "pdf_size": 43998, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6625946545228710190&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.ucla.edu", + "email": "cs.ucla.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of California, Los Angeles", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucla.edu", + "aff_unique_abbr": "UCLA", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25502", + "title": "Generative Image Inpainting with Segmentation Confusion Adversarial Training and Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "This paper presents a new adversarial training framework for image inpainting with segmentation confusion adversarial training (SCAT) and contrastive learning. SCAT plays an adversarial game between an inpainting generator and a segmentation network, which provides pixel-level local training signals and can adapt to images with free-form holes. 
By combining SCAT with standard global adversarial training, the new adversarial training framework exhibits the following three advantages simultaneously: (1) the global consistency of the repaired image, (2) the local fine texture details of the repaired image, and (3) the flexibility of handling images with free-form holes. Moreover, we propose the textural and semantic contrastive learning losses to stabilize and improve our inpainting model's training by exploiting the feature representation space of the discriminator, in which the inpainting images are pulled closer to the ground truth images but pushed farther from the corrupted images. The proposed contrastive losses better guide the repaired images to move from the corrupted image data points to the real image data points in the feature representation space, resulting in more realistic completed images. We conduct extensive experiments on two benchmark datasets, demonstrating our model's effectiveness and superiority both qualitatively and quantitatively.", + "primary_area": "computer vision iii", + "author": "Zhiwen Zuo; Lei Zhao; Ailin Li; Zhizhong Wang; Zhanjie Zhang; Jiafu Chen; Wei Xing; Dongming Lu", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University", + "bibtex": "@article{Zuo_Zhao_Li_Wang_Zhang_Chen_Xing_Lu_2023, title={Generative Image Inpainting with Segmentation Confusion Adversarial Training and Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25502}, 
DOI={10.1609/aaai.v37i3.25502}, abstractNote={This paper presents a new adversarial training framework for image inpainting with segmentation confusion adversarial training (SCAT) and contrastive learning. SCAT plays an adversarial game between an inpainting generator and a segmentation network, which provides pixel-level local training signals and can adapt to images with free-form holes. By combining SCAT with standard global adversarial training, the new adversarial training framework exhibits the following three advantages simultaneously: (1) the global consistency of the repaired image, (2) the local fine texture details of the repaired image, and (3) the flexibility of handling images with free-form holes. Moreover, we propose the textural and semantic contrastive learning losses to stabilize and improve our inpainting model\u2019s training by exploiting the feature representation space of the discriminator, in which the inpainting images are pulled closer to the ground truth images but pushed farther from the corrupted images. The proposed contrastive losses better guide the repaired images to move from the corrupted image data points to the real image data points in the feature representation space, resulting in more realistic completed images. 
We conduct extensive experiments on two benchmark datasets, demonstrating our model\u2019s effectiveness and superiority both qualitatively and quantitatively.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zuo, Zhiwen and Zhao, Lei and Li, Ailin and Wang, Zhizhong and Zhang, Zhanjie and Chen, Jiafu and Xing, Wei and Lu, Dongming}, year={2023}, month={Jun.}, pages={3888-3896} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25502/25274", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25502", + "pdf_size": 1323995, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3667431175580627007&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "College of Computer Science and Technology", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26078", + "title": "Generative Label Enhancement with Gaussian Mixture and Partial Ranking", + "track": "main", + "status": "Technical", + "abstract": "Label distribution learning (LDL) is an effective learning paradigm for dealing with label ambiguity. When applying LDL, the datasets annotated with label distributions (i.e., the real-valued vectors like the probability distribution) are typically required. Unfortunately, most existing datasets only contain the logical labels, and manual annotating with label distributions is costly. 
To address this problem, we treat the label distribution as a latent vector and infer its posterior by variational Bayes. Specifically, we propose a generative label enhancement model to encode the process of generating feature vectors and logical label vectors from label distributions in a principled way. In terms of features, we assume that the feature vector is generated by a Gaussian mixture dominated by the label distribution, which captures the one-to-many relationship from the label distribution to the feature vector and thus reduces the feature generation error. In terms of logical labels, we design a probability distribution to generate the logical label vector from a label distribution, which captures partial label ranking in the logical label vector and thus provides a more accurate guidance for inferring the label distribution. Besides, to approximate the posterior of the label distribution, we design an inference model, and derive the variational learning objective. Finally, extensive experiments on real-world datasets validate our proposal.", + "primary_area": "machine learning ii", + "author": "Yunan Lu; Liang He; Fan Min; Weiwei Li; Xiuyi Jia", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; School of Computer Science, Southwest Petroleum University, Chengdu, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China + Ministry Key Laboratory for Safety-Critical Software Development and Verification, Nanjing University of Aeronautics and Astronautics, Nanjing, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", + "bibtex": "@article{Lu_He_Min_Li_Jia_2023, title={Generative Label Enhancement with Gaussian Mixture and Partial Ranking}, 
volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26078}, DOI={10.1609/aaai.v37i7.26078}, abstractNote={Label distribution learning (LDL) is an effective learning paradigm for dealing with label ambiguity. When applying LDL, the datasets annotated with label distributions (i.e., the real-valued vectors like the probability distribution) are typically required. Unfortunately, most existing datasets only contain the logical labels, and manual annotating with label distributions is costly. To address this problem, we treat the label distribution as a latent vector and infer its posterior by variational Bayes. Specifically, we propose a generative label enhancement model to encode the process of generating feature vectors and logical label vectors from label distributions in a principled way. In terms of features, we assume that the feature vector is generated by a Gaussian mixture dominated by the label distribution, which captures the one-to-many relationship from the label distribution to the feature vector and thus reduces the feature generation error. In terms of logical labels, we design a probability distribution to generate the logical label vector from a label distribution, which captures partial label ranking in the logical label vector and thus provides a more accurate guidance for inferring the label distribution. Besides, to approximate the posterior of the label distribution, we design a inference model, and derive the variational learning objective. 
Finally, extensive experiments on real-world datasets validate our proposal.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Yunan and He, Liang and Min, Fan and Li, Weiwei and Jia, Xiuyi}, year={2023}, month={Jun.}, pages={8975-8983} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26078/25850", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26078", + "pdf_size": 2625953, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14140810864586244275&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "njust.edu.cn;njust.edu.cn;swpu.edu.cn;nuaa.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;swpu.edu.cn;nuaa.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2+2;0", + "aff_unique_norm": "Nanjing University of Science and Technology;Southwest Petroleum University;Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "School of Computer Science and Engineering;School of Computer Science;College of Computer Science and Technology", + "aff_unique_url": "http://www.nust.edu.cn;https://www.swpu.edu.cn;http://www.nuaa.edu.cn", + "aff_unique_abbr": "NUST;;NUAA", + "aff_campus_unique_index": "0;0;1;0+0;0", + "aff_campus_unique": "Nanjing;Chengdu", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27009", + "title": "Generative Pipeline for Data Augmentation of Unconstrained Document Images with Structural and Textural Degradation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Computer vision applications for document image understanding (DIU) such as optical character recognition, word spotting, enhancement etc. suffer from structural deformations like strike-outs and unconstrained strokes, to name a few. 
They also suffer from texture degradation due to blurring, aging, or blotting-spots etc. \nThe DIU applications with deep networks are limited to constrained environment and lack diverse data with text-level and pixel-level annotation simultaneously. In this work, we propose a generative framework to produce realistic synthetic handwritten document images with simultaneous annotation of text and corresponding pixel-level spatial foreground information. The proposed approach generates realistic backgrounds with artificial handwritten texts which supplements data-augmentation in multiple unconstrained DIU systems. The proposed framework is an early work to facilitate DIU system-evaluation in both image quality and recognition performance at a go.", + "primary_area": "", + "author": "Arnab Poddar; Abhishek Kumar Sah; Soumyadeep Dey; Pratik Jawanpuria; Jayanta Mukhopadhyay; Prabir Kumar Biswas", + "authorids": "", + "aff": "Dept. of Electronics & Electrical Communication Engg, Indian Institute of Technology Kharagpur, 721302 India; Dept. of Computer Science & Engg, Indian Institute of Technology Kharagpur, 721302 India; Microsoft R&D India, Hyderabad; Microsoft R&D India, Hyderabad; Dept. of Computer Science & Engg, Indian Institute of Technology Kharagpur, 721302 India; Dept. of Electronics & Electrical Communication Engg, Indian Institute of Technology Kharagpur, 721302 India", + "bibtex": "@article{Poddar_Sah_Dey_Jawanpuria_Mukhopadhyay_Biswas_2024, title={Generative Pipeline for Data Augmentation of Unconstrained Document Images with Structural and Textural Degradation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27009}, DOI={10.1609/aaai.v37i13.27009}, abstractNote={Computer vision applications for document image understanding (DIU) such as optical character recognition, word spotting, enhancement etc. suffer from structural deformations like strike-outs and unconstrained strokes, to name a few. 
They also suffer from texture degradation due to blurring, aging, or blotting-spots etc. The DIU applications with deep networks are limited to constrained environment and lack diverse data with text-level and pixel-level annotation simultaneously. In this work, we propose a generative framework to produce realistic synthetic handwritten document images with simultaneous annotation of text and corresponding pixel-level spatial foreground information. The proposed approach generates realistic backgrounds with artificial handwritten texts which supplements data-augmentation in multiple unconstrained DIU systems. The proposed framework is an early work to facilitate DIU system-evaluation in both image quality and recognition performance at a go.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Poddar, Arnab and Sah, Abhishek Kumar and Dey, Soumyadeep and Jawanpuria, Pratik and Mukhopadhyay, Jayanta and Biswas, Prabir Kumar}, year={2024}, month={Jul.}, pages={16298-16299} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27009/26781", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27009", + "pdf_size": 579303, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:Miox9PK0wOkJ:scholar.google.com/&scioq=Generative+Pipeline+for+Data+Augmentation+of+Unconstrained+Document+Images+with+Structural+and+Textural+Degradation+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "iitkgp.ac.in;gmail.com;microsoft.com;microsoft.com;iitkgp.ac.in;iitkgp.ac.in", + "email": "iitkgp.ac.in;gmail.com;microsoft.com;microsoft.com;iitkgp.ac.in;iitkgp.ac.in", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;0;0", + "aff_unique_norm": "Indian Institute of Technology Kharagpur;Microsoft", + "aff_unique_dep": "Dept. 
of Electronics & Electrical Communication Engg;R&D", + "aff_unique_url": "https://www.iitkgp.ac.in;https://www.microsoft.com", + "aff_unique_abbr": "IIT Kharagpur;Microsoft India", + "aff_campus_unique_index": "0;0;1;1;0;0", + "aff_campus_unique": "Kharagpur;Hyderabad", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25548", + "title": "Generic and Dynamic Graph Representation Learning for Crowd Flow Modeling", + "track": "main", + "status": "Technical", + "abstract": "Many deep spatio-temporal learning methods have been proposed for crowd flow modeling in recent years. However, most of them focus on designing a spatial and temporal convolution mechanism to aggregate information from nearby nodes and historical observations for a pre-defined prediction task. Different from the existing research, this paper aims to provide a generic and dynamic representation learning method for crowd flow modeling. The main idea of our method is to maintain a continuous-time representation for each node, and update the representations of all nodes continuously according to the streaming observed data. Along this line, a particular encoder-decoder architecture is proposed, where the encoder converts the newly happened transactions into a timestamped message, and then the representations of related nodes are updated according to the generated message. The role of the decoder is to guide the representation learning process by reconstructing the observed transactions based on the most recent node representations. Moreover, a number of virtual nodes are added to discover macro-level spatial patterns and also share the representations among spatially-interacted stations. Experiments have been conducted on two real-world datasets for four popular prediction tasks in crowd flow modeling. 
The result demonstrates that our method could achieve better prediction performance for all the tasks than baseline methods.", + "primary_area": "data mining and knowledge management", + "author": "Liangzhe Han; Ruixing Zhang; Leilei Sun; Bowen Du; Yanjie Fu; Tongyu Zhu", + "authorids": "", + "aff": "State Key Laboratory of Software Development Environment, Beihang University, Beijing, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, China; Department of Computer Science, University of Central Florida, Florida, USA; State Key Laboratory of Software Development Environment, Beihang University, Beijing, China", + "bibtex": "@article{Han_Zhang_Sun_Du_Fu_Zhu_2023, title={Generic and Dynamic Graph Representation Learning for Crowd Flow Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25548}, DOI={10.1609/aaai.v37i4.25548}, abstractNote={Many deep spatio-temporal learning methods have been proposed for crowd flow modeling in recent years. However, most of them focus on designing a spatial and temporal convolution mechanism to aggregate information from nearby nodes and historical observations for a pre-defined prediction task. Different from the existing research, this paper aims to provide a generic and dynamic representation learning method for crowd flow modeling. The main idea of our method is to maintain a continuous-time representation for each node, and update the representations of all nodes continuously according to the streaming observed data. Along this line, a particular encoder-decoder architecture is proposed, where the encoder converts the newly happened transactions into a timestamped message, and then the representations of related nodes are updated according to the generated message. 
The role of the decoder is to guide the representation learning process by reconstructing the observed transactions based on the most recent node representations. Moreover, a number of virtual nodes are added to discover macro-level spatial patterns and also share the representations among spatially-interacted stations. Experiments have been conducted on two real-world datasets for four popular prediction tasks in crowd flow modeling. The result demonstrates that our method could achieve better prediction performance for all the tasks than baseline methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Han, Liangzhe and Zhang, Ruixing and Sun, Leilei and Du, Bowen and Fu, Yanjie and Zhu, Tongyu}, year={2023}, month={Jun.}, pages={4293-4301} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25548/25320", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25548", + "pdf_size": 6834934, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15579168753725720938&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;ucf.edu;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;ucf.edu;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Beihang University;University of Central Florida", + "aff_unique_dep": "State Key Laboratory of Software Development Environment;Department of Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.ucf.edu", + "aff_unique_abbr": "BUAA;UCF", + "aff_campus_unique_index": "0;0;0;0;1;0", + "aff_campus_unique": "Beijing;Florida", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25657", + "title": "Gen\u00e9Live! 
Generating Rhythm Actions in Love Live!", + "track": "main", + "status": "Technical", + "abstract": "This article presents our generative model for rhythm action games together with applications in business operation. Rhythm action games are video games in which the player is challenged to issue commands at the right timings during a music session. The timings are rendered in the chart, which consists of visual symbols, called notes, flying through the screen. We introduce our deep generative model, Gen\u00e9Live!, which outperforms the state-of-the-art model by taking into account musical structures through beats and temporal scales. Thanks to its favorable performance, Gen\u00e9Live! was put into operation at KLab Inc., a Japan-based video game developer, and reduced the business cost of chart generation by as much as half. The application target included the phenomenal \"Love Live!\", which has more than 10 million users across Asia and beyond, and is one of the few rhythm action franchises that has led the online era of the genre. In this article, we evaluate the generative performance of Gen\u00e9Live! using production datasets at KLab as well as open datasets for reproducibility, while the model continues to operate in their business. Our code and the model, tuned and trained using a supercomputer, are publicly available.", + "primary_area": "domain s of application", + "author": "Atsushi Takada; Daichi Yamazaki; Yudai Yoshida; Nyamkhuu Ganbat; Takayuki Shimotomai; Naoki Hamada; Likun Liu; Taiga Yamamoto; Daisuke Sakurai", + "authorids": "", + "aff": "KLab Inc.; KLab Inc.; KLab Inc.; KLab Inc.; KLab Inc.; KLab Inc.; Kyushu University; Kyushu University; Kyushu University", + "bibtex": "@article{Takada_Yamazaki_Yoshida_Ganbat_Shimotomai_Hamada_Liu_Yamamoto_Sakurai_2023, title={Gen\u00e9Live! 
Generating Rhythm Actions in Love Live!}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25657}, DOI={10.1609/aaai.v37i4.25657}, abstractNote={This article presents our generative model for rhythm action games together with applications in business operation. Rhythm action games are video games in which the player is challenged to issue commands at the right timings during a music session. The timings are rendered in the chart, which consists of visual symbols, called notes, flying through the screen. We introduce our deep generative model, Gen\u00e9Live!, which outperforms the state-of-the-art model by taking into account musical structures through beats and temporal scales. Thanks to its favorable performance, Gen\u00e9Live! was put into operation at KLab Inc., a Japan-based video game developer, and reduced the business cost of chart generation by as much as half. The application target included the phenomenal "Love Live!", which has more than 10 million users across Asia and beyond, and is one of the few rhythm action franchises that has led the online era of the genre. In this article, we evaluate the generative performance of Gen\u00e9Live! using production datasets at KLab as well as open datasets for reproducibility, while the model continues to operate in their business. 
Our code and the model, tuned and trained using a supercomputer, are publicly available.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Takada, Atsushi and Yamazaki, Daichi and Yoshida, Yudai and Ganbat, Nyamkhuu and Shimotomai, Takayuki and Hamada, Naoki and Liu, Likun and Yamamoto, Taiga and Sakurai, Daisuke}, year={2023}, month={Jun.}, pages={5266-5275} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25657/25429", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25657", + "pdf_size": 4739886, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13221757455291535491&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "klab.com;klab.com;klab.com;klab.com;klab.com;klab.com;s.kyushu-u.ac.jp;s.kyushu-u.ac.jp;ieee.org", + "email": "klab.com;klab.com;klab.com;klab.com;klab.com;klab.com;s.kyushu-u.ac.jp;s.kyushu-u.ac.jp;ieee.org", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;1;1;1", + "aff_unique_norm": "KLab Inc.;Kyushu University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.klab.com;https://www.kyushu-u.ac.jp", + "aff_unique_abbr": "KLab;Kyushu U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26123", + "title": "Geometric Inductive Biases for Identifiable Unsupervised Learning of Disentangled Representations", + "track": "main", + "status": "Technical", + "abstract": "The model identifiability is a considerable issue in the unsupervised learning of disentangled representations. The PCA inductive biases revealed recently for unsupervised disentangling in VAE-based models are shown to improve local alignment of latent dimensions with principal components of the data. 
In this paper, in addition to the PCA inductive biases, we propose novel geometric inductive biases from the manifold perspective for unsupervised disentangling, which induce the model to capture the global geometric properties of the data manifold with guaranteed model identifiability. We also propose a Geometric Disentangling Regularized AutoEncoder (GDRAE) that combines the PCA and the proposed geometric inductive biases in one unified framework. The experimental results show the usefulness of the geometric inductive biases in unsupervised disentangling and the effectiveness of our GDRAE in capturing the geometric inductive biases.", + "primary_area": "machine learning iii", + "author": "Ziqi Pan; Li Niu; Liqing Zhang", + "authorids": "", + "aff": "MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China", + "bibtex": "@article{Pan_Niu_Zhang_2023, title={Geometric Inductive Biases for Identifiable Unsupervised Learning of Disentangled Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26123}, DOI={10.1609/aaai.v37i8.26123}, abstractNote={The model identifiability is a considerable issue in the unsupervised learning of disentangled representations. The PCA inductive biases revealed recently for unsupervised disentangling in VAE-based models are shown to improve local alignment of latent dimensions with principal components of the data. 
In this paper, in additional to the PCA inductive biases, we propose novel geometric inductive biases from the manifold perspective for unsupervised disentangling, which induce the model to capture the global geometric properties of the data manifold with guaranteed model identifiability. We also propose a Geometric Disentangling Regularized AutoEncoder (GDRAE) that combines the PCA and the proposed geometric inductive biases in one unified framework. The experimental results show the usefulness of the geometric inductive biases in unsupervised disentangling and the effectiveness of our GDRAE in capturing the geometric inductive biases.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Ziqi and Niu, Li and Zhang, Liqing}, year={2023}, month={Jun.}, pages={9372-9380} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26123/25895", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26123", + "pdf_size": 5759417, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15243341907383105645&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26053", + "title": "Geometry-Aware Network for Domain Adaptive Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Measuring and alleviating the discrepancies between the synthetic (source) and real scene (target) data is the core issue for domain 
adaptive semantic segmentation. Though recent works have introduced depth information in the source domain to reinforce the geometric and semantic knowledge transfer, they cannot extract the intrinsic 3D information of objects, including positions and shapes, merely based on 2D estimated depth. In this work, we propose a novel Geometry-Aware Network for Domain Adaptation (GANDA), leveraging more compact 3D geometric point cloud representations to shrink the domain gaps. In particular, we first utilize the auxiliary depth supervision from the source domain to obtain the depth prediction in the target domain to accomplish structure-texture disentanglement. Beyond depth estimation, we explicitly exploit 3D topology on the point clouds generated from RGB-D images for further coordinate-color disentanglement and pseudo-labels refinement in the target domain. Moreover, to improve the 2D classifier in the target domain, we perform domain-invariant geometric adaptation from source to target and unify the 2D semantic and 3D geometric segmentation results in two domains. Note that our GANDA is plug-and-play in any existing UDA framework. 
Qualitative and quantitative results demonstrate that our model outperforms state-of-the-arts on GTA5->Cityscapes and SYNTHIA->Cityscapes.", + "primary_area": "machine learning ii", + "author": "Yinghong Liao; Wending Zhou; Xu Yan; Zhen Li; Yizhou Yu; Shuguang Cui", + "authorids": "", + "aff": "The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen) + School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen); The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen) + School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen); The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen) + School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen); School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen) + The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen); Department of Computer Science, The University of Hong Kong; School of Science and Engineering, The Chinese University of Hong Kong (Shenzhen) + The Future Network of Intelligence Institute, The Chinese University of Hong Kong (Shenzhen)", + "bibtex": "@article{Liao_Zhou_Yan_Li_Yu_Cui_2023, title={Geometry-Aware Network for Domain Adaptive Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26053}, DOI={10.1609/aaai.v37i7.26053}, abstractNote={Measuring and alleviating the discrepancies between the synthetic (source) and real scene (target) data is the core issue for domain adaptive semantic segmentation. Though recent works have introduced depth information in the source domain to reinforce the geometric and semantic knowledge transfer, they cannot extract the intrinsic 3D information of objects, including positions and shapes, merely based on 2D estimated depth. 
In this work, we propose a novel Geometry-Aware Network for Domain Adaptation (GANDA), leveraging more compact 3D geometric point cloud representations to shrink the domain gaps. In particular, we first utilize the auxiliary depth supervision from the source domain to obtain the depth prediction in the target domain to accomplish structure-texture disentanglement. Beyond depth estimation, we explicitly exploit 3D topology on the point clouds generated from RGB-D images for further coordinate-color disentanglement and pseudo-labels refinement in the target domain. Moreover, to improve the 2D classifier in the target domain, we perform domain-invariant geometric adaptation from source to target and unify the 2D semantic and 3D geometric segmentation results in two domains. Note that our GANDA is plug-and-play in any existing UDA framework. Qualitative and quantitative results demonstrate that our model outperforms state-of-the-arts on GTA5->Cityscapes and SYNTHIA->Cityscapes.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liao, Yinghong and Zhou, Wending and Yan, Xu and Li, Zhen and Yu, Yizhou and Cui, Shuguang}, year={2023}, month={Jun.}, pages={8755-8763} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26053/25825", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26053", + "pdf_size": 1623147, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4239920005892321740&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "link.; ;gcuhk.edu.cn; ; ; ", + "email": "link.; ;gcuhk.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;0+0;1;0+0", + "aff_unique_norm": "The Chinese University of Hong Kong;The University of Hong Kong", + "aff_unique_dep": "Future Network of Intelligence Institute;Department of Computer Science", + "aff_unique_url": "https://www.cuhk.edu.cn;https://www.hku.hk", + 
"aff_unique_abbr": "CUHK;HKU", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26267", + "title": "Global Concept-Based Interpretability for Graph Neural Networks via Neuron Analysis", + "track": "main", + "status": "Technical", + "abstract": "Graph neural networks (GNNs) are highly effective on a variety of graph-related tasks; however, they lack interpretability and transparency. Current explainability approaches are typically local and treat GNNs as black-boxes. They do not look inside the model, inhibiting human trust in the model and explanations. Motivated by the ability of neurons to detect high-level semantic concepts in vision models, we perform a novel analysis on the behaviour of individual GNN neurons to answer questions about GNN interpretability. We propose a novel approach for producing global explanations for GNNs using neuron-level concepts to enable practitioners to have a high-level view of the model. 
Specifically, (i) to the best of our knowledge, this is the first work which shows that GNN neurons act as concept detectors and have strong alignment with concepts formulated as logical compositions of node degree and neighbourhood properties; (ii) we quantitatively assess the importance of detected concepts, and identify a trade-off between training duration and neuron-level interpretability; (iii) we demonstrate that our global explainability approach has advantages over the current state-of-the-art -- we can disentangle the explanation into individual interpretable concepts backed by logical descriptions, which reduces potential for bias and improves user-friendliness.", + "primary_area": "machine learning iv", + "author": "Han Xuanyuan; Pietro Barbiero; Dobrik Georgiev; Lucie Charlotte Magister; Pietro Li\u00f2", + "authorids": "", + "aff": "University of Cambridge; University of Cambridge; University of Cambridge; University of Cambridge; University of Cambridge", + "bibtex": "@article{Xuanyuan_Barbiero_Georgiev_Magister_Li\u00f2_2023, title={Global Concept-Based Interpretability for Graph Neural Networks via Neuron Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26267}, DOI={10.1609/aaai.v37i9.26267}, abstractNote={Graph neural networks (GNNs) are highly effective on a variety of graph-related tasks; however, they lack interpretability and transparency. Current explainability approaches are typically local and treat GNNs as black-boxes. They do not look inside the model, inhibiting human trust in the model and explanations. Motivated by the ability of neurons to detect high-level semantic concepts in vision models, we perform a novel analysis on the behaviour of individual GNN neurons to answer questions about GNN interpretability. We propose a novel approach for producing global explanations for GNNs using neuron-level concepts to enable practitioners to have a high-level view of the model. 
Specifically, (i) to the best of our knowledge, this is the first work which shows that GNN neurons act as concept detectors and have strong alignment with concepts formulated as logical compositions of node degree and neighbourhood properties; (ii) we quantitatively assess the importance of detected concepts, and identify a trade-off between training duration and neuron-level interpretability; (iii) we demonstrate that our global explainability approach has advantages over the current state-of-the-art -- we can disentangle the explanation into individual interpretable concepts backed by logical descriptions, which reduces potential for bias and improves user-friendliness.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xuanyuan, Han and Barbiero, Pietro and Georgiev, Dobrik and Magister, Lucie Charlotte and Li\u00f2, Pietro}, year={2023}, month={Jun.}, pages={10675-10683} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26267/26039", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26267", + "pdf_size": 2026649, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1618620925628255195&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cantab.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk", + "email": "cantab.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25865", + "title": "Global Convergence of Two-Timescale Actor-Critic for Solving Linear Quadratic Regulator", + "track": "main", + "status": "Technical", + "abstract": "The 
actor-critic (AC) reinforcement learning algorithms have been the powerhouse behind many challenging applications. Nevertheless, its convergence is fragile in general. To study its instability, existing works mostly consider the uncommon double-loop variant or basic models with finite state and action space. We investigate the more practical single-sample two-timescale AC for solving the canonical linear quadratic regulator (LQR) problem, where the actor and the critic update only once with a single sample in each iteration on an unbounded continuous state and action space. Existing analysis cannot conclude the convergence for such a challenging case. We develop a new analysis framework that allows establishing the global convergence to an epsilon-optimal solution with at most an order of epsilon to -2.5 sample complexity. To our knowledge, this is the first finite-time convergence analysis for the single sample two-timescale AC for solving LQR with global optimality. The sample complexity improves those of other variants by orders, which sheds light on the practical wisdom of single sample algorithms. We also further validate our theoretical findings via comprehensive simulation comparisons.", + "primary_area": "machine learning i", + "author": "Xuyang Chen; Jingliang Duan; Yingbin Liang; Lin Zhao", + "authorids": "", + "aff": "National University of Singapore; University of Science and Technology Beijing; The Ohio State University; National University of Singapore", + "bibtex": "@article{Chen_Duan_Liang_Zhao_2023, title={Global Convergence of Two-Timescale Actor-Critic for Solving Linear Quadratic Regulator}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25865}, DOI={10.1609/aaai.v37i6.25865}, abstractNote={The actor-critic (AC) reinforcement learning algorithms have been the powerhouse behind many challenging applications. Nevertheless, its convergence is fragile in general. 
To study its instability, existing works mostly consider the uncommon double-loop variant or basic models with finite state and action space. We investigate the more practical single-sample two-timescale AC for solving the canonical linear quadratic regulator (LQR) problem, where the actor and the critic update only once with a single sample in each iteration on an unbounded continuous state and action space. Existing analysis cannot conclude the convergence for such a challenging case. We develop a new analysis framework that allows establishing the global convergence to an epsilon-optimal solution with at most an order of epsilon to -2.5 sample complexity. To our knowledge, this is the first finite-time convergence analysis for the single sample two-timescale AC for solving LQR with global optimality. The sample complexity improves those of other variants by orders, which sheds light on the practical wisdom of single sample algorithms. We also further validate our theoretical findings via comprehensive simulation comparisons.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Xuyang and Duan, Jingliang and Liang, Yingbin and Zhao, Lin}, year={2023}, month={Jun.}, pages={7087-7095} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25865/25637", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25865", + "pdf_size": 811480, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14462567801489932861&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "u.nus.edu;ustb.edu.cn;osu.edu;nus.edu.sg", + "email": "u.nus.edu;ustb.edu.cn;osu.edu;nus.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "National University of Singapore;University of Science and Technology Beijing;The Ohio State University", + "aff_unique_dep": ";;", + "aff_unique_url": 
"https://www.nus.edu.sg;http://www.ustb.edu.cn;https://www.osu.edu", + "aff_unique_abbr": "NUS;USTB;OSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0", + "aff_country_unique": "Singapore;China;United States" + }, + { + "id": "article-25241", + "title": "Global Dilated Attention and Target Focusing Network for Robust Tracking", + "track": "main", + "status": "Technical", + "abstract": "Self Attention has shown the excellent performance in tracking due to its global modeling capability. However, it brings two challenges: First, its global receptive field has less attention on local structure and inter-channel associations, which limits the semantics to distinguish objects and backgrounds; Second, its feature fusion with linear process cannot avoid the interference of non-target semantic objects. To solve the above issues, this paper proposes a robust tracking method named GdaTFT by defining the Global Dilated Attention (GDA) and Target Focusing Network (TFN). The GDA provides a new global semantics modeling approach to enhance the semantic objects while eliminating the background. It is defined via the local focusing module, dilated attention and channel adaption module. Thus, it promotes semantics by focusing local key information, building long-range dependencies and enhancing the semantics of channels. Subsequently, to distinguish the target and non-target objects both with rich semantics, the TFN is proposed to accurately focus the target region. Different from the present feature fusion, it uses the template as the query to build a point-to-point correlation between the template and search region, and finally achieves part-level augmentation of target feature in the search region. Thus, the TFN efficiently augments the target embedding while weakening the non-target objects. 
Experiments on challenging benchmarks (LaSOT, TrackingNet, GOT-10k, OTB-100) demonstrate that the GdaTFT outperforms many state-of-the-art trackers and achieves leading performance. Code will be available.", + "primary_area": "computer vision ii", + "author": "Yun Liang; Qiaoqiao Li; Fumian Long", + "authorids": "", + "aff": "Guangzhou Key Laboratory of Intelligent Agriculture, College of Mathematics and Informatics, South China Agricultural University; Guangzhou Key Laboratory of Intelligent Agriculture, College of Mathematics and Informatics, South China Agricultural University; Guangzhou Key Laboratory of Intelligent Agriculture, College of Mathematics and Informatics, South China Agricultural University", + "bibtex": "@article{Liang_Li_Long_2023, title={Global Dilated Attention and Target Focusing Network for Robust Tracking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25241}, DOI={10.1609/aaai.v37i2.25241}, abstractNote={Self Attention has shown the excellent performance in tracking due to its global modeling capability. However, it brings two challenges: First, its global receptive field has less attention on local structure and inter-channel associations, which limits the semantics to distinguish objects and backgrounds; Second, its feature fusion with linear process cannot avoid the interference of non-target semantic objects. To solve the above issues, this paper proposes a robust tracking method named GdaTFT by defining the Global Dilated Attention (GDA) and Target Focusing Network (TFN). The GDA provides a new global semantics modeling approach to enhance the semantic objects while eliminating the background. It is defined via the local focusing module, dilated attention and channel adaption module. Thus, it promotes semantics by focusing local key information, building long-range dependencies and enhancing the semantics of channels. 
Subsequently, to distinguish the target and non-target objects both with rich semantics, the TFN is proposed to accurately focus the target region. Different from the present feature fusion, it uses the template as the query to build a point-to-point correlation between the template and search region, and finally achieves part-level augmentation of target feature in the search region. Thus, the TFN efficiently augments the target embedding while weakening the non-target objects. Experiments on challenging benchmarks (LaSOT, TrackingNet, GOT-10k, OTB-100) demonstrate that the GdaTFT outperforms many state-of-the-art trackers and achieves leading performance. Code will be available.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Yun and Li, Qiaoqiao and Long, Fumian}, year={2023}, month={Jun.}, pages={1549-1557} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25241/25013", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25241", + "pdf_size": 1174884, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4977273819751628466&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "scau.edu.cn;163.com;stu.scau.edu.cn", + "email": "scau.edu.cn;163.com;stu.scau.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "South China Agricultural University", + "aff_unique_dep": "College of Mathematics and Informatics", + "aff_unique_url": "http://www.scau.edu.cn", + "aff_unique_abbr": "SCAU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Guangzhou", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27036", + "title": "Global Explanations for Image Classifiers (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We hypothesize that deep network classifications of 
complex scenes can be explained using sets of relevant objects.\nWe employ beam search and singular value decomposition to generate local and global explanations that summarize the deep model's interpretation of a class.", + "primary_area": "", + "author": "Bhavan K. Vasu; Prasad Tadepalli", + "authorids": "", + "aff": "Oregon State University, Corvallis, Oregon 97331; Oregon State University, Corvallis, Oregon 97331", + "bibtex": "@article{Vasu_Tadepalli_2024, title={Global Explanations for Image Classifiers (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27036}, DOI={10.1609/aaai.v37i13.27036}, abstractNote={We hypothesize that deep network classifications of complex scenes can be explained using sets of relevant objects.\nWe employ beam search and singular value decomposition to generate local and global explanations that summarize the deep model\u2019s interpretation of a class.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vasu, Bhavan K. 
and Tadepalli, Prasad}, year={2024}, month={Jul.}, pages={16352-16353} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27036/26808", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27036", + "pdf_size": 494945, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:Io3xfMsIp0MJ:scholar.google.com/&scioq=Global+Explanations+for+Image+Classifiers+(Student+Abstract)&hl=en&as_sdt=0,10", + "gs_version_total": 3, + "aff_domain": "oregonstate.edu;oregonstate.edu", + "email": "oregonstate.edu;oregonstate.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Oregon State University", + "aff_unique_dep": "", + "aff_unique_url": "https://oregonstate.edu", + "aff_unique_abbr": "OSU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Corvallis", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26616", + "title": "Global Mixup: Eliminating Ambiguity with Clustering", + "track": "main", + "status": "Technical", + "abstract": "Data augmentation with Mixup has been proven an effective method to regularize the current deep neural networks. Mixup generates virtual samples and corresponding labels simultaneously by linear interpolation. However, the one-stage generation paradigm and the use of linear interpolation have two defects: (1) The label of the generated sample is simply combined from the labels of the original sample pairs without reasonable judgment, resulting in ambiguous labels. (2) Linear combination significantly restricts the sampling space for generating samples. To address these issues, we propose a novel and effective augmentation method, Global Mixup, based on global clustering relationships.\nSpecifically, we transform the previous one-stage augmentation process into two-stage by decoupling the process of generating virtual samples from the labeling. 
And for the labels of the generated samples, relabeling is performed based on clustering by calculating the global relationships of the generated samples.\nFurthermore, we are no longer restricted to linear relationships, which allows us to generate more reliable virtual samples in a larger sampling space.\nExtensive experiments for CNN, LSTM, and BERT on five tasks show that Global Mixup outperforms previous baselines. Further experiments also demonstrate the advantage of Global Mixup in low-resource scenarios.", + "primary_area": "speech natural language processing", + "author": "Xiangjin Xie; Li Yangning; Wang Chen; Kai Ouyang; Zuotong Xie; Hai-Tao Zheng", + "authorids": "", + "aff": "Shenzhen International Graduate School, Tsinghua University; Shenzhen International Graduate School, Tsinghua University + Pengcheng Laboratory; Google Inc.; Shenzhen International Graduate School, Tsinghua University; Shenzhen International Graduate School, Tsinghua University; Shenzhen International Graduate School, Tsinghua University + Pengcheng Laboratory", + "bibtex": "@article{Xie_Yangning_Chen_Ouyang_Xie_Zheng_2023, title={Global Mixup: Eliminating Ambiguity with Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26616}, DOI={10.1609/aaai.v37i11.26616}, abstractNote={Data augmentation with Mixup has been proven an effective method to regularize the current deep neural networks. Mixup generates virtual samples and corresponding labels simultaneously by linear interpolation. However, the one-stage generation paradigm and the use of linear interpolation have two defects: (1) The label of the generated sample is simply combined from the labels of the original sample pairs without reasonable judgment, resulting in ambiguous labels. (2) Linear combination significantly restricts the sampling space for generating samples. 
To address these issues, we propose a novel and effective augmentation method, Global Mixup, based on global clustering relationships.\nSpecifically, we transform the previous one-stage augmentation process into two-stage by decoupling the process of generating virtual samples from the labeling. And for the labels of the generated samples, relabeling is performed based on clustering by calculating the global relationships of the generated samples.\nFurthermore, we are no longer restricted to linear relationships, which allows us to generate more reliable virtual samples in a larger sampling space.\nExtensive experiments for CNN, LSTM, and BERT on five tasks show that Global Mixup outperforms previous baselines. Further experiments also demonstrate the advantage of Global Mixup in low-resource scenarios.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Xiangjin and Yangning, Li and Chen, Wang and Ouyang, Kai and Xie, Zuotong and Zheng, Hai-Tao}, year={2023}, month={Jun.}, pages={13798-13806} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26616/26388", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26616", + "pdf_size": 839956, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6191576814751435428&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;google.com;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;google.com;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;sz.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;2;0;0;0+1", + "aff_unique_norm": "Tsinghua University;Pengcheng Laboratory;Google", + "aff_unique_dep": "Shenzhen International Graduate School;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;;https://www.google.com", + "aff_unique_abbr": "THU;;Google", + 
"aff_campus_unique_index": "0;0;2;0;0;0", + "aff_campus_unique": "Shenzhen;;Mountain View", + "aff_country_unique_index": "0;0+0;1;0;0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25362", + "title": "Global-Local Characteristic Excited Cross-Modal Attacks from Images to Videos", + "track": "main", + "status": "Technical", + "abstract": "The transferability of adversarial examples is the key property in practical black-box scenarios. Currently, numerous methods improve the transferability across different models trained on the same modality of data. The investigation of generating video adversarial examples with imagebased substitute models to attack the target video models, i.e., cross-modal transferability of adversarial examples, is rarely explored. A few works on cross-modal transferability directly apply image attack methods for each frame and no factors especial for video data are considered, which limits the cross-modal transferability of adversarial examples. In this paper, we propose an effective cross-modal attack method which considers both the global and local characteristics of video data. Firstly, from the global perspective, we introduce inter-frame interaction into attack process to induce more diverse and stronger gradients rather than perturb each frame separately. Secondly, from the local perspective, we disrupt the inherently local correlation of frames within a video, which prevents black-box video model from capturing valuable temporal clues. Extensive experiments on the UCF-101 and Kinetics-400 validate the proposed method significantly improves cross-modal transferability and even surpasses strong baseline using video models as substitute model. 
Our source codes are available at https://github.com/lwmming/Cross-Modal-Attack.", + "primary_area": "computer vision ii", + "author": "Ruikui Wang; Yuanfang Guo; Yunhong Wang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University, China; School of Computer Science and Engineering, Beihang University, China + Zhongguancun Laboratory, Beijing, China; School of Computer Science and Engineering, Beihang University, China", + "bibtex": "@article{Wang_Guo_Wang_2023, title={Global-Local Characteristic Excited Cross-Modal Attacks from Images to Videos}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25362}, DOI={10.1609/aaai.v37i2.25362}, abstractNote={The transferability of adversarial examples is the key property in practical black-box scenarios. Currently, numerous methods improve the transferability across different models trained on the same modality of data. The investigation of generating video adversarial examples with imagebased substitute models to attack the target video models, i.e., cross-modal transferability of adversarial examples, is rarely explored. A few works on cross-modal transferability directly apply image attack methods for each frame and no factors especial for video data are considered, which limits the cross-modal transferability of adversarial examples. In this paper, we propose an effective cross-modal attack method which considers both the global and local characteristics of video data. Firstly, from the global perspective, we introduce inter-frame interaction into attack process to induce more diverse and stronger gradients rather than perturb each frame separately. Secondly, from the local perspective, we disrupt the inherently local correlation of frames within a video, which prevents black-box video model from capturing valuable temporal clues. 
Extensive experiments on the UCF-101 and Kinetics-400 validate the proposed method significantly improves cross-modal transferability and even surpasses strong baseline using video models as substitute model. Our source codes are available at https://github.com/lwmming/Cross-Modal-Attack.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Ruikui and Guo, Yuanfang and Wang, Yunhong}, year={2023}, month={Jun.}, pages={2635-2643} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25362/25134", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25362", + "pdf_size": 386725, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17795844530914303837&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "https://github.com/lwmming/Cross-Modal-Attack", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.buaa.edu.cn;", + "aff_unique_abbr": "Beihang;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25912", + "title": "Goal-Conditioned Generators of Deep Policies", + "track": "main", + "status": "Technical", + "abstract": "Goal-conditioned Reinforcement Learning (RL) aims at learning optimal policies, given goals encoded in special command inputs. Here we study goal-conditioned neural nets (NNs) that learn to generate deep NN policies in form of context-specific weight matrices, similar to Fast Weight Programmers and other methods from the 1990s. 
Using context commands of the form ``generate a policy that achieves a desired expected return,'' our NN generators combine powerful exploration of parameter space with generalization across commands to iteratively find better and better policies. A form of weight-sharing HyperNetworks and policy embeddings scales our method to generate deep NNs. Experiments show how a single learned policy generator can produce policies that achieve any return seen during training. Finally, we evaluate our algorithm on a set of continuous control tasks where it exhibits competitive performance.\nOur code is public.", + "primary_area": "machine learning i", + "author": "Francesco Faccio; Vincent Herrmann; Aditya Ramesh; Louis Kirsch; J\u00fcrgen Schmidhuber", + "authorids": "", + "aff": "The Swiss AI Lab IDSIA/USI/SUPSI, Lugano, Ticino, Switzerland + AI Initiative, KAUST, Thuwal, Saudi Arabia; The Swiss AI Lab IDSIA/USI/SUPSI, Lugano, Ticino, Switzerland + AI Initiative, KAUST, Thuwal, Saudi Arabia; The Swiss AI Lab IDSIA/USI/SUPSI, Lugano, Ticino, Switzerland; The Swiss AI Lab IDSIA/USI/SUPSI, Lugano, Ticino, Switzerland; The Swiss AI Lab IDSIA/USI/SUPSI, Lugano, Ticino, Switzerland + AI Initiative, KAUST, Thuwal, Saudi Arabia + NNAISENSE, Lugano, Switzerland", + "bibtex": "@article{Faccio_Herrmann_Ramesh_Kirsch_Schmidhuber_2023, title={Goal-Conditioned Generators of Deep Policies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25912}, DOI={10.1609/aaai.v37i6.25912}, abstractNote={Goal-conditioned Reinforcement Learning (RL) aims at learning optimal policies, given goals encoded in special command inputs. Here we study goal-conditioned neural nets (NNs) that learn to generate deep NN policies in form of context-specific weight matrices, similar to Fast Weight Programmers and other methods from the 1990s. 
Using context commands of the form ``generate a policy that achieves a desired expected return,\u2019\u2019 our NN generators combine powerful exploration of parameter space with generalization across commands to iteratively find better and better policies. A form of weight-sharing HyperNetworks and policy embeddings scales our method to generate deep NNs. Experiments show how a single learned policy generator can produce policies that achieve any return seen during training. Finally, we evaluate our algorithm on a set of continuous control tasks where it exhibits competitive performance.\nOur code is public.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Faccio, Francesco and Herrmann, Vincent and Ramesh, Aditya and Kirsch, Louis and Schmidhuber, J\u00fcrgen}, year={2023}, month={Jun.}, pages={7503-7511} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25912/25684", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25912", + "pdf_size": 3417677, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1455336595930071883&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 12, + "aff_domain": "idsia.ch;idsia.ch;idsia.ch;idsia.ch;idsia.ch", + "email": "idsia.ch;idsia.ch;idsia.ch;idsia.ch;idsia.ch", + "github": "https://github.com/IDSIA/GoGePo", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0;0;0+1+2", + "aff_unique_norm": "Swiss AI Lab IDSIA/USI/SUPSI;King Abdullah University of Science and Technology;NNAISENSE", + "aff_unique_dep": "AI Lab;AI Initiative;", + "aff_unique_url": "https://www.idsia.ch/;https://www.kaust.edu.sa;", + "aff_unique_abbr": "IDSIA;KAUST;", + "aff_campus_unique_index": "0+1;0+1;0;0;0+1+0", + "aff_campus_unique": "Lugano;Thuwal", + "aff_country_unique_index": "0+1;0+1;0;0;0+1+0", + "aff_country_unique": "Switzerland;Saudi Arabia" + }, + { + "id": "article-26024", + "title": "Goal-Conditioned Q-learning as Knowledge 
Distillation", + "track": "main", + "status": "Technical", + "abstract": "Many applications of reinforcement learning can be formalized as goal-conditioned environments, where, in each episode, there is a \"goal\" that affects the rewards obtained during that episode but does not affect the dynamics. Various techniques have been proposed to improve performance in goal-conditioned environments, such as automatic curriculum generation and goal relabeling. In this work, we explore a connection between off-policy reinforcement learning in goal-conditioned settings and knowledge distillation. In particular: the current Q-value function and the target Q-value estimate are both functions of the goal, and we would like to train the Q-value function to match its target for all goals. We therefore apply Gradient-Based Attention Transfer (Zagoruyko and Komodakis 2017), a knowledge distillation technique, to the Q-function update. We empirically show that this can improve the performance of goal-conditioned off-policy reinforcement learning when the space of goals is high-dimensional. We also show that this technique can be adapted to allow for efficient learning in the case of multiple simultaneous sparse goals, where the agent can attain a reward by achieving any one of a large set of objectives, all specified at test time. Finally, to provide theoretical support, we give examples of classes of environments where (under some assumptions) standard off-policy algorithms such as DDPG require at least O(d^2) replay buffer transitions to learn an optimal policy, while our proposed technique requires only O(d) transitions, where d is the dimensionality of the goal and state space. 
Code and appendix are available at https://github.com/alevine0/ReenGAGE.", + "primary_area": "machine learning ii", + "author": "Alexander Levine; Soheil Feizi", + "authorids": "", + "aff": "University of Maryland, College Park, Maryland, USA; University of Maryland, College Park, Maryland, USA", + "bibtex": "@article{Levine_Feizi_2023, title={Goal-Conditioned Q-learning as Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26024}, DOI={10.1609/aaai.v37i7.26024}, abstractNote={Many applications of reinforcement learning can be formalized as goal-conditioned environments, where, in each episode, there is a "goal" that affects the rewards obtained during that episode but does not affect the dynamics. Various techniques have been proposed to improve performance in goal-conditioned environments, such as automatic curriculum generation and goal relabeling. In this work, we explore a connection between off-policy reinforcement learning in goal-conditioned settings and knowledge distillation. In particular: the current Q-value function and the target Q-value estimate are both functions of the goal, and we would like to train the Q-value function to match its target for all goals. We therefore apply Gradient-Based Attention Transfer (Zagoruyko and Komodakis 2017), a knowledge distillation technique, to the Q-function update. We empirically show that this can improve the performance of goal-conditioned off-policy reinforcement learning when the space of goals is high-dimensional. We also show that this technique can be adapted to allow for efficient learning in the case of multiple simultaneous sparse goals, where the agent can attain a reward by achieving any one of a large set of objectives, all specified at test time. 
Finally, to provide theoretical support, we give examples of classes of environments where (under some assumptions) standard off-policy algorithms such as DDPG require at least O(d^2) replay buffer transitions to learn an optimal policy, while our proposed technique requires only O(d) transitions, where d is the dimensionality of the goal and state space. Code and appendix are available at https://github.com/alevine0/ReenGAGE.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Levine, Alexander and Feizi, Soheil}, year={2023}, month={Jun.}, pages={8500-8509} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26024/25796", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26024", + "pdf_size": 595045, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9865347194139349774&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.umd.edu;cs.umd.edu", + "email": "cs.umd.edu;cs.umd.edu", + "github": "https://github.com/alevine0/ReenGAGE", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Maryland", + "aff_unique_dep": "", + "aff_unique_url": "https://www/umd.edu", + "aff_unique_abbr": "UMD", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Park", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25269", + "title": "Good Helper Is around You: Attention-Driven Masked Image Modeling", + "track": "main", + "status": "Technical", + "abstract": "It has been witnessed that masked image modeling (MIM) has shown a huge potential in self-supervised learning in the past year. Benefiting from the universal backbone vision transformer, MIM learns self-supervised visual representations through masking a part of patches of the image while attempting to recover the missing pixels. 
Most previous works mask patches of the image randomly, which underutilizes the semantic information that is beneficial to visual representation learning. On the other hand, due to the large size of the backbone, most previous works have to spend much time on pre-training. In this paper, we propose Attention-driven Masking and Throwing Strategy (AMT), which could solve both problems above. We first leverage the self-attention mechanism to obtain the semantic information of the image during the training process automatically without using any supervised methods. Masking strategy can be guided by that information to mask areas selectively, which is helpful for representation learning. Moreover, a redundant patch throwing strategy is proposed, which makes learning more efficient. As a plug-and-play module for masked image modeling, AMT improves the linear probing accuracy of MAE by 2.9% ~ 5.9% on CIFAR-10/100, STL-10, Tiny ImageNet, and ImageNet-1K, and obtains an improved performance with respect to fine-tuning accuracy of MAE and SimMIM. Moreover, this design also achieves superior performance on downstream detection and segmentation tasks.", + "primary_area": "computer vision ii", + "author": "Zhengqi Liu; Jie Gui; Hao Luo", + "authorids": "", + "aff": "Southeast University, Nanjing, China; Southeast University, Nanjing, China + Purple Mountain Laboratories, China; Alibaba group, China", + "bibtex": "@article{Liu_Gui_Luo_2023, title={Good Helper Is around You: Attention-Driven Masked Image Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25269}, DOI={10.1609/aaai.v37i2.25269}, abstractNote={It has been witnessed that masked image modeling (MIM) has shown a huge potential in self-supervised learning in the past year. Benefiting from the universal backbone vision transformer, MIM learns self-supervised visual representations through masking a part of patches of the image while attempting to recover the missing pixels. 
Most previous works mask patches of the image randomly, which underutilizes the semantic information that is beneficial to visual representation learning. On the other hand, due to the large size of the backbone, most previous works have to spend much time on pre-training. In this paper, we propose Attention-driven Masking and Throwing Strategy (AMT), which could solve both problems above. We first leverage the self-attention mechanism to obtain the semantic information of the image during the training process automatically without using any supervised methods. Masking strategy can be guided by that information to mask areas selectively, which is helpful for representation learning. Moreover, a redundant patch throwing strategy is proposed, which makes learning more efficient. As a plug-and-play module for masked image modeling, AMT improves the linear probing accuracy of MAE by 2.9% ~ 5.9% on CIFAR-10/100, STL-10, Tiny ImageNet, and ImageNet-1K, and obtains an improved performance with respect to fine-tuning accuracy of MAE and SimMIM. 
Moreover, this design also achieves superior performance on downstream detection and segmentation tasks.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Zhengqi and Gui, Jie and Luo, Hao}, year={2023}, month={Jun.}, pages={1799-1807} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25269/25041", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25269", + "pdf_size": 617468, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16536205316332360394&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "seu.edu.cn;seu.edu.cn;alibaba-inc.com", + "email": "seu.edu.cn;seu.edu.cn;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;2", + "aff_unique_norm": "Southeast University;Purple Mountain Laboratories;Alibaba Group", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.seu.edu.cn/;;https://www.alibaba.com", + "aff_unique_abbr": "SEU;;Alibaba", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Nanjing;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25889", + "title": "GradPU: Positive-Unlabeled Learning via Gradient Penalty and Positive Upweighting", + "track": "main", + "status": "Technical", + "abstract": "Positive-unlabeled learning is an essential problem in many real-world applications with only labeled positive and unlabeled data, especially when the negative samples are difficult to identify. Most existing positive-unlabeled learning methods will inevitably overfit the positive class to some extent due to the existence of unidentified positive samples. This paper first analyzes the overfitting problem and proposes to bound the generalization errors via Wasserstein distances. 
Based on that, we develop a simple yet effective positive-unlabeled learning method, GradPU, which consists of two key ingredients: A gradient-based regularizer that penalizes the gradient norms in the interpolated data region, which improves the generalization of positive class; An unnormalized upweighting mechanism that assigns larger weights to those positive samples that are hard, not-well-fitted and less frequently labeled. It enforces the training error of each positive sample to be small and increases the robustness to the labeling bias. We evaluate our proposed GradPU on three datasets: MNIST, FashionMNIST, and CIFAR10. The results demonstrate that GradPU achieves state-of-the-art performance on both unbiased and biased positive labeling scenarios.", + "primary_area": "machine learning i", + "author": "Songmin Dai; Xiaoqiang Li; Yue Zhou; Xichen Ye; Tong Liu", + "authorids": "", + "aff": "School of Computer Engineering and Science, Shanghai University, China; School of Computer Engineering and Science, Shanghai University, China; School of Computer Engineering and Science, Shanghai University, China; School of Computer Engineering and Science, Shanghai University, China; School of Computer Engineering and Science, Shanghai University, China", + "bibtex": "@article{Dai_Li_Zhou_Ye_Liu_2023, title={GradPU: Positive-Unlabeled Learning via Gradient Penalty and Positive Upweighting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25889}, DOI={10.1609/aaai.v37i6.25889}, abstractNote={Positive-unlabeled learning is an essential problem in many real-world applications with only labeled positive and unlabeled data, especially when the negative samples are difficult to identify. Most existing positive-unlabeled learning methods will inevitably overfit the positive class to some extent due to the existence of unidentified positive samples. 
This paper first analyzes the overfitting problem and proposes to bound the generalization errors via Wasserstein distances. Based on that, we develop a simple yet effective positive-unlabeled learning method, GradPU, which consists of two key ingredients: A gradient-based regularizer that penalizes the gradient norms in the interpolated data region, which improves the generalization of positive class; An unnormalized upweighting mechanism that assigns larger weights to those positive samples that are hard, not-well-fitted and less frequently labeled. It enforces the training error of each positive sample to be small and increases the robustness to the labeling bias. We evaluate our proposed GradPU on three datasets: MNIST, FashionMNIST, and CIFAR10. The results demonstrate that GradPU achieves state-of-the-art performance on both unbiased and biased positive labeling scenarios.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dai, Songmin and Li, Xiaoqiang and Zhou, Yue and Ye, Xichen and Liu, Tong}, year={2023}, month={Jun.}, pages={7296-7303} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25889/25661", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25889", + "pdf_size": 397148, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7365493927973342709&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "shu.edu.cn;shu.edu.cn;shu.edu.cn;shu.edu.cn;shu.edu.cn", + "email": "shu.edu.cn;shu.edu.cn;shu.edu.cn;shu.edu.cn;shu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Shanghai University", + "aff_unique_dep": "School of Computer Engineering and Science", + "aff_unique_url": "https://www.shu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-25231", + "title": "Gradient Corner Pooling for Keypoint-Based Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Detecting objects as multiple keypoints is an important approach in the anchor-free object detection methods while corner pooling is an effective feature encoding method for corner positioning. The corners of the bounding box are located by summing the feature maps which are max-pooled in the x and y directions respectively by corner pooling. In the unidirectional max pooling operation, the features of the densely arranged objects of the same class are prone to occlusion. To this end, we propose a method named Gradient Corner Pooling. The spatial distance information of objects on the feature map is encoded during the unidirectional pooling process, which effectively alleviates the occlusion of the homogeneous object features. Further, the computational complexity of gradient corner pooling is the same as traditional corner pooling and hence it can be implemented efficiently. Gradient corner pooling obtains consistent improvements for various keypoint-based methods by directly replacing corner pooling. We verify the gradient corner pooling algorithm on the dataset and in real scenarios, respectively. The networks with gradient corner pooling located the corner points earlier in the training process and achieve an average accuracy improvement of 0.2%-1.6% on the MS-COCO dataset. 
The detectors with gradient corner pooling show better angle adaptability for arrayed objects in the actual scene test.", + "primary_area": "computer vision ii", + "author": "Xuyang Li; Xuemei Xie; Mingxuan Yu; Jiakai Luo; Chengwei Rao; Guangming Shi", + "authorids": "", + "aff": "Xidian University; Xidian University + Pazhou Lab; Xidian University; Xidian University; Xidian University; Xidian University + Peng Cheng Laboratory", + "bibtex": "@article{Li_Xie_Yu_Luo_Rao_Shi_2023, title={Gradient Corner Pooling for Keypoint-Based Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25231}, DOI={10.1609/aaai.v37i2.25231}, abstractNote={Detecting objects as multiple keypoints is an important approach in the anchor-free object detection methods while corner pooling is an effective feature encoding method for corner positioning. The corners of the bounding box are located by summing the feature maps which are max-pooled in the x and y directions respectively by corner pooling. In the unidirectional max pooling operation, the features of the densely arranged objects of the same class are prone to occlusion. To this end, we propose a method named Gradient Corner Pooling. The spatial distance information of objects on the feature map is encoded during the unidirectional pooling process, which effectively alleviates the occlusion of the homogeneous object features. Further, the computational complexity of gradient corner pooling is the same as traditional corner pooling and hence it can be implemented efficiently. Gradient corner pooling obtains consistent improvements for various keypoint-based methods by directly replacing corner pooling. We verify the gradient corner pooling algorithm on the dataset and in real scenarios, respectively. The networks with gradient corner pooling located the corner points earlier in the training process and achieve an average accuracy improvement of 0.2%-1.6% on the MS-COCO dataset. 
The detectors with gradient corner pooling show better angle adaptability for arrayed objects in the actual scene test.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xuyang and Xie, Xuemei and Yu, Mingxuan and Luo, Jiakai and Rao, Chengwei and Shi, Guangming}, year={2023}, month={Jun.}, pages={1460-1467} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25231/25003", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25231", + "pdf_size": 1527231, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11515193832183085628&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xidian.edu.cn;mail.xidian.edu.cn;stu.xidian.edu.cn;stu.xidian.edu.cn;stu.xidian.edu.cn;xidian.edu.cn", + "email": "stu.xidian.edu.cn;mail.xidian.edu.cn;stu.xidian.edu.cn;stu.xidian.edu.cn;stu.xidian.edu.cn;xidian.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0;0+2", + "aff_unique_norm": "Xidian University;Pazhou Lab;Peng Cheng Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.xidian.edu.cn/;;http://www.pcl.ac.cn", + "aff_unique_abbr": "Xidian;;PCL", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "article-26013", + "title": "Gradient Estimation for Binary Latent Variables via Gradient Variance Clipping", + "track": "main", + "status": "Technical", + "abstract": "Gradient estimation is often necessary for fitting generative models with discrete latent variables, in contexts such as reinforcement learning and variational autoencoder (VAE) training. The DisARM estimator achieves state of the art gradient variance for Bernoulli latent variable models in many contexts. 
However, DisARM and other estimators have potentially exploding variance near the boundary of the parameter space, where solutions tend to lie. To ameliorate this issue, we propose a new gradient estimator bitflip-1 that is lower variance at the boundaries of the parameter space. As bitflip-1 has complementary properties to existing estimators, we introduce an aggregated estimator, unbiased gradient variance clipping (UGC) that uses either a bitflip-1 or a DisARM gradient update for each coordinate. We theoretically prove that UGC has uniformly lower variance than DisARM.\nEmpirically, we observe that UGC achieves the optimal value of the optimization objectives in toy experiments, discrete VAE training, and in a best subset selection problem.", + "primary_area": "machine learning ii", + "author": "Russell Z. Kunes; Mingzhang Yin; Max Land; Doron Haviv; Dana Pe'er; Simon Tavar\u00e9", + "authorids": "", + "aff": "Department of Statistics, Columbia University+Irving Institute of Cancer Dynamics, Columbia University; Irving Institute of Cancer Dynamics, Columbia University+Warrington College of Business, University of Florida; Computational and Systems Biology, Memorial Sloan Kettering Cancer Center; Computational and Systems Biology, Memorial Sloan Kettering Cancer Center; Computational and Systems Biology, Memorial Sloan Kettering Cancer Center+Howard Hughes Medical Institute; Department of Statistics, Columbia University+Irving Institute of Cancer Dynamics, Columbia University", + "bibtex": "@article{Kunes_Yin_Land_Haviv_Pe\u2019er_Tavar\u00e9_2023, title={Gradient Estimation for Binary Latent Variables via Gradient Variance Clipping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26013}, DOI={10.1609/aaai.v37i7.26013}, abstractNote={Gradient estimation is often necessary for fitting generative models with discrete latent variables, in contexts such as reinforcement learning and variational autoencoder (VAE) training. 
The DisARM estimator achieves state of the art gradient variance for Bernoulli latent variable models in many contexts. However, DisARM and other estimators have potentially exploding variance near the boundary of the parameter space, where solutions tend to lie. To ameliorate this issue, we propose a new gradient estimator bitflip-1 that is lower variance at the boundaries of the parameter space. As bitflip-1 has complementary properties to existing estimators, we introduce an aggregated estimator, unbiased gradient variance clipping (UGC) that uses either a bitflip-1 or a DisARM gradient update for each coordinate. We theoretically prove that UGC has uniformly lower variance than DisARM.\nEmpirically, we observe that UGC achieves the optimal value of the optimization objectives in toy experiments, discrete VAE training, and in a best subset selection problem.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kunes, Russell Z. and Yin, Mingzhang and Land, Max and Haviv, Doron and Pe\u2019er, Dana and Tavar\u00e9, Simon}, year={2023}, month={Jun.}, pages={8405-8412} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26013/25785", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26013", + "pdf_size": 4605704, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3865827381779843321&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "; ; ; ; ; ", + "email": "; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+1;2;2;2+3;0+0", + "aff_unique_norm": "Columbia University;University of Florida;Memorial Sloan Kettering Cancer Center;Howard Hughes Medical Institute", + "aff_unique_dep": "Department of Statistics;Warrington College of Business;Computational and Systems Biology;", + "aff_unique_url": "https://www.columbia.edu;https://warrington.ufl.edu;https://www.mskcc.org;https://www.hhmi.org", + "aff_unique_abbr": 
"Columbia;UF;MSKCC;HHMI", + "aff_campus_unique_index": ";1;;", + "aff_campus_unique": ";Gainesville", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26353", + "title": "Gradient-Adaptive Pareto Optimization for Constrained Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Constrained Reinforcement Learning (CRL) burgeons broad interest in recent years, which pursues maximizing long-term returns while constraining costs. Although CRL can be cast as a multi-objective optimization problem, it is still facing the key challenge that gradient-based Pareto optimization methods tend to stick to known Pareto-optimal solutions even when they yield poor returns (e.g., the safest self-driving car that never moves) or violate the constraints (e.g., the record-breaking racer that crashes the car). In this paper, we propose Gradient-adaptive Constrained Policy Optimization (GCPO for short), a novel Pareto optimization method for CRL with two adaptive gradient recalibration techniques. First, to find Pareto-optimal solutions with balanced performance over all targets, we propose gradient rebalancing which forces the agent to improve more on under-optimized objectives at every policy iteration. Second, to guarantee that the cost constraints are satisfied, we propose gradient perturbation that can temporarily sacrifice the returns for costs. 
Experiments on the SafetyGym benchmarks show that our method consistently outperforms previous CRL methods in reward while satisfying the constraints.", + "primary_area": "machine learning iv", + "author": "Zixian Zhou; Mengda Huang; Feiyang Pan; Jia He; Xiang Ao; Dandan Tu; Qing He", + "authorids": "", + "aff": "Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China+University of Chinese Academy of Sciences, Beijing 100049, China; Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China+University of Chinese Academy of Sciences, Beijing 100049, China; Huawei EI Innovation Lab; Huawei EI Innovation Lab; Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China+University of Chinese Academy of Sciences, Beijing 100049, China+Institute of Intelligent Computing Technology, Suzhou, CAS; Huawei EI Innovation Lab; Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China+University of Chinese Academy of Sciences, Beijing 100049, China", + "bibtex": "@article{Zhou_Huang_Pan_He_Ao_Tu_He_2023, title={Gradient-Adaptive Pareto Optimization for Constrained Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26353}, DOI={10.1609/aaai.v37i9.26353}, abstractNote={Constrained Reinforcement Learning (CRL) burgeons broad interest in recent years, which pursues maximizing long-term returns while constraining costs. 
Although CRL can be cast as a multi-objective optimization problem, it is still facing the key challenge that gradient-based Pareto optimization methods tend to stick to known Pareto-optimal solutions even when they yield poor returns (e.g., the safest self-driving car that never moves) or violate the constraints (e.g., the record-breaking racer that crashes the car). In this paper, we propose Gradient-adaptive Constrained Policy Optimization (GCPO for short), a novel Pareto optimization method for CRL with two adaptive gradient recalibration techniques. First, to find Pareto-optimal solutions with balanced performance over all targets, we propose gradient rebalancing which forces the agent to improve more on under-optimized objectives at every policy iteration. Second, to guarantee that the cost constraints are satisfied, we propose gradient perturbation that can temporarily sacrifice the returns for costs. Experiments on the SafetyGym benchmarks show that our method consistently outperforms previous CRL methods in reward while satisfying the constraints.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Zixian and Huang, Mengda and Pan, Feiyang and He, Jia and Ao, Xiang and Tu, Dandan and He, Qing}, year={2023}, month={Jun.}, pages={11443-11451} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26353/26125", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26353", + "pdf_size": 10492524, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2959052348093108070&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ict.ac.cn;ict.ac.cn;gmail.com; ;ict.ac.cn; ; ", + "email": "ict.ac.cn;ict.ac.cn;gmail.com; ;ict.ac.cn; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;2;2;0+1+0;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Huawei", + "aff_unique_dep": 
"Institute of Computing Technology;;EI Innovation Lab", + "aff_unique_url": "http://www.cas.ac.cn;http://www.ucas.ac.cn;https://www.huawei.com", + "aff_unique_abbr": "CAS;UCAS;Huawei", + "aff_campus_unique_index": "0+0;0+0;0+0+2;0+0", + "aff_campus_unique": "Beijing;;Suzhou", + "aff_country_unique_index": "0+0;0+0;0;0;0+0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25499", + "title": "Gradient-Based Graph Attention for Scene Text Image Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Scene text image super-resolution (STISR) in the wild has been shown to be beneficial to support improved vision-based text recognition from low-resolution imagery. An intuitive way to enhance STISR performance is to explore the well-structured and repetitive layout characteristics of text and exploit these as prior knowledge to guide model convergence. In this paper, we propose a novel gradient-based graph attention method to embed patch-wise text layout contexts into image feature representations for high-resolution text image reconstruction in an implicit and elegant manner. We introduce a non-local group-wise attention module to extract text features which are then enhanced by a cascaded channel attention module and a novel gradient-based graph attention module in order to obtain more effective representations by exploring correlations of regional and local patch-wise text layout properties. Extensive experiments on the benchmark TextZoom dataset convincingly demonstrate that our method supports excellent text recognition and outperforms the current state-of-the-art in STISR. 
The source code is available at https://github.com/xyzhu1/TSAN.", + "primary_area": "computer vision iii", + "author": "Xiangyuan Zhu; Kehua Guo; Hui Fang; Rui Ding; Zheng Wu; Gerald Schaefer", + "authorids": "", + "aff": "School of Computer Science and Engineering, Central South University, China; School of Computer Science and Engineering, Central South University, China; Department of Computer Science, Loughborough University, U.K.; School of Computer Science and Engineering, Central South University, China; School of Computer Science and Engineering, Central South University, China; Department of Computer Science, Loughborough University, U.K.", + "bibtex": "@article{Zhu_Guo_Fang_Ding_Wu_Schaefer_2023, title={Gradient-Based Graph Attention for Scene Text Image Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25499}, DOI={10.1609/aaai.v37i3.25499}, abstractNote={Scene text image super-resolution (STISR) in the wild has been shown to be beneficial to support improved vision-based text recognition from low-resolution imagery. An intuitive way to enhance STISR performance is to explore the well-structured and repetitive layout characteristics of text and exploit these as prior knowledge to guide model convergence. In this paper, we propose a novel gradient-based graph attention method to embed patch-wise text layout contexts into image feature representations for high-resolution text image reconstruction in an implicit and elegant manner. We introduce a non-local group-wise attention module to extract text features which are then enhanced by a cascaded channel attention module and a novel gradient-based graph attention module in order to obtain more effective representations by exploring correlations of regional and local patch-wise text layout properties. 
Extensive experiments on the benchmark TextZoom dataset convincingly demonstrate that our method supports excellent text recognition and outperforms the current state-of-the-art in STISR. The source code is available at https://github.com/xyzhu1/TSAN.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Xiangyuan and Guo, Kehua and Fang, Hui and Ding, Rui and Wu, Zheng and Schaefer, Gerald}, year={2023}, month={Jun.}, pages={3861-3869} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25499/25271", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25499", + "pdf_size": 579462, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5972356948794608445&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "csu.edu.cn;csu.edu.cn;lboro.ac.uk;csu.edu.cn;gmail.com;ieee.org", + "email": "csu.edu.cn;csu.edu.cn;lboro.ac.uk;csu.edu.cn;gmail.com;ieee.org", + "github": "https://github.com/xyzhu1/TSAN", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;1", + "aff_unique_norm": "Central South University;Loughborough University", + "aff_unique_dep": "School of Computer Science and Engineering;Department of Computer Science", + "aff_unique_url": "http://www.csu.edu.cn;https://www.lboro.ac.uk", + "aff_unique_abbr": "CSU;Lboro", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26141", + "title": "Gradient-Variation Bound for Online Convex Optimization with Constraints", + "track": "main", + "status": "Technical", + "abstract": "We study online convex optimization with constraints consisting of multiple functional constraints and a relatively simple constraint set, such as a Euclidean ball. 
As enforcing the constraints at each time step through projections is computationally challenging in general, we allow decisions to violate the functional constraints but aim to achieve a low regret and cumulative violation of the constraints over a horizon of T time steps. First-order methods achieve an O(sqrt{T}) regret and an O(1) constraint violation, which is the best-known bound under the Slater's condition, but do not take into account the structural information of the problem. Furthermore, the existing algorithms and analysis are limited to Euclidean space. In this paper, we provide an instance-dependent bound for online convex optimization with complex constraints obtained by a novel online primal-dual mirror-prox algorithm. Our instance-dependent regret is quantified by the total gradient variation V_*(T) in the sequence of loss functions. The proposed algorithm works in general normed spaces and simultaneously achieves an O(sqrt{V_*(T)}) regret and an O(1) constraint violation, which is never worse than the best-known (O(sqrt{T}), O(1)) result and improves over previous works that applied mirror-prox-type algorithms for this problem achieving O(T^{2/3}) regret and constraint violation. 
Finally, our algorithm is computationally efficient, as it only performs mirror descent steps in each iteration instead of solving a general Lagrangian minimization problem.", + "primary_area": "machine learning iii", + "author": "Shuang Qiu; Xiaohan Wei; Mladen Kolar", + "authorids": "", + "aff": "Booth School of Business, the University of Chicago; Meta Platforms, Inc.; Booth School of Business, the University of Chicago", + "bibtex": "@article{Qiu_Wei_Kolar_2023, title={Gradient-Variation Bound for Online Convex Optimization with Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26141}, DOI={10.1609/aaai.v37i8.26141}, abstractNote={We study online convex optimization with constraints consisting of multiple functional constraints and a relatively simple constraint set, such as a Euclidean ball. As enforcing the constraints at each time step through projections is computationally challenging in general, we allow decisions to violate the functional constraints but aim to achieve a low regret and cumulative violation of the constraints over a horizon of T time steps. First-order methods achieve an O(sqrt{T}) regret and an O(1) constraint violation, which is the best-known bound under the Slater\u2019s condition, but do not take into account the structural information of the problem. Furthermore, the existing algorithms and analysis are limited to Euclidean space. In this paper, we provide an instance-dependent bound for online convex optimization with complex constraints obtained by a novel online primal-dual mirror-prox algorithm. Our instance-dependent regret is quantified by the total gradient variation V_*(T) in the sequence of loss functions. 
The proposed algorithm works in general normed spaces and simultaneously achieves an O(sqrt{V_*(T)}) regret and an O(1) constraint violation, which is never worse than the best-known (O(sqrt{T}), O(1)) result and improves over previous works that applied mirror-prox-type algorithms for this problem achieving O(T^{2/3}) regret and constraint violation. Finally, our algorithm is computationally efficient, as it only performs mirror descent steps in each iteration instead of solving a general Lagrangian minimization problem.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qiu, Shuang and Wei, Xiaohan and Kolar, Mladen}, year={2023}, month={Jun.}, pages={9534-9542} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26141/25913", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26141", + "pdf_size": 176636, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9006185860854975735&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "uchicago.edu;fb.com;chicagobooth.edu", + "email": "uchicago.edu;fb.com;chicagobooth.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Chicago;Meta Platforms, Inc.", + "aff_unique_dep": "Booth School of Business;", + "aff_unique_url": "https://www.chicagobooth.edu;https://www.meta.com", + "aff_unique_abbr": "Chicago Booth;Meta", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26865", + "title": "Grape Cold Hardiness Prediction via Multi-Task Learning", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Cold temperatures during fall and spring have the potential to cause frost damage to grapevines and other fruit plants, which can significantly decrease harvest yields. 
To help prevent these losses, farmers deploy expensive frost mitigation measures, such as, sprinklers, heaters, and wind machines, when they judge that damage may occur. This judgment, however, is challenging because the cold hardiness of plants changes throughout the dormancy period and it is difficult to directly measure. This has led scientists to develop cold hardiness prediction models that can be tuned to different grape cultivars based on laborious field measurement data. In this paper, we study whether deep-learning models can improve cold hardiness prediction for grapes based on data that has been collected over a 30-year time period. A key challenge is that the amount of data per cultivar is highly variable, with some cultivars having only a small amount. For this purpose, we investigate the use of multi-task learning to leverage data across cultivars in order to improve prediction performance for individual cultivars. We evaluate a number of multi-task learning approaches and show that the highest performing approach is able to significantly improve over learning for single cultivars and outperforms the current state-of-the-art scientific model for most cultivars.", + "primary_area": "emerging applications of ai", + "author": "Aseem Saxena; Paola Pesantez-Cabrera; Rohan Ballapragada; Kin-Ho Lam; Markus Keller; Alan Fern", + "authorids": "", + "aff": "School of Electrical Engineering and Computer Science, Oregon State University; School of Electrical Engineering and Computer Science, Washington State University; School of Electrical Engineering and Computer Science, Oregon State University; School of Electrical Engineering and Computer Science, Oregon State University; Irrigated Agriculture Research and Extension Center, Washington State University; School of Electrical Engineering and Computer Science, Oregon State University", + "bibtex": "@article{Saxena_Pesantez-Cabrera_Ballapragada_Lam_Keller_Fern_2024, title={Grape Cold Hardiness Prediction via 
Multi-Task Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26865}, DOI={10.1609/aaai.v37i13.26865}, abstractNote={Cold temperatures during fall and spring have the potential to cause frost damage to grapevines and other fruit plants, which can significantly decrease harvest yields. To help prevent these losses, farmers deploy expensive frost mitigation measures, such as, sprinklers, heaters, and wind machines, when they judge that damage may occur. This judgment, however, is challenging because the cold hardiness of plants changes throughout the dormancy period and it is difficult to directly measure. This has led scientists to develop cold hardiness prediction models that can be tuned to different grape cultivars based on laborious field measurement data. In this paper, we study whether deep-learning models can improve cold hardiness prediction for grapes based on data that has been collected over a 30-year time period. A key challenge is that the amount of data per cultivar is highly variable, with some cultivars having only a small amount. For this purpose, we investigate the use of multi-task learning to leverage data across cultivars in order to improve prediction performance for individual cultivars. 
We evaluate a number of multi-task learning approaches and show that the highest performing approach is able to significantly improve over learning for single cultivars and outperforms the current state-of-the-art scientific model for most cultivars.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Saxena, Aseem and Pesantez-Cabrera, Paola and Ballapragada, Rohan and Lam, Kin-Ho and Keller, Markus and Fern, Alan}, year={2024}, month={Jul.}, pages={15717-15723} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26865/26637", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26865", + "pdf_size": 225856, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14812474743092673191&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "oregonstate.edu;wsu.edu;oregonstate.edu;oregonstate.edu;wsu.edu;oregonstate.edu", + "email": "oregonstate.edu;wsu.edu;oregonstate.edu;oregonstate.edu;wsu.edu;oregonstate.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;1;0", + "aff_unique_norm": "Oregon State University;Washington State University", + "aff_unique_dep": "School of Electrical Engineering and Computer Science;School of Electrical Engineering and Computer Science", + "aff_unique_url": "https://osu.edu;https://www.wsu.edu", + "aff_unique_abbr": "OSU;WSU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Corvallis;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25907", + "title": "Graph Anomaly Detection via Multi-Scale Contrastive Learning Networks with Augmented View", + "track": "main", + "status": "Technical", + "abstract": "Graph anomaly detection (GAD) is a vital task in graph-based machine learning and has been widely applied in many real-world applications. 
The primary goal of GAD is to capture anomalous nodes from graph datasets, which evidently deviate from the majority of nodes. Recent methods have paid attention to various scales of contrastive strategies for GAD, i.e., node-subgraph and node-node contrasts. However, they neglect the subgraph-subgraph comparison information which the normal and abnormal subgraph pairs behave differently in terms of embeddings and structures in GAD, resulting in sub-optimal task performance. In this paper, we fulfill the above idea in the proposed multi-view multi-scale contrastive learning framework with subgraph-subgraph contrast for the first practice. To be specific, we regard the original input graph as the first view and generate the second view by graph augmentation with edge modifications. With the guidance of maximizing the similarity of the subgraph pairs, the proposed subgraph-subgraph contrast contributes to more robust subgraph embeddings despite of the structure variation. Moreover, the introduced subgraph-subgraph contrast cooperates well with the widely-adopted node-subgraph and node-node contrastive counterparts for mutual GAD performance promotions. Besides, we also conduct sufficient experiments to investigate the impact of different graph augmentation approaches on detection performance. The comprehensive experimental results well demonstrate the superiority of our method compared with the state-of-the-art approaches and the effectiveness of the multi-view subgraph pair contrastive strategy for the GAD task. 
The source code is released at https://github.com/FelixDJC/GRADATE.", + "primary_area": "machine learning i", + "author": "Jingcan Duan; Siwei Wang; Pei Zhang; En Zhu; Jingtao Hu; Hu Jin; Yue Liu; Zhibin Dong", + "authorids": "", + "aff": "College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China; College of Computer, National University of Defense Technology, Changsha, China", + "bibtex": "@article{Duan_Wang_Zhang_Zhu_Hu_Jin_Liu_Dong_2023, title={Graph Anomaly Detection via Multi-Scale Contrastive Learning Networks with Augmented View}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25907}, DOI={10.1609/aaai.v37i6.25907}, abstractNote={Graph anomaly detection (GAD) is a vital task in graph-based machine learning and has been widely applied in many real-world applications. The primary goal of GAD is to capture anomalous nodes from graph datasets, which evidently deviate from the majority of nodes. Recent methods have paid attention to various scales of contrastive strategies for GAD, i.e., node-subgraph and node-node contrasts. However, they neglect the subgraph-subgraph comparison information which the normal and abnormal subgraph pairs behave differently in terms of embeddings and structures in GAD, resulting in sub-optimal task performance. In this paper, we fulfill the above idea in the proposed multi-view multi-scale contrastive learning framework with subgraph-subgraph contrast for the first practice. 
To be specific, we regard the original input graph as the first view and generate the second view by graph augmentation with edge modifications. With the guidance of maximizing the similarity of the subgraph pairs, the proposed subgraph-subgraph contrast contributes to more robust subgraph embeddings despite of the structure variation. Moreover, the introduced subgraph-subgraph contrast cooperates well with the widely-adopted node-subgraph and node-node contrastive counterparts for mutual GAD performance promotions. Besides, we also conduct sufficient experiments to investigate the impact of different graph augmentation approaches on detection performance. The comprehensive experimental results well demonstrate the superiority of our method compared with the state-of-the-art approaches and the effectiveness of the multi-view subgraph pair contrastive strategy for the GAD task. The source code is released at https://github.com/FelixDJC/GRADATE.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Duan, Jingcan and Wang, Siwei and Zhang, Pei and Zhu, En and Hu, Jingtao and Jin, Hu and Liu, Yue and Dong, Zhibin}, year={2023}, month={Jun.}, pages={7459-7467} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25907/25679", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25907", + "pdf_size": 1154988, + "gs_citation": 104, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5525012945653597577&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "163.com;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;163.com;nudt.edu.cn", + "email": "163.com;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;163.com;nudt.edu.cn", + "github": "https://github.com/FelixDJC/GRADATE", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "National University of Defense Technology", + "aff_unique_dep": "College of Computer", + 
"aff_unique_url": "http://www.nudt.edu.cn", + "aff_unique_abbr": "NUDT", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Changsha", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26568", + "title": "Graph Component Contrastive Learning for Concept Relatedness Estimation", + "track": "main", + "status": "Technical", + "abstract": "Concept relatedness estimation (CRE) aims to determine whether two given concepts are related. Existing methods only consider the pairwise relationship between concepts, while overlooking the higher-order relationship that could be encoded in a concept-level graph structure. We discover that this underlying graph satisfies a set of intrinsic properties of CRE, including reflexivity, commutativity, and transitivity. In this paper, we formalize the CRE properties and introduce a graph structure named ConcreteGraph. To address the data scarcity issue in CRE, we introduce a novel data augmentation approach to sample new concept pairs from the graph. As it is intractable for data augmentation to fully capture the structural information of the ConcreteGraph due to a large amount of potential concept pairs, we further introduce a novel Graph Component Contrastive Learning framework to implicitly learn the complete structure of the ConcreteGraph. Empirical results on three datasets show significant improvement over the state-of-the-art model. 
Detailed ablation studies demonstrate that our proposed approach can effectively capture the high-order relationship among concepts.", + "primary_area": "speech natural language processing", + "author": "Yueen Ma; Zixing Song; Xuming Hu; Jingjing Li; Yifei Zhang; Irwin King", + "authorids": "", + "aff": "The Chinese University of Hong Kong; The Chinese University of Hong Kong; Tsinghua University; The Chinese University of Hong Kong; The Chinese University of Hong Kong; The Chinese University of Hong Kong", + "bibtex": "@article{Ma_Song_Hu_Li_Zhang_King_2023, title={Graph Component Contrastive Learning for Concept Relatedness Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26568}, DOI={10.1609/aaai.v37i11.26568}, abstractNote={Concept relatedness estimation (CRE) aims to determine whether two given concepts are related. Existing methods only consider the pairwise relationship between concepts, while overlooking the higher-order relationship that could be encoded in a concept-level graph structure. We discover that this underlying graph satisfies a set of intrinsic properties of CRE, including reflexivity, commutativity, and transitivity. In this paper, we formalize the CRE properties and introduce a graph structure named ConcreteGraph. To address the data scarcity issue in CRE, we introduce a novel data augmentation approach to sample new concept pairs from the graph. As it is intractable for data augmentation to fully capture the structural information of the ConcreteGraph due to a large amount of potential concept pairs, we further introduce a novel Graph Component Contrastive Learning framework to implicitly learn the complete structure of the ConcreteGraph. Empirical results on three datasets show significant improvement over the state-of-the-art model. 
Detailed ablation studies demonstrate that our proposed approach can effectively capture the high-order relationship among concepts.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Yueen and Song, Zixing and Hu, Xuming and Li, Jingjing and Zhang, Yifei and King, Irwin}, year={2023}, month={Jun.}, pages={13362-13370} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26568/26340", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26568", + "pdf_size": 375177, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13650221593465571766&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;mails.tsinghua.edu.cn;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;mails.tsinghua.edu.cn;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Tsinghua University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "CUHK;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25942", + "title": "Graph Knows Unknowns: Reformulate Zero-Shot Learning as Sample-Level Graph Recognition", + "track": "main", + "status": "Technical", + "abstract": "Zero-shot learning (ZSL) is an extreme case of transfer learning that aims to recognize samples (e.g., images) of unseen classes relying on a train-set covering only seen classes and a set of auxiliary knowledge (e.g., semantic descriptors). Existing methods usually resort to constructing a visual-to-semantics mapping based on features extracted from each whole sample. 
However, since the visual and semantic spaces are inherently independent and may exist in different manifolds, these methods may easily suffer from the domain bias problem due to the knowledge transfer from seen to unseen classes. Unlike existing works, this paper investigates the fine-grained ZSL from a novel perspective of sample-level graph. Specifically, we decompose an input into several fine-grained elements and construct a graph structure per sample to measure and utilize element-granularity relations within each sample. Taking advantage of recently developed graph neural networks (GNNs), we formulate the ZSL problem to a graph-to-semantics mapping task, which can better exploit element-semantics correlation and local sub-structural information in samples. Experimental results on the widely used benchmark datasets demonstrate that the proposed method can mitigate the domain bias problem and achieve competitive performance against other representative methods.", + "primary_area": "machine learning i", + "author": "Jingcai Guo; Song Guo; Qihua Zhou; Ziming Liu; Xiaocheng Lu; Fushuo Huo", + "authorids": "", + "aff": "Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China+The Hong Kong Polytechnic University Shenzhen Research Institute, Shenzhen, China; Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China+The Hong Kong Polytechnic University Shenzhen Research Institute, Shenzhen, China; Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China; Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China; Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China+School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China; Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China", + "bibtex": "@article{Guo_Guo_Zhou_Liu_Lu_Huo_2023, title={Graph Knows Unknowns: Reformulate 
Zero-Shot Learning as Sample-Level Graph Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25942}, DOI={10.1609/aaai.v37i6.25942}, abstractNote={Zero-shot learning (ZSL) is an extreme case of transfer learning that aims to recognize samples (e.g., images) of unseen classes relying on a train-set covering only seen classes and a set of auxiliary knowledge (e.g., semantic descriptors). Existing methods usually resort to constructing a visual-to-semantics mapping based on features extracted from each whole sample. However, since the visual and semantic spaces are inherently independent and may exist in different manifolds, these methods may easily suffer from the domain bias problem due to the knowledge transfer from seen to unseen classes. Unlike existing works, this paper investigates the fine-grained ZSL from a novel perspective of sample-level graph. Specifically, we decompose an input into several fine-grained elements and construct a graph structure per sample to measure and utilize element-granularity relations within each sample. Taking advantage of recently developed graph neural networks (GNNs), we formulate the ZSL problem to a graph-to-semantics mapping task, which can better exploit element-semantics correlation and local sub-structural information in samples. 
Experimental results on the widely used benchmark datasets demonstrate that the proposed method can mitigate the domain bias problem and achieve competitive performance against other representative methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Jingcai and Guo, Song and Zhou, Qihua and Liu, Ziming and Lu, Xiaocheng and Huo, Fushuo}, year={2023}, month={Jun.}, pages={7775-7783} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25942/25714", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25942", + "pdf_size": 4277434, + "gs_citation": 67, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12517783448356550407&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "polyu.edu.hk;polyu.edu.hk;connect.polyu.hk;connect.polyu.hk;connect.polyu.hk;connect.polyu.hk", + "email": "polyu.edu.hk;polyu.edu.hk;connect.polyu.hk;connect.polyu.hk;connect.polyu.hk;connect.polyu.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0;0;0+1;0", + "aff_unique_norm": "The Hong Kong Polytechnic University;Northwestern Polytechnical University", + "aff_unique_dep": "Department of Computing;School of Computer Science", + "aff_unique_url": "https://www.polyu.edu.hk;https://www.nwpu.edu.cn", + "aff_unique_abbr": "PolyU;NPU", + "aff_campus_unique_index": "0+1;0+1;0;0;0+2;0", + "aff_campus_unique": "Hong Kong SAR;Shenzhen;Xi'an", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25856", + "title": "Graph Ordering Attention Networks", + "track": "main", + "status": "Technical", + "abstract": "Graph Neural Networks (GNNs) have been successfully used in many problems involving graph-structured data, achieving state-of-the-art performance. 
\nGNNs typically employ a message-passing scheme, in which every node aggregates information from its neighbors using a permutation-invariant aggregation function.\nStandard well-examined choices such as the mean or sum aggregation functions have limited capabilities, as they are not able to capture interactions among neighbors. \nIn this work, we formalize these interactions using an information-theoretic framework that notably includes synergistic information. \nDriven by this definition, we introduce the Graph Ordering Attention (GOAT) layer, a novel GNN component that captures interactions between nodes in a neighborhood. \nThis is achieved by learning local node orderings via an attention mechanism and processing the ordered representations using a recurrent neural network aggregator. \nThis design allows us to make use of a permutation-sensitive aggregator while maintaining the permutation-equivariance of the proposed GOAT layer. \nThe GOAT model demonstrates its increased performance in modeling graph metrics that capture complex information, such as the betweenness centrality and the effective size of a node. 
In practical use-cases, its superior modeling capability is confirmed through its success in several real-world node classification benchmarks.", + "primary_area": "machine learning i", + "author": "Michail Chatzianastasis; Johannes Lutzeyer; George Dasoulas; Michalis Vazirgiannis", + "authorids": "", + "aff": "Ecole Polytechnique, Institut Polytechnique de Paris, France; Ecole Polytechnique, Institut Polytechnique de Paris, France; Ecole Polytechnique, Institut Polytechnique de Paris, France + Harvard University, Cambridge, MA, USA; Ecole Polytechnique, Institut Polytechnique de Paris, France", + "bibtex": "@article{Chatzianastasis_Lutzeyer_Dasoulas_Vazirgiannis_2023, title={Graph Ordering Attention Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25856}, DOI={10.1609/aaai.v37i6.25856}, abstractNote={Graph Neural Networks (GNNs) have been successfully used in many problems involving graph-structured data, achieving state-of-the-art performance. GNNs typically employ a message-passing scheme, in which every node aggregates information from its neighbors using a permutation-invariant aggregation function.\nStandard well-examined choices such as the mean or sum aggregation functions have limited capabilities, as they are not able to capture interactions among neighbors. In this work, we formalize these interactions using an information-theoretic framework that notably includes synergistic information. Driven by this definition, we introduce the Graph Ordering Attention (GOAT) layer, a novel GNN component that captures interactions between nodes in a neighborhood. This is achieved by learning local node orderings via an attention mechanism and processing the ordered representations using a recurrent neural network aggregator. This design allows us to make use of a permutation-sensitive aggregator while maintaining the permutation-equivariance of the proposed GOAT layer. 
The GOAT model demonstrates its increased performance in modeling graph metrics that capture complex information, such as the betweenness centrality and the effective size of a node. In practical use-cases, its superior modeling capability is confirmed through its success in several real-world node classification benchmarks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chatzianastasis, Michail and Lutzeyer, Johannes and Dasoulas, George and Vazirgiannis, Michalis}, year={2023}, month={Jun.}, pages={7006-7014} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25856/25628", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25856", + "pdf_size": 261623, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8538803812874496606&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "polytechnique.edu;polytechnique.edu;hms.harvard.edu;lix.polytechnique.fr", + "email": "polytechnique.edu;polytechnique.edu;hms.harvard.edu;lix.polytechnique.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "Ecole Polytechnique;Harvard University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ecolepolytechnique.fr;https://www.harvard.edu", + "aff_unique_abbr": "Ecole Polytechnique;Harvard", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;0;0+1;0", + "aff_country_unique": "France;United States" + }, + { + "id": "article-25580", + "title": "Graph Structure Learning on User Mobility Data for Social Relationship Inference", + "track": "main", + "status": "Technical", + "abstract": "With the prevalence of smart mobile devices and location-based services, uncovering social relationships from human mobility data is of great value in real-world spatio-temporal applications ranging from friend recommendation, advertisement targeting to transportation 
scheduling. While a handful of sophisticated graph embedding techniques are developed for social relationship inference, they are significantly limited to the sparse and noisy nature of user mobility data, as they all ignore the essential problem of the existence of a large amount of noisy data unrelated to social activities in such mobility data. In this work, we present Social Relationship Inference Network (SRINet), a novel Graph Neural Network (GNN) framework, to improve inference performance by learning to remove noisy data. Specifically, we first construct a multiplex user meeting graph to model the spatial-temporal interactions among users in different semantic contexts. Our proposed SRINet tactfully combines the representation learning ability of Graph Convolutional Networks (GCNs) with the power of removing noisy edges of graph structure learning, which can learn effective user embeddings on the multiplex user meeting graph in a semi-supervised manner. Extensive experiments on three real-world datasets demonstrate the superiority of SRINet against state-of-the-art techniques in inferring social relationships from user mobility data. 
The source code of our method is available at https://github.com/qinguangming1999/SRINet.", + "primary_area": "data mining and knowledge management", + "author": "Guangming Qin; Lexue Song; Yanwei Yu; Chao Huang; Wenzhe Jia; Yuan Cao; Junyu Dong", + "authorids": "", + "aff": "College of Computer Science and Technology, Ocean University of China; Department of Data Science, Duke Kunshan University; College of Computer Science and Technology, Ocean University of China; Department of Computer Science, University of Hong Kong; College of Computer Science and Technology, Ocean University of China; College of Computer Science and Technology, Ocean University of China; College of Computer Science and Technology, Ocean University of China", + "bibtex": "@article{Qin_Song_Yu_Huang_Jia_Cao_Dong_2023, title={Graph Structure Learning on User Mobility Data for Social Relationship Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25580}, DOI={10.1609/aaai.v37i4.25580}, abstractNote={With the prevalence of smart mobile devices and location-based services, uncovering social relationships from human mobility data is of great value in real-world spatio-temporal applications ranging from friend recommendation, advertisement targeting to transportation scheduling. While a handful of sophisticated graph embedding techniques are developed for social relationship inference, they are significantly limited to the sparse and noisy nature of user mobility data, as they all ignore the essential problem of the existence of a large amount of noisy data unrelated to social activities in such mobility data. In this work, we present Social Relationship Inference Network (SRINet), a novel Graph Neural Network (GNN) framework, to improve inference performance by learning to remove noisy data. Specifically, we first construct a multiplex user meeting graph to model the spatial-temporal interactions among users in different semantic contexts. 
Our proposed SRINet tactfully combines the representation learning ability of Graph Convolutional Networks (GCNs) with the power of removing noisy edges of graph structure learning, which can learn effective user embeddings on the multiplex user meeting graph in a semi-supervised manner. Extensive experiments on three real-world datasets demonstrate the superiority of SRINet against state-of-the-art techniques in inferring social relationships from user mobility data. The source code of our method is available at https://github.com/qinguangming1999/SRINet.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Guangming and Song, Lexue and Yu, Yanwei and Huang, Chao and Jia, Wenzhe and Cao, Yuan and Dong, Junyu}, year={2023}, month={Jun.}, pages={4578-4586} }", +    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25580/25352", +    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25580", +    "pdf_size": 588714, +    "gs_citation": 10, +    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4062732078426998340&as_sdt=2005&sciodt=0,5&hl=en", +    "gs_version_total": 2, +    "aff_domain": "stu.ouc.edu.cn;duke.edu;ouc.edu.cn;gmail.com;stu.ouc.edu.cn;ouc.edu.cn;ouc.edu.cn", +    "email": "stu.ouc.edu.cn;duke.edu;ouc.edu.cn;gmail.com;stu.ouc.edu.cn;ouc.edu.cn;ouc.edu.cn", +    "github": "https://github.com/qinguangming1999/SRINet", +    "project": "", +    "author_num": 7, +    "aff_unique_index": "0;1;0;2;0;0;0", +    "aff_unique_norm": "Ocean University of China;Duke Kunshan University;University of Hong Kong", +    "aff_unique_dep": "College of Computer Science and Technology;Department of Data Science;Department of Computer Science", +    "aff_unique_url": "http://www.ouc.edu.cn;https://www.dukekunshan.edu.cn;https://www.hku.hk", +    "aff_unique_abbr": ";DKU;HKU", +    "aff_campus_unique_index": "1", +    "aff_campus_unique": ";Kunshan", +    "aff_country_unique_index": "0;0;0;0;0;0;0", +    "aff_country_unique": "China" +  }, +  { +    "id": "article-27053", + 
"title": "Graph of Graphs: A New Knowledge Representation Mechanism for Graph Learning (Student Abstract)", +    "track": "aaai student abstract and poster program", +    "status": "Technical", +    "abstract": "Supervised graph classification is one of the most actively developing areas in machine learning (ML), with a broad range of domain applications, from social media to bioinformatics. Given a collection of graphs with categorical labels, the goal is to predict correct classes for unlabelled graphs. However, currently available ML tools view each such graph as a standalone entity and, as such, do not account for complex interdependencies among graphs. We propose a novel knowledge representation for graph learning called a {\\it Graph of Graphs} (GoG). The key idea is to construct a new abstraction where each graph in the collection is represented by a node, while an edge then reflects similarity among the graphs. Such similarity can be assessed via a suitable graph distance. As a result, the graph classification problem can be then reformulated as a node classification problem. We show that the proposed new knowledge representation approach not only improves classification performance but substantially enhances robustness against label perturbation attacks.", +    "primary_area": "", +    "author": "Zhiwei Zhen; Yuzhou Chen; Murat Kantarcioglu; Yulia R. Gel", +    "authorids": "", +    "aff": "University of Texas at Dallas; Temple University; University of Texas at Dallas; University of Texas at Dallas", +    "bibtex": "@article{Zhen_Chen_Kantarcioglu_Gel_2024, title={Graph of Graphs: A New Knowledge Representation Mechanism for Graph Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27053}, DOI={10.1609/aaai.v37i13.27053}, abstractNote={Supervised graph classification is one of the most actively developing areas in machine learning (ML), with a broad range of domain applications, from social media to bioinformatics. 
Given a collection of graphs with categorical labels, the goal is to predict correct classes for unlabelled graphs. However, currently available ML tools view each such graph as a standalone entity and, as such, do not account for complex interdependencies among graphs. We propose a novel knowledge representation for graph learning called a {\\it Graph of Graphs} (GoG). The key idea is to construct a new abstraction where each graph in the collection is represented by a node, while an edge then reflects similarity among the graphs. Such similarity can be assessed via a suitable graph distance. As a result, the graph classification problem can be then reformulated as a node classification problem. We show that the proposed new knowledge representation approach not only improves classification performance but substantially enhances robustness against label perturbation attacks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhen, Zhiwei and Chen, Yuzhou and Kantarcioglu, Murat and Gel, Yulia R.}, year={2024}, month={Jul.}, pages={16386-16387} }", +    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27053/26825", +    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27053", +    "pdf_size": 243690, +    "gs_citation": 0, +    "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:f3ne7bsJ4ZYJ:scholar.google.com/&scioq=Graph+of+Graphs:+A+New+Knowledge+Representation+Mechanism+for+Graph+Learning+(Student+Abstract)&hl=en&as_sdt=0,48", +    "gs_version_total": 2, +    "aff_domain": "utdallas.edu;temple.edu;utdallas.edu;utdallas.edu", +    "email": "utdallas.edu;temple.edu;utdallas.edu;utdallas.edu", +    "github": "", +    "project": "", +    "author_num": 4, +    "aff_unique_index": "0;1;0;0", +    "aff_unique_norm": "University of Texas at Dallas;Temple University", +    "aff_unique_dep": ";", +    "aff_unique_url": "https://www.utdallas.edu;https://www.temple.edu", +    "aff_unique_abbr": "UT Dallas;Temple", +    "aff_campus_unique_index": 
"0;0;0", + "aff_campus_unique": "Dallas;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26256", + "title": "GraphPrompt: Graph-Based Prompt Templates for Biomedical Synonym Prediction", + "track": "main", + "status": "Technical", + "abstract": "In the expansion of biomedical dataset, the same category may be labeled with different terms, thus being tedious and onerous to curate these terms. Therefore, automatically mapping synonymous terms onto the ontologies is desirable, which we name as biomedical synonym prediction task. Unlike biomedical concept normalization (BCN), no clues from context can be used to enhance synonym prediction, making it essential to extract graph features from ontology. We introduce an expert-curated dataset OBO-syn encompassing 70 different types of concepts and 2 million curated concept-term pairs for evaluating synonym prediction methods. We find BCN methods perform weakly on this task for not making full use of graph information. Therefore, we propose GraphPrompt, a prompt-based learning approach that creates prompt templates according to the graphs. GraphPrompt obtained 37.2% and 28.5% improvement on zero-shot and few-shot settings respectively, indicating the effectiveness of these graph-based prompt templates. We envision that our method GraphPrompt and OBO-syn dataset can be broadly applied to graph-based NLP tasks, and serve as the basis for analyzing diverse and accumulating biomedical data. 
All the data and codes are avalible at: https://github.com/HanwenXuTHU/GraphPrompt", + "primary_area": "machine learning iv", + "author": "Hanwen Xu; Jiayou Zhang; Zhirui Wang; Shizhuo Zhang; Megh Bhalerao; Yucong Liu; Dawei Zhu; Sheng Wang", + "authorids": "", + "aff": "University of Washington; Mohamed bin Zayed University of Artificial Intelligence; Carnegie Mellon University; Nanyang Technological University; University of Washington; Peking University; Peking University; University of Washington+The Thirty-Seventh AAAI Conference on Artificial Intelligence (AAAI-23)", + "bibtex": "@article{Xu_Zhang_Wang_Zhang_Bhalerao_Liu_Zhu_Wang_2023, title={GraphPrompt: Graph-Based Prompt Templates for Biomedical Synonym Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26256}, DOI={10.1609/aaai.v37i9.26256}, abstractNote={In the expansion of biomedical dataset, the same category may be labeled with different terms, thus being tedious and onerous to curate these terms. Therefore, automatically mapping synonymous terms onto the ontologies is desirable, which we name as biomedical synonym prediction task. Unlike biomedical concept normalization (BCN), no clues from context can be used to enhance synonym prediction, making it essential to extract graph features from ontology. We introduce an expert-curated dataset OBO-syn encompassing 70 different types of concepts and 2 million curated concept-term pairs for evaluating synonym prediction methods. We find BCN methods perform weakly on this task for not making full use of graph information. Therefore, we propose GraphPrompt, a prompt-based learning approach that creates prompt templates according to the graphs. GraphPrompt obtained 37.2% and 28.5% improvement on zero-shot and few-shot settings respectively, indicating the effectiveness of these graph-based prompt templates. 
We envision that our method GraphPrompt and OBO-syn dataset can be broadly applied to graph-based NLP tasks, and serve as the basis for analyzing diverse and accumulating biomedical data. All the data and codes are avalible at: https://github.com/HanwenXuTHU/GraphPrompt}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Hanwen and Zhang, Jiayou and Wang, Zhirui and Zhang, Shizhuo and Bhalerao, Megh and Liu, Yucong and Zhu, Dawei and Wang, Sheng}, year={2023}, month={Jun.}, pages={10576-10584} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26256/26028", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26256", + "pdf_size": 730634, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16519281954525765675&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.washington.edu; ; ; ; ; ; ; ", + "email": "cs.washington.edu; ; ; ; ; ; ; ", + "github": "https://github.com/HanwenXuTHU/GraphPrompt", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;0;4;4;0+5", + "aff_unique_norm": "University of Washington;Mohamed bin Zayed University of Artificial Intelligence;Carnegie Mellon University;Nanyang Technological University;Peking University;AAAI Conference on Artificial Intelligence", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.washington.edu;https://www.mbzuai.ac.ae;https://www.cmu.edu;https://www.ntu.edu.sg;http://www.pku.edu.cn;https://www.aaai.org", + "aff_unique_abbr": "UW;MBZUAI;CMU;NTU;Peking U;AAAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;2;0;3;3;0+0", + "aff_country_unique": "United States;United Arab Emirates;Singapore;China" + }, + { + "id": "article-25622", + "title": "GraphSR: A Data Augmentation Algorithm for Imbalanced Node Classification", + "track": "main", + "status": "Technical", + "abstract": "Graph neural networks (GNNs) have achieved great 
success in node classification tasks. However, existing GNNs naturally bias towards the majority classes with more labelled data and ignore those minority classes with relatively few labelled ones. The traditional techniques often resort over-sampling methods, but they may cause overfitting problem. More recently, some works propose to synthesize additional nodes for minority classes from the labelled nodes, however, there is no any guarantee if those generated nodes really stand for the the corresponding minority classes. In fact, improperly synthesized nodes may result in insufficient generalization of the algorithm. To resolve the problem, in this paper we seek to automatically augment the minority classes from the massive unlabelled nodes of the graph. Specifically, we propose \\textit{GraphSR}, a novel self-training strategy to augment the minority classes with significant diversity of unlabelled nodes, which is based on a Similarity-based selection module and a Reinforcement Learning(RL) selection module. The first module finds a subset of unlabelled nodes which are most similar to those labelled minority nodes, and the second one further determines the representative and reliable nodes from the subset via RL technique. \n Furthermore, the RL-based module can adaptively determine the sampling scale according to current training data. This strategy is general and can be easily combined with different GNNs models. 
Our experiments demonstrate the proposed approach outperforms the state-of-the-art baselines on various class-imbalanced datasets.", + "primary_area": "data mining and knowledge management", + "author": "Mengting Zhou; Zhiguo Gong", + "authorids": "", + "aff": "State Key Laboratory of Internet of Things for Smart City, University of Macau, Macao + Guangdong-Macau Joint Laboratory for Advanced and Intelligent Computing; State Key Laboratory of Internet of Things for Smart City, University of Macau, Macao + Guangdong-Macau Joint Laboratory for Advanced and Intelligent Computing", + "bibtex": "@article{Zhou_Gong_2023, title={GraphSR: A Data Augmentation Algorithm for Imbalanced Node Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25622}, DOI={10.1609/aaai.v37i4.25622}, abstractNote={Graph neural networks (GNNs) have achieved great success in node classification tasks. However, existing GNNs naturally bias towards the majority classes with more labelled data and ignore those minority classes with relatively few labelled ones. The traditional techniques often resort over-sampling methods, but they may cause overfitting problem. More recently, some works propose to synthesize additional nodes for minority classes from the labelled nodes, however, there is no any guarantee if those generated nodes really stand for the the corresponding minority classes. In fact, improperly synthesized nodes may result in insufficient generalization of the algorithm. To resolve the problem, in this paper we seek to automatically augment the minority classes from the massive unlabelled nodes of the graph. Specifically, we propose \\textit{GraphSR}, a novel self-training strategy to augment the minority classes with significant diversity of unlabelled nodes, which is based on a Similarity-based selection module and a Reinforcement Learning(RL) selection module. 
The first module finds a subset of unlabelled nodes which are most similar to those labelled minority nodes, and the second one further determines the representative and reliable nodes from the subset via RL technique. Furthermore, the RL-based module can adaptively determine the sampling scale according to current training data. This strategy is general and can be easily combined with different GNNs models. Our experiments demonstrate the proposed approach outperforms the state-of-the-art baselines on various class-imbalanced datasets.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Mengting and Gong, Zhiguo}, year={2023}, month={Jun.}, pages={4954-4962} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25622/25394", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25622", + "pdf_size": 430402, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3984686274054606670&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "um.edu.mo;um.edu.mo", + "email": "um.edu.mo;um.edu.mo", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "University of Macau;Guangdong-Macau Joint Laboratory for Advanced and Intelligent Computing", + "aff_unique_dep": "State Key Laboratory of Internet of Things for Smart City;", + "aff_unique_url": "https://www.um.edu.mo;", + "aff_unique_abbr": "UM;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Macao;", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26536", + "title": "Graphix-T5: Mixing Pre-trained Transformers with Graph-Aware Layers for Text-to-SQL Parsing", + "track": "main", + "status": "Technical", + "abstract": "The task of text-to-SQL parsing, which aims at converting natural language questions into executable SQL queries, has garnered increasing attention in recent years. 
One of the major challenges in text-to-SQL parsing is domain generalization, i.e., how to generalize well to unseen databases. Recently, the pre-trained text-to-text transformer model, namely T5, though not specialized for text-to-SQL parsing, has achieved state-of-the-art performance on standard benchmarks targeting domain generalization. In this work, we explore ways to further augment the pre-trained T5 model with specialized components for text-to-SQL parsing. Such components are expected to introduce structural inductive bias into text-to-SQL parsers thus improving the model\u2019s capacity on (potentially multi-hop) reasoning, which is critical for generating structure-rich SQLs. To this end, we propose a new architecture GRAPHIX-T5, a mixed model with the standard pre-trained transformer model augmented by specially-designed graph-aware layers. Extensive experiments and analysis demonstrate the effectiveness of GRAPHIX-T5 across four text-to-SQL benchmarks: SPIDER, SYN, REALISTIC and DK. GRAPHIX-T5 surpasses all other T5-based parsers with a significant margin, achieving new state-of-the-art performance. Notably, GRAPHIX-T5-large reaches performance superior to the original T5-large by 5.7% on exact match (EM) accuracy and 6.6% on execution accuracy (EX). 
This even outperforms the T5-3B by 1.2% on EM and 1.5% on EX", + "primary_area": "speech natural language processing", + "author": "Jinyang Li; Binyuan Hui; Reynold Cheng; Bowen Qin; Chenhao Ma; Nan Huo; Fei Huang; Wenyu Du; Luo Si; Yongbin Li", + "authorids": "", + "aff": "The University of Hong Kong+DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; The University of Hong Kong+DAMO Academy, Alibaba Group+Guangdong\u2013Hong Kong-Macau Joint Laboratory; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; The Chinese University of Hong Kong (Shenzhen); The University of Hong Kong; DAMO Academy, Alibaba Group; The University of Hong Kong; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group+Guangdong\u2013Hong Kong-Macau Joint Laboratory", + "bibtex": "@article{Li_Hui_Cheng_Qin_Ma_Huo_Huang_Du_Si_Li_2023, title={Graphix-T5: Mixing Pre-trained Transformers with Graph-Aware Layers for Text-to-SQL Parsing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26536}, DOI={10.1609/aaai.v37i11.26536}, abstractNote={The task of text-to-SQL parsing, which aims at converting natural language questions into executable SQL queries, has garnered increasing attention in recent years. One of the major challenges in text-to-SQL parsing is domain generalization, i.e., how to generalize well to unseen databases. Recently, the pre-trained text-to-text transformer model, namely T5, though not specialized for text-to-SQL parsing, has achieved state-of-the-art performance on standard benchmarks targeting domain generalization. In this work, we explore ways to further augment the pre-trained T5 model with specialized components for text-to-SQL parsing. Such components are expected to introduce structural inductive bias into text-to-SQL parsers thus improving the model\u2019s capacity on (potentially multi-hop) reasoning, which is critical for generating structure-rich SQLs. 
To this end, we propose a new architecture GRAPHIX-T5, a mixed model with the standard pre-trained transformer model augmented by specially-designed graph-aware layers. Extensive experiments and analysis demonstrate the effectiveness of GRAPHIX-T5 across four text-to-SQL benchmarks: SPIDER, SYN, REALISTIC and DK. GRAPHIX-T5 surpasses all other T5-based parsers with a significant margin, achieving new state-of-the-art performance. Notably, GRAPHIX-T5-large reaches performance superior to the original T5-large by 5.7% on exact match (EM) accuracy and 6.6% on execution accuracy (EX). This even outperforms the T5-3B by 1.2% on EM and 1.5% on EX}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jinyang and Hui, Binyuan and Cheng, Reynold and Qin, Bowen and Ma, Chenhao and Huo, Nan and Huang, Fei and Du, Wenyu and Si, Luo and Li, Yongbin}, year={2023}, month={Jun.}, pages={13076-13084} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26536/26308", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26536", + "pdf_size": 463009, + "gs_citation": 123, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5318478208289139639&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "connect.hku.hk;alibaba-inc.com;cs.hku.hk;siat.ac.cn;cuhk.edu.cn;connect.hku.hk;alibaba-inc.com;connect.hku.hk;alibaba-inc.com;alibaba-inc.com", + "email": "connect.hku.hk;alibaba-inc.com;cs.hku.hk;siat.ac.cn;cuhk.edu.cn;connect.hku.hk;alibaba-inc.com;connect.hku.hk;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1;1;0+1+2;3;4;0;1;0;1;1+2", + "aff_unique_norm": "The University of Hong Kong;Alibaba Group;Guangdong\u2013Hong Kong-Macau Joint Laboratory;Shenzhen Institute of Advanced Technology;The Chinese University of Hong Kong", + "aff_unique_dep": ";DAMO Academy;;;", + "aff_unique_url": 
"https://www.hku.hk;https://www.alibaba-group.com;;http://www.siat.cas.cn;https://www.cuhk.edu.cn", + "aff_unique_abbr": "HKU;Alibaba;;SIAT;CUHK", + "aff_campus_unique_index": ";;1;1;", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0+0;0;0+0+0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25527", + "title": "Graphs, Constraints, and Search for the Abstraction and Reasoning Corpus", + "track": "main", + "status": "Technical", + "abstract": "The Abstraction and Reasoning Corpus (ARC) aims at benchmarking the performance of general artificial intelligence algorithms. The ARC's focus on broad generalization and few-shot learning has made it difficult to solve using pure machine learning. A more promising approach has been to perform program synthesis within an appropriately designed Domain Specific Language (DSL). However, these too have seen limited success. We propose Abstract Reasoning with Graph Abstractions (ARGA), a new object-centric framework that first represents images using graphs and then performs a search for a correct program in a DSL that is based on the abstracted graph space. The complexity of this combinatorial search is tamed through the use of constraint acquisition, state hashing, and Tabu search. An extensive set of experiments demonstrates the promise of ARGA in tackling some of the complicated object-centric tasks of the ARC rather efficiently, producing programs that are correct and easy to understand.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yudong Xu; Elias B. 
Khalil; Scott Sanner", + "authorids": "", + "aff": "Department of Mechanical & Industrial Engineering, University of Toronto + Scale AI Research Chair in Data-Driven Algorithms for Modern Supply Chains; Department of Mechanical & Industrial Engineering, University of Toronto; Department of Mechanical & Industrial Engineering, University of Toronto", + "bibtex": "@article{Xu_Khalil_Sanner_2023, title={Graphs, Constraints, and Search for the Abstraction and Reasoning Corpus}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25527}, DOI={10.1609/aaai.v37i4.25527}, abstractNote={The Abstraction and Reasoning Corpus (ARC) aims at benchmarking the performance of general artificial intelligence algorithms. The ARC\u2019s focus on broad generalization and few-shot learning has made it difficult to solve using pure machine learning. A more promising approach has been to perform program synthesis within an appropriately designed Domain Specific Language (DSL). However, these too have seen limited success. We propose Abstract Reasoning with Graph Abstractions (ARGA), a new object-centric framework that first represents images using graphs and then performs a search for a correct program in a DSL that is based on the abstracted graph space. The complexity of this combinatorial search is tamed through the use of constraint acquisition, state hashing, and Tabu search. An extensive set of experiments demonstrates the promise of ARGA in tackling some of the complicated object-centric tasks of the ARC rather efficiently, producing programs that are correct and easy to understand.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Yudong and Khalil, Elias B. 
and Sanner, Scott}, year={2023}, month={Jun.}, pages={4115-4122} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25527/25299", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25527", + "pdf_size": 2920006, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8133629296848399794&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mail.utoronto.ca;mie.utoronto.ca;mie.utoronto.ca", + "email": "mail.utoronto.ca;mie.utoronto.ca;mie.utoronto.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "University of Toronto;Scale AI", + "aff_unique_dep": "Department of Mechanical & Industrial Engineering;Research", + "aff_unique_url": "https://www.utoronto.ca;https://scale.ai/", + "aff_unique_abbr": "U of T;Scale AI", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Toronto;", + "aff_country_unique_index": "0+1;0;0", + "aff_country_unique": "Canada;United States" + }, + { + "id": "article-25472", + "title": "Grouped Knowledge Distillation for Deep Face Recognition", + "track": "main", + "status": "Technical", + "abstract": "Compared with the feature-based distillation methods, logits distillation can liberalize the requirements of consistent feature dimension between teacher and student networks, while the performance is deemed inferior in face recognition. One major challenge is that the light-weight student network has difficulty fitting the target logits due to its low model capacity, which is attributed to the significant number of identities in face recognition. Therefore, we seek to probe the target logits to extract the primary knowledge related to face identity, and discard the others, to make the distillation more achievable for the student network. Specifically, there is a tail group with near-zero values in the prediction, containing minor knowledge for distillation. 
To provide a clear perspective of its impact, we first partition the logits into two groups, i.e., Primary Group and Secondary Group, according to the cumulative probability of the softened prediction. Then, we reorganize the Knowledge Distillation (KD) loss of grouped logits into three parts, i.e., Primary-KD, Secondary-KD, and Binary-KD. Primary-KD refers to distilling the primary knowledge from the teacher, Secondary-KD aims to refine minor knowledge but increases the difficulty of distillation, and Binary-KD ensures the consistency of knowledge distribution between teacher and student. We experimentally found that (1) Primary-KD and Binary-KD are indispensable for KD, and (2) Secondary-KD is the culprit restricting KD at the bottleneck. Therefore, we propose a Grouped Knowledge Distillation (GKD) that retains the Primary-KD and Binary-KD but omits Secondary-KD in the ultimate KD loss calculation. Extensive experimental results on popular face recognition benchmarks demonstrate the superiority of proposed GKD over state-of-the-art methods.", + "primary_area": "computer vision iii", + "author": "Weisong Zhao; Xiangyu Zhu; Kaiwen Guo; Xiao-Yu Zhang; Zhen Lei", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; CBSR&NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; CBSR&NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; CBSR&NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Centre for Artificial 
Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences, Hong Kong, China", + "bibtex": "@article{Zhao_Zhu_Guo_Zhang_Lei_2023, title={Grouped Knowledge Distillation for Deep Face Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25472}, DOI={10.1609/aaai.v37i3.25472}, abstractNote={Compared with the feature-based distillation methods, logits distillation can liberalize the requirements of consistent feature dimension between teacher and student networks, while the performance is deemed inferior in face recognition. One major challenge is that the light-weight student network has difficulty fitting the target logits due to its low model capacity, which is attributed to the significant number of identities in face recognition. Therefore, we seek to probe the target logits to extract the primary knowledge related to face identity, and discard the others, to make the distillation more achievable for the student network. Specifically, there is a tail group with near-zero values in the prediction, containing minor knowledge for distillation. To provide a clear perspective of its impact, we first partition the logits into two groups, i.e., Primary Group and Secondary Group, according to the cumulative probability of the softened prediction. Then, we reorganize the Knowledge Distillation (KD) loss of grouped logits into three parts, i.e., Primary-KD, Secondary-KD, and Binary-KD. Primary-KD refers to distilling the primary knowledge from the teacher, Secondary-KD aims to refine minor knowledge but increases the difficulty of distillation, and Binary-KD ensures the consistency of knowledge distribution between teacher and student. We experimentally found that (1) Primary-KD and Binary-KD are indispensable for KD, and (2) Secondary-KD is the culprit restricting KD at the bottleneck. 
Therefore, we propose a Grouped Knowledge Distillation (GKD) that retains the Primary-KD and Binary-KD but omits Secondary-KD in the ultimate KD loss calculation. Extensive experimental results on popular face recognition benchmarks demonstrate the superiority of proposed GKD over state-of-the-art methods.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Weisong and Zhu, Xiangyu and Guo, Kaiwen and Zhang, Xiao-Yu and Lei, Zhen}, year={2023}, month={Jun.}, pages={3615-3623} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25472/25244", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25472", + "pdf_size": 550321, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4743192680015430262&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "iie.ac.cn;nlpr.ia.ac.cn;gmail.com;iie.ac.cn;nlpr.ia.ac.cn", + "email": "iie.ac.cn;nlpr.ia.ac.cn;gmail.com;iie.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0;0+1;0+1+2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;Centre for Artificial Intelligence and Robotics", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;", + "aff_unique_abbr": "CAS;UCAS;", + "aff_campus_unique_index": "0+0;0+0;0;0+0;0+0+1", + "aff_campus_unique": "Beijing;Hong Kong", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26005", + "title": "Grouping Matrix Based Graph Pooling with Adaptive Number of Clusters", + "track": "main", + "status": "Technical", + "abstract": "Graph pooling is a crucial operation for encoding hierarchical structures within graphs. 
Most existing graph pooling approaches formulate the problem as a node clustering task which effectively captures the graph topology. Conventional methods ask users to specify an appropriate number of clusters as a hyperparameter, then assuming that all input graphs share the same number of clusters. In inductive settings where the number of clusters could vary, however, the model should be able to represent this variation in its pooling layers in order to learn suitable clusters. Thus we propose GMPool, a novel differentiable graph pooling architecture that automatically determines the appropriate number of clusters based on the input data. The main intuition involves a grouping matrix defined as a quadratic form of the pooling operator, which induces use of binary classification probabilities of pairwise combinations of nodes. GMPool obtains the pooling operator by first computing the grouping matrix, then decomposing it. Extensive evaluations on molecular property prediction tasks demonstrate that our method outperforms conventional methods.", + "primary_area": "machine learning ii", + "author": "Sung Moon Ko; Sungjun Cho; Dae-Woong Jeong; Sehui Han; Moontae Lee; Honglak Lee", + "authorids": "", + "aff": "LG AI Research; LG AI Research; LG AI Research; LG AI Research; LG AI Research + University of Illinois Chicago; LG AI Research", + "bibtex": "@article{Ko_Cho_Jeong_Han_Lee_Lee_2023, title={Grouping Matrix Based Graph Pooling with Adaptive Number of Clusters}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26005}, DOI={10.1609/aaai.v37i7.26005}, abstractNote={Graph pooling is a crucial operation for encoding hierarchical structures within graphs. Most existing graph pooling approaches formulate the problem as a node clustering task which effectively captures the graph topology. 
Conventional methods ask users to specify an appropriate number of clusters as a hyperparameter, then assuming that all input graphs share the same number of clusters. In inductive settings where the number of clusters could vary, however, the model should be able to represent this variation in its pooling layers in order to learn suitable clusters. Thus we propose GMPool, a novel differentiable graph pooling architecture that automatically determines the appropriate number of clusters based on the input data. The main intuition involves a grouping matrix defined as a quadratic form of the pooling operator, which induces use of binary classification probabilities of pairwise combinations of nodes. GMPool obtains the pooling operator by first computing the grouping matrix, then decomposing it. Extensive evaluations on molecular property prediction tasks demonstrate that our method outperforms conventional methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ko, Sung Moon and Cho, Sungjun and Jeong, Dae-Woong and Han, Sehui and Lee, Moontae and Lee, Honglak}, year={2023}, month={Jun.}, pages={8334-8342} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26005/25777", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26005", + "pdf_size": 781812, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3062887319529610775&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "lgresearch.ai;lgresearch.ai;lgresearch.ai;lgresearch.ai;lgresearch.ai;lgresearch.ai", + "email": "lgresearch.ai;lgresearch.ai;lgresearch.ai;lgresearch.ai;lgresearch.ai;lgresearch.ai", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0+1;0", + "aff_unique_norm": "LG AI Research;University of Illinois at Chicago", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.lgaires.com;https://www.uic.edu", + "aff_unique_abbr": "LG AI;UIC", + 
"aff_campus_unique_index": "1", + "aff_campus_unique": ";Chicago", + "aff_country_unique_index": "0;0;0;0;0+1;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-25191", + "title": "GuidedMixup: An Efficient Mixup Strategy Guided by Saliency Maps", + "track": "main", + "status": "Technical", + "abstract": "Data augmentation is now an essential part of the image training process, as it effectively prevents overfitting and makes the model more robust against noisy datasets. Recent mixing augmentation strategies have advanced to generate the mixup mask that can enrich the saliency information, which is a supervisory signal. However, these methods incur a significant computational burden to optimize the mixup mask. From this motivation, we propose a novel saliency-aware mixup method, GuidedMixup, which aims to retain the salient regions in mixup images with low computational overhead. We develop an efficient pairing algorithm that pursues to minimize the conflict of salient regions of paired images and achieve rich saliency in mixup images. Moreover, GuidedMixup controls the mixup ratio for each pixel to better preserve the salient region by interpolating two paired images smoothly. The experiments on several datasets demonstrate that GuidedMixup provides a good trade-off between augmentation overhead and generalization performance on classification datasets. 
In addition, our method shows good performance in experiments with corrupted or reduced datasets.", + "primary_area": "computer vision i", + "author": "Minsoo Kang; Suhyun Kim", + "authorids": "", + "aff": "Korea University, Republic of Korea+Korea Institute of Science and Technology, Republic of Korea; Korea Institute of Science and Technology, Republic of Korea", + "bibtex": "@article{Kang_Kim_2023, title={GuidedMixup: An Efficient Mixup Strategy Guided by Saliency Maps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25191}, DOI={10.1609/aaai.v37i1.25191}, abstractNote={Data augmentation is now an essential part of the image training process, as it effectively prevents overfitting and makes the model more robust against noisy datasets. Recent mixing augmentation strategies have advanced to generate the mixup mask that can enrich the saliency information, which is a supervisory signal. However, these methods incur a significant computational burden to optimize the mixup mask. From this motivation, we propose a novel saliency-aware mixup method, GuidedMixup, which aims to retain the salient regions in mixup images with low computational overhead. We develop an efficient pairing algorithm that pursues to minimize the conflict of salient regions of paired images and achieve rich saliency in mixup images. Moreover, GuidedMixup controls the mixup ratio for each pixel to better preserve the salient region by interpolating two paired images smoothly. The experiments on several datasets demonstrate that GuidedMixup provides a good trade-off between augmentation overhead and generalization performance on classification datasets. 
In addition, our method shows good performance in experiments with corrupted or reduced datasets.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kang, Minsoo and Kim, Suhyun}, year={2023}, month={Jun.}, pages={1096-1104} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25191/24963", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25191", + "pdf_size": 397703, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13182338676948637791&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Korea University;Korea Institute of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.korea.ac.kr;https://www.kist.re.kr", + "aff_unique_abbr": "KU;KIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-26905", + "title": "Guiding Students to Investigate What Google Speech Recognition Knows about Language", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "Today, children of all ages interact with speech recognition systems but are largely unaware of how they work. Teaching K-12 students to investigate how these systems employ phonological, syntactic, semantic, and cultural knowledge to resolve ambiguities in the audio signal can provide them a window on complex AI decision-making and also help them appreciate the richness and complexity of human language. We describe a browser-based tool for exploring the Google Web Speech API and a series of experiments students can engage in to measure what the service knows about language and the types of biases it exhibits. 
Middle school students taking an introductory AI elective were able to use the tool to explore Google\u2019s knowledge of homophones and its ability to exploit context to disambiguate them. Older students could potentially conduct more comprehensive investigations, which we lay out here. This approach to investigating the power and limitations of speech technology through carefully designed experiments can also be applied to other AI application areas, such as face detection, object recognition, machine translation, or question answering.", + "primary_area": "", + "author": "David S. Touretzky; Christina Gardner-McCune", + "authorids": "", + "aff": "Computer Science Department, Carnegie Mellon University, Pittsburgh, PA 15213; Department of Computer & Information Science & Engineering, University of Florida, Gainesville, FL 32611", + "bibtex": "@article{Touretzky_Gardner-McCune_2024, title={Guiding Students to Investigate What Google Speech Recognition Knows about Language}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26905}, DOI={10.1609/aaai.v37i13.26905}, abstractNote={Today, children of all ages interact with speech recognition systems but are largely unaware of how they work. Teaching K-12 students to investigate how these systems employ phonological, syntactic, semantic, and cultural knowledge to resolve ambiguities in the audio signal can provide them a window on complex AI decision-making and also help them appreciate the richness and complexity of human language. We describe a browser-based tool for exploring the Google Web Speech API and a series of experiments students can engage in to measure what the service knows about language and the types of biases it exhibits. Middle school students taking an introductory AI elective were able to use the tool to explore Google\u2019s knowledge of homophones and its ability to exploit context to disambiguate them. 
Older students could potentially conduct more comprehensive investigations, which we lay out here. This approach to investigating the power and limitations of speech technology through carefully designed experiments can also be applied to other AI application areas, such as face detection, object recognition, machine translation, or question answering.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Touretzky, David S. and Gardner-McCune, Christina}, year={2024}, month={Jul.}, pages={16040-16047} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26905/26677", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26905", + "pdf_size": 8732534, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=287736139368751930&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.cmu.edu;ufl.edu", + "email": "cs.cmu.edu;ufl.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Carnegie Mellon University;University of Florida", + "aff_unique_dep": "Computer Science Department;Department of Computer & Information Science & Engineering", + "aff_unique_url": "https://www.cmu.edu;https://www.ufl.edu", + "aff_unique_abbr": "CMU;UF", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Pittsburgh;Gainesville", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26894", + "title": "H-AES: Towards Automated Essay Scoring for Hindi", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "The use of Natural Language Processing (NLP) for Automated Essay Scoring (AES) has been well explored in the English language, with benchmark models exhibiting performance comparable to human scorers. However, AES in Hindi and other low-resource languages remains unexplored. 
In this study, we reproduce and compare state-of-the-art methods for AES in the Hindi domain. We employ classical feature-based Machine Learning (ML) and advanced end-to-end models, including LSTM Networks and Fine-Tuned Transformer Architecture, in our approach and derive results comparable to those in the English language domain. Hindi being a low-resource language, lacks a dedicated essay-scoring corpus. We train and evaluate our models using translated English essays and empirically measure their performance on our own small-scale, real-world Hindi corpus. We follow this up with an in-depth analysis discussing prompt-specific behavior of different language models implemented.", + "primary_area": "", + "author": "Shubhankar Singh; Anirudh Pupneja; Shivaansh Mital; Cheril Shah; Manish Bawkar; Lakshman Prasad Gupta; Ajit Kumar; Yaman Kumar; Rushali Gupta; Rajiv Ratn Shah", + "authorids": "", + "aff": "Manipal University Jaipur; BITS Pilani, K K Birla Goa Campus; Indraprastha Institute of Information Technology, Delhi; Pune Institute of Computer Technology; Sardar Vallabhbhai National Institute of Technology, Surat; University of Allahabad; Banaras Hindu University; Indraprastha Institute of Information Technology, Delhi; Indraprastha Institute of Information Technology, Delhi; Banaras Hindu University", + "bibtex": "@article{Singh_Pupneja_Mital_Shah_Bawkar_Gupta_Kumar_Kumar_Gupta_Ratn Shah_2024, title={H-AES: Towards Automated Essay Scoring for Hindi}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26894}, DOI={10.1609/aaai.v37i13.26894}, abstractNote={The use of Natural Language Processing (NLP) for Automated Essay Scoring (AES) has been well explored in the English language, with benchmark models exhibiting performance comparable to human scorers. However, AES in Hindi and other low-resource languages remains unexplored. In this study, we reproduce and compare state-of-the-art methods for AES in the Hindi domain. 
We employ classical feature-based Machine Learning (ML) and advanced end-to-end models, including LSTM Networks and Fine-Tuned Transformer Architecture, in our approach and derive results comparable to those in the English language domain. Hindi being a low-resource language, lacks a dedicated essay-scoring corpus. We train and evaluate our models using translated English essays and empirically measure their performance on our own small-scale, real-world Hindi corpus. We follow this up with an in-depth analysis discussing prompt-specific behavior of different language models implemented.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Singh, Shubhankar and Pupneja, Anirudh and Mital, Shivaansh and Shah, Cheril and Bawkar, Manish and Gupta, Lakshman Prasad and Kumar, Ajit and Kumar, Yaman and Gupta, Rushali and Ratn Shah, Rajiv}, year={2024}, month={Jul.}, pages={15955-15963} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26894/26666", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26894", + "pdf_size": 158475, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2154906308426003818&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;1;2;3;4;5;6;2;2;6", + "aff_unique_norm": "Manipal University;Birla Institute of Technology and Science;Indraprastha Institute of Information Technology;Pune Institute of Computer Technology;Sardar Vallabhbhai National Institute of Technology;University of Allahabad;Banaras Hindu University", + "aff_unique_dep": ";;;;;;", + "aff_unique_url": "https://www.manipaluniversityjaipur.com;https://www.bits-pilani.ac.in/goa/;http://www.iiitd.ac.in;http://www.pict.edu;https://www.svnit.ac.in;https://www.allduniv.ac.in;https://www.bhu.ac.in", + "aff_unique_abbr": ";BITS Pilani;IIIT-D;PICT;SVNIT;UoA;BHU", + 
"aff_campus_unique_index": "0;1;2;4;2;2", + "aff_campus_unique": "Jaipur;Goa;Delhi;;Surat", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26120", + "title": "H-TSP: Hierarchically Solving the Large-Scale Traveling Salesman Problem", + "track": "main", + "status": "Technical", + "abstract": "We propose an end-to-end learning framework based on hierarchical reinforcement learning, called H-TSP, for addressing the large-scale Traveling Salesman Problem (TSP). The proposed H-TSP constructs a solution of a TSP instance starting from the scratch relying on two components: the upper-level policy chooses a small subset of nodes (up to 200 in our experiment) from all nodes that are to be traversed, while the lower-level policy takes the chosen nodes as input and outputs a tour connecting them to the existing partial route (initially only containing the depot). After jointly training the upper-level and lower-level policies, our approach can directly generate solutions for the given TSP instances without relying on any time-consuming search procedures. To demonstrate effectiveness of the proposed approach, we have conducted extensive experiments on randomly generated TSP instances with different numbers of nodes. We show that H-TSP can achieve comparable results (gap 3.42% vs. 7.32%) as SOTA search-based approaches, and more importantly, we reduce the time consumption up to two orders of magnitude (3.32s vs. 395.85s). To the best of our knowledge, H-TSP is the first end-to-end deep reinforcement learning approach that can scale to TSP instances of up to 10000 nodes. 
Although there are still gaps to SOTA results with respect to solution quality, we believe that H-TSP will be useful for practical applications, particularly those that are time-sensitive e.g., on-call routing and ride hailing service.", + "primary_area": "machine learning iii", + "author": "Xuanhao Pan; Yan Jin; Yuandong Ding; Mingxiao Feng; Li Zhao; Lei Song; Jiang Bian", + "authorids": "", + "aff": "School of Computer Science, Huazhong University of Science and Technology, China; School of Computer Science, Huazhong University of Science and Technology, China; School of Computer Science, Huazhong University of Science and Technology, China; University of Science and Technology of China; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia", + "bibtex": "@article{Pan_Jin_Ding_Feng_Zhao_Song_Bian_2023, title={H-TSP: Hierarchically Solving the Large-Scale Traveling Salesman Problem}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26120}, DOI={10.1609/aaai.v37i8.26120}, abstractNote={We propose an end-to-end learning framework based on hierarchical reinforcement learning, called H-TSP, for addressing the large-scale Traveling Salesman Problem (TSP). The proposed H-TSP constructs a solution of a TSP instance starting from the scratch relying on two components: the upper-level policy chooses a small subset of nodes (up to 200 in our experiment) from all nodes that are to be traversed, while the lower-level policy takes the chosen nodes as input and outputs a tour connecting them to the existing partial route (initially only containing the depot). After jointly training the upper-level and lower-level policies, our approach can directly generate solutions for the given TSP instances without relying on any time-consuming search procedures. To demonstrate effectiveness of the proposed approach, we have conducted extensive experiments on randomly generated TSP instances with different numbers of nodes. 
We show that H-TSP can achieve comparable results (gap 3.42% vs. 7.32%) as SOTA search-based approaches, and more importantly, we reduce the time consumption up to two orders of magnitude (3.32s vs. 395.85s). To the best of our knowledge, H-TSP is the first end-to-end deep reinforcement learning approach that can scale to TSP instances of up to 10000 nodes. Although there are still gaps to SOTA results with respect to solution quality, we believe that H-TSP will be useful for practical applications, particularly those that are time-sensitive e.g., on-call routing and ride hailing service.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Xuanhao and Jin, Yan and Ding, Yuandong and Feng, Mingxiao and Zhao, Li and Song, Lei and Bian, Jiang}, year={2023}, month={Jun.}, pages={9345-9353} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26120/25892", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26120", + "pdf_size": 222210, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13543895602963070229&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "hust.edu.cn; ; ; ; ; ; ", + "email": "hust.edu.cn; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;2;2;2", + "aff_unique_norm": "Huazhong University of Science and Technology;University of Science and Technology of China;Microsoft Research", + "aff_unique_dep": "School of Computer Science;;Research", + "aff_unique_url": "http://www.hust.edu.cn;http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "HUST;USTC;MSR Asia", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26244", + "title": "HALOC: Hardware-Aware Automatic Low-Rank Compression for Compact Neural Networks", + "track": 
"main", + "status": "Technical", + "abstract": "Low-rank compression is an important model compression strategy for obtaining compact neural network models. In general, because the rank values directly determine the model complexity and model accuracy, proper selection of layer-wise rank is very critical and desired. To date, though many low-rank compression approaches, either selecting the ranks in a manual or automatic way, have been proposed, they suffer from costly manual trials or unsatisfied compression performance. In addition, all of the existing works are not designed in a hardware-aware way, limiting the practical performance of the compressed models on real-world hardware platforms. \n\nTo address these challenges, in this paper we propose HALOC, a hardware-aware automatic low-rank compression framework. By interpreting automatic rank selection from an architecture search perspective, we develop an end-to-end solution to determine the suitable layer-wise ranks in a differentiable and hardware-aware way. We further propose design principles and mitigation strategy to efficiently explore the rank space and reduce the potential interference problem.\n\nExperimental results on different datasets and hardware platforms demonstrate the effectiveness of our proposed approach. On CIFAR-10 dataset, HALOC enables 0.07% and 0.38% accuracy increase over the uncompressed ResNet-20 and VGG-16 models with 72.20% and 86.44% fewer FLOPs, respectively. On ImageNet dataset, HALOC achieves 0.9% higher top-1 accuracy than the original ResNet-18 model with 66.16% fewer FLOPs. HALOC also shows 0.66% higher top-1 accuracy increase than the state-of-the-art automatic low-rank compression solution with fewer computational and memory costs. 
In addition, HALOC demonstrates the practical speedups on different hardware platforms, verified by the measurement results on desktop GPU, embedded GPU and ASIC accelerator.", + "primary_area": "machine learning iv", + "author": "Jinqi Xiao; Chengming Zhang; Yu Gong; Miao Yin; Yang Sui; Lizhi Xiang; Dingwen Tao; Bo Yuan", + "authorids": "", + "aff": "Rutgers University; Indiana University; Rutgers University; Rutgers University; Rutgers University; Washington State University; Indiana University; Rutgers University", + "bibtex": "@article{Xiao_Zhang_Gong_Yin_Sui_Xiang_Tao_Yuan_2023, title={HALOC: Hardware-Aware Automatic Low-Rank Compression for Compact Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26244}, DOI={10.1609/aaai.v37i9.26244}, abstractNote={Low-rank compression is an important model compression strategy for obtaining compact neural network models. In general, because the rank values directly determine the model complexity and model accuracy, proper selection of layer-wise rank is very critical and desired. To date, though many low-rank compression approaches, either selecting the ranks in a manual or automatic way, have been proposed, they suffer from costly manual trials or unsatisfied compression performance. In addition, all of the existing works are not designed in a hardware-aware way, limiting the practical performance of the compressed models on real-world hardware platforms. To address these challenges, in this paper we propose HALOC, a hardware-aware automatic low-rank compression framework. By interpreting automatic rank selection from an architecture search perspective, we develop an end-to-end solution to determine the suitable layer-wise ranks in a differentiable and hardware-aware way. We further propose design principles and mitigation strategy to efficiently explore the rank space and reduce the potential interference problem. 
Experimental results on different datasets and hardware platforms demonstrate the effectiveness of our proposed approach. On CIFAR-10 dataset, HALOC enables 0.07% and 0.38% accuracy increase over the uncompressed ResNet-20 and VGG-16 models with 72.20% and 86.44% fewer FLOPs, respectively. On ImageNet dataset, HALOC achieves 0.9% higher top-1 accuracy than the original ResNet-18 model with 66.16% fewer FLOPs. HALOC also shows 0.66% higher top-1 accuracy increase than the state-of-the-art automatic low-rank compression solution with fewer computational and memory costs. In addition, HALOC demonstrates the practical speedups on different hardware platforms, verified by the measurement results on desktop GPU, embedded GPU and ASIC accelerator.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiao, Jinqi and Zhang, Chengming and Gong, Yu and Yin, Miao and Sui, Yang and Xiang, Lizhi and Tao, Dingwen and Yuan, Bo}, year={2023}, month={Jun.}, pages={10464-10472} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26244/26016", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26244", + "pdf_size": 1366063, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9703974926988581554&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "rutgers.edu;iu.edu;soe.rutgers.edu;rutgers.edu;rutgers.edu;wsu.edu;iu.edu;soe.rutgers.edu", + "email": "rutgers.edu;iu.edu;soe.rutgers.edu;rutgers.edu;rutgers.edu;wsu.edu;iu.edu;soe.rutgers.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;0;2;1;0", + "aff_unique_norm": "Rutgers University;Indiana University;Washington State University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.rutgers.edu;https://www.indiana.edu;https://wsu.edu", + "aff_unique_abbr": "Rutgers;IU;WSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27064", + "title": "HAPI Explorer: Comprehension, Discovery, and Explanation on History of ML APIs", + "track": "demonstrations", + "status": "Technical", + "abstract": "Machine learning prediction APIs offered by Google, Microsoft, Amazon, and many other providers have been continuously adopted in a plethora of applications, such as visual object detection, natural language comprehension, and speech recognition. Despite the importance of a systematic study and comparison of different APIs over time, this topic is currently under-explored because of the lack of data and user-friendly exploration tools. To address this issue, we present HAPI Explorer (History of API Explorer), an interactive system that offers easy access to millions of instances of commercial API applications collected in three years, prioritize attention on user-defined instance regimes, and explain interesting patterns across different APIs, subpopulations, and time periods via visual and natural languages. 
HAPI Explorer can facilitate further comprehension and exploitation of ML prediction APIs.", + "primary_area": "", + "author": "Lingjiao Chen; Zhihua Jin; Sabri Eyuboglu; Huamin Qu; Christopher R\u00e9; Matei Zaharia; James Zou", + "authorids": "", + "aff": "Stanford University; Hong Kong University of Science and Technology; Stanford University; Hong Kong University of Science and Technology; Stanford University; Stanford University; Stanford University", + "bibtex": "@article{Chen_Jin_Eyuboglu_Qu_R\u00e9_Zaharia_Zou_2024, title={HAPI Explorer: Comprehension, Discovery, and Explanation on History of ML APIs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27064}, DOI={10.1609/aaai.v37i13.27064}, abstractNote={Machine learning prediction APIs offered by Google, Microsoft, Amazon, and many other providers have been continuously adopted in a plethora of applications, such as visual object detection, natural language comprehension, and speech recognition. Despite the importance of a systematic study and comparison of different APIs over time, this topic is currently under-explored because of the lack of data and user-friendly exploration tools. To address this issue, we present HAPI Explorer (History of API Explorer), an interactive system that offers easy access to millions of instances of commercial API applications collected in three years, prioritize attention on user-defined instance regimes, and explain interesting patterns across different APIs, subpopulations, and time periods via visual and natural languages. 
HAPI Explorer can facilitate further comprehension and exploitation of ML prediction APIs.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Lingjiao and Jin, Zhihua and Eyuboglu, Sabri and Qu, Huamin and R\u00e9, Christopher and Zaharia, Matei and Zou, James}, year={2024}, month={Jul.}, pages={16416-16418} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27064/26836", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27064", + "pdf_size": 164960, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=890918554648776948&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stanford.edu;cse.ust.hk;stanford.edu;cse.ust.hk;cs.stanford.edu;cs.stanford.edu;stanford.edu", + "email": "stanford.edu;cse.ust.hk;stanford.edu;cse.ust.hk;cs.stanford.edu;cs.stanford.edu;stanford.edu", + "github": "https://hapi-explore.github.io/", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;1;0;0;0", + "aff_unique_norm": "Stanford University;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.stanford.edu;https://www.ust.hk", + "aff_unique_abbr": "Stanford;HKUST", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;1;0;1;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26386", + "title": "HAVEN: Hierarchical Cooperative Multi-Agent Reinforcement Learning with Dual Coordination Mechanism", + "track": "main", + "status": "Technical", + "abstract": "Recently, some challenging tasks in multi-agent systems have been solved by some hierarchical reinforcement learning methods. Inspired by the intra-level and inter-level coordination in the human nervous system, we propose a novel value decomposition framework HAVEN based on hierarchical reinforcement learning for fully cooperative multi-agent problems. 
To address the instability arising from the concurrent optimization of policies between various levels and agents, we introduce the dual coordination mechanism of inter-level and inter-agent strategies by designing reward functions in a two-level hierarchy. HAVEN does not require domain knowledge and pre-training, and can be applied to any value decomposition variant. Our method achieves desirable results on different decentralized partially observable Markov decision process domains and outperforms other popular multi-agent hierarchical reinforcement learning algorithms.", + "primary_area": "multiagent systems", + "author": "Zhiwei Xu; Yunpeng Bai; Bin Zhang; Dapeng Li; Guoliang Fan", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences; School of Artificial Intelligence, University of Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences", + "bibtex": "@article{Xu_Bai_Zhang_Li_Fan_2023, title={HAVEN: Hierarchical Cooperative Multi-Agent Reinforcement Learning with Dual Coordination Mechanism}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26386}, DOI={10.1609/aaai.v37i10.26386}, abstractNote={Recently, some challenging tasks in multi-agent systems have been solved by some hierarchical reinforcement learning methods. Inspired by the intra-level and inter-level coordination in the human nervous system, we propose a novel value decomposition framework HAVEN based on hierarchical reinforcement learning for fully cooperative multi-agent problems. To address the instability arising from the concurrent optimization of policies between various levels and agents, we introduce the dual coordination mechanism of inter-level and inter-agent strategies by designing reward functions in a two-level hierarchy. 
HAVEN does not require domain knowledge and pre-training, and can be applied to any value decomposition variant. Our method achieves desirable results on different decentralized partially observable Markov decision process domains and outperforms other popular multi-agent hierarchical reinforcement learning algorithms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Zhiwei and Bai, Yunpeng and Zhang, Bin and Li, Dapeng and Fan, Guoliang}, year={2023}, month={Jun.}, pages={11735-11743} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26386/26158", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26386", + "pdf_size": 3425740, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5123918610354998382&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25655", + "title": "HG-SL: Jointly Learning of Global and Local User Spreading Behavior for Fake News Early Detection", + "track": "main", + "status": "Technical", + "abstract": "Recently, fake news forgery technology has become more and more sophisticated, and even the profiles of participants may be faked, which challenges the robustness and effectiveness of traditional detection methods involving text or user identity. 
Most propagation-only approaches mainly rely on neural networks to learn the diffusion pattern of individual news, which is insufficient to describe the differences in news spread ability, and also ignores the valuable global connections of news and users, limiting the performance of detection. Therefore, we propose a joint learning model named HG-SL, which is blind to news content and user identities, but capable of catching the differences between true and fake news in the early stages of propagation through global and local user spreading behavior. Specifically, we innovatively design a Hypergraph-based Global interaction learning module to capture the global preferences of users from their co-spreading relationships, and introduce node centrality encoding to complement user influence in hypergraph learning. Moreover, the designed Self-attention-based Local context learning module first introduce spread status to highlight the propagation ability of news and users, thus providing additional signals for verifying news authenticity. 
Experiments on real-world datasets indicate that our HG-SL, which solely relies on user behavior, outperforms SOTA baselines utilizing multidimensional features in both fake news detection and early detection task.", + "primary_area": "domain s of application", + "author": "Ling Sun; Yuan Rao; Yuqian Lan; Bingcan Xia; Yangyang Li", + "authorids": "", + "aff": "Xi\u2019an Key Laboratory of Social Intelligence and Complexity Data Processing, School of Software Engineering, Xi\u2019an Jiaotong University, China; Xi\u2019an Key Laboratory of Social Intelligence and Complexity Data Processing, School of Software Engineering, Xi\u2019an Jiaotong University, China; Xi\u2019an Key Laboratory of Social Intelligence and Complexity Data Processing, School of Software Engineering, Xi\u2019an Jiaotong University, China; Xi\u2019an Key Laboratory of Social Intelligence and Complexity Data Processing, School of Software Engineering, Xi\u2019an Jiaotong University, China; National Engineering Laboratory for Risk Perception and Prevention, China", + "bibtex": "@article{Sun_Rao_Lan_Xia_Li_2023, title={HG-SL: Jointly Learning of Global and Local User Spreading Behavior for Fake News Early Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25655}, DOI={10.1609/aaai.v37i4.25655}, abstractNote={Recently, fake news forgery technology has become more and more sophisticated, and even the profiles of participants may be faked, which challenges the robustness and effectiveness of traditional detection methods involving text or user identity. Most propagation-only approaches mainly rely on neural networks to learn the diffusion pattern of individual news, which is insufficient to describe the differences in news spread ability, and also ignores the valuable global connections of news and users, limiting the performance of detection. 
"Therefore, we propose a joint learning model named HG-SL, which is blind to news content and user identities, but capable of catching the differences between true and fake news in the early stages of propagation through global and local user spreading behavior. Specifically, we innovatively design a Hypergraph-based Global interaction learning module to capture the global preferences of users from their co-spreading relationships, and introduce node centrality encoding to complement user influence in hypergraph learning. Moreover, the designed Self-attention-based Local context learning module first introduce spread status to highlight the propagation ability of news and users, thus providing additional signals for verifying news authenticity. Experiments on real-world datasets indicate that our HG-SL, which solely relies on user behavior, outperforms SOTA baselines utilizing multidimensional features in both fake news detection and early detection task.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Ling and Rao, Yuan and Lan, Yuqian and Xia, Bingcan and Li, Yangyang}, year={2023}, month={Jun.}, pages={5248-5256} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25655/25427", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25655", + "pdf_size": 1004591, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10114861493376467175&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;xjtu.edu.cn;stu.xjtu.edu.cn;cetc.com.cn", + "email": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;xjtu.edu.cn;stu.xjtu.edu.cn;cetc.com.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Xi'an Jiaotong University;National Engineering Laboratory for Risk Perception and Prevention", + "aff_unique_dep": "School of Software Engineering;", + "aff_unique_url": 
"http://www.xjtu.edu.cn;", + "aff_unique_abbr": "XJTU;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26777", + "title": "HOTCOLD Block: Fooling Thermal Infrared Detectors with a Novel Wearable Design", + "track": "aaai special track", + "status": "Technical", + "abstract": "Adversarial attacks on thermal infrared imaging expose the risk of related applications. Estimating the security of these systems is essential for safely deploying them in the real world. In many cases, realizing the attacks in the physical space requires elaborate special perturbations. These solutions are often impractical and attention-grabbing. To address the need for a physically practical and stealthy adversarial attack, we introduce HotCold Block, a novel physical attack for infrared detectors that hide persons utilizing the wearable Warming Paste and Cooling Paste. By attaching these readily available temperature-controlled materials to the body, HotCold Block evades human eyes efficiently. Moreover, unlike existing methods that build adversarial patches with complex texture and structure features, HotCold Block utilizes an SSP-oriented adversarial optimization algorithm that enables attacks with pure color blocks and explores the influence of size, shape, and position on attack performance. Extensive experimental results in both digital and physical environments demonstrate the performance of our proposed HotCold Block. 
Code is available: https://github.com/weihui1308/HOTCOLDBlock.", + "primary_area": "safe and robust ai", + "author": "Hui Wei; Zhixiang Wang; Xuemei Jia; Yinqiang Zheng; Hao Tang; Shin'ichi Satoh; Zheng Wang", + "authorids": "", + "aff": "National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science, Wuhan University+Hubei Key Laboratory of Multimedia and Network Communication Engineering; The University of Tokyo+RIISE+National Institute of Informatics; National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science, Wuhan University+Hubei Key Laboratory of Multimedia and Network Communication Engineering; The University of Tokyo; CVL, ETH Zurich; The University of Tokyo+National Institute of Informatics; National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science, Wuhan University+Hubei Key Laboratory of Multimedia and Network Communication Engineering", + "bibtex": "@article{Wei_Wang_Jia_Zheng_Tang_Satoh_Wang_2023, title={HOTCOLD Block: Fooling Thermal Infrared Detectors with a Novel Wearable Design}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26777}, DOI={10.1609/aaai.v37i12.26777}, abstractNote={Adversarial attacks on thermal infrared imaging expose the risk of related applications. Estimating the security of these systems is essential for safely deploying them in the real world. In many cases, realizing the attacks in the physical space requires elaborate special perturbations. These solutions are often impractical and attention-grabbing. To address the need for a physically practical and stealthy adversarial attack, we introduce HotCold Block, a novel physical attack for infrared detectors that hide persons utilizing the wearable Warming Paste and Cooling Paste. 
By attaching these readily available temperature-controlled materials to the body, HotCold Block evades human eyes efficiently. Moreover, unlike existing methods that build adversarial patches with complex texture and structure features, HotCold Block utilizes an SSP-oriented adversarial optimization algorithm that enables attacks with pure color blocks and explores the influence of size, shape, and position on attack performance. Extensive experimental results in both digital and physical environments demonstrate the performance of our proposed HotCold Block. Code is available: https://github.com/weihui1308/HOTCOLDBlock.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wei, Hui and Wang, Zhixiang and Jia, Xuemei and Zheng, Yinqiang and Tang, Hao and Satoh, Shin\u2019ichi and Wang, Zheng}, year={2023}, month={Jun.}, pages={15233-15241} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26777/26549", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26777", + "pdf_size": 1291592, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18118004731499215459&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "; ; ; ; ; ; ", + "email": "; ; ; ; ; ; ", + "github": "https://github.com/weihui1308/HOTCOLDBlock", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2+3+4;0+1;2;5;2+4;0+1", + "aff_unique_norm": "Wuhan University;Hubei Key Laboratory of Multimedia and Network Communication Engineering;University of Tokyo;RIISE;National Institute of Informatics;ETH Zurich", + "aff_unique_dep": "School of Computer Science;Multimedia and Network Communication Engineering;;;;Computer Vision Laboratory", + "aff_unique_url": "http://www.whu.edu.cn/;;https://www.u-tokyo.ac.jp;;https://www.nii.ac.jp/;https://www.ethz.ch", + "aff_unique_abbr": "WHU;;UTokyo;;NII;ETHZ", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0+0;1+1;0+0;1;3;1+1;0+0", + "aff_country_unique": "China;Japan;;Switzerland" + }, + { + "id": "article-25277", + "title": "HRDoc: Dataset and Baseline Method toward Hierarchical Reconstruction of Document Structures", + "track": "main", + "status": "Technical", + "abstract": "The problem of document structure reconstruction refers to converting digital or scanned documents into corresponding semantic structures. Most existing works mainly focus on splitting the boundary of each element in a single document page, neglecting the reconstruction of semantic structure in multi-page documents. This paper introduces hierarchical reconstruction of document structures as a novel task suitable for NLP and CV fields. To better evaluate the system performance on the new task, we built a large-scale dataset named HRDoc, which consists of 2,500 multi-page documents with nearly 2 million semantic units. Every document in HRDoc has line-level annotations including categories and relations obtained from rule-based extractors and human annotators. Moreover, we proposed an encoder-decoder-based hierarchical document structure parsing system (DSPS) to tackle this problem. By adopting a multi-modal bidirectional encoder and a structure-aware GRU decoder with soft-mask operation, the DSPS model surpass the baseline method by a large margin. 
All scripts and datasets will be made publicly available at https://github.com/jfma-USTC/HRDoc.", + "primary_area": "computer vision ii", + "author": "Jiefeng Ma; Jun Du; Pengfei Hu; Zhenrong Zhang; Jianshu Zhang; Huihui Zhu; Cong Liu", + "authorids": "", + "aff": "NERC-SLIP, University of Science and Technology of China; NERC-SLIP, University of Science and Technology of China; NERC-SLIP, University of Science and Technology of China; NERC-SLIP, University of Science and Technology of China; iFLYTEK Research; iFLYTEK Research; iFLYTEK Research", + "bibtex": "@article{Ma_Du_Hu_Zhang_Zhang_Zhu_Liu_2023, title={HRDoc: Dataset and Baseline Method toward Hierarchical Reconstruction of Document Structures}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25277}, DOI={10.1609/aaai.v37i2.25277}, abstractNote={The problem of document structure reconstruction refers to converting digital or scanned documents into corresponding semantic structures. Most existing works mainly focus on splitting the boundary of each element in a single document page, neglecting the reconstruction of semantic structure in multi-page documents. This paper introduces hierarchical reconstruction of document structures as a novel task suitable for NLP and CV fields. To better evaluate the system performance on the new task, we built a large-scale dataset named HRDoc, which consists of 2,500 multi-page documents with nearly 2 million semantic units. Every document in HRDoc has line-level annotations including categories and relations obtained from rule-based extractors and human annotators. Moreover, we proposed an encoder-decoder-based hierarchical document structure parsing system (DSPS) to tackle this problem. By adopting a multi-modal bidirectional encoder and a structure-aware GRU decoder with soft-mask operation, the DSPS model surpass the baseline method by a large margin. 
All scripts and datasets will be made publicly available at https://github.com/jfma-USTC/HRDoc.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Jiefeng and Du, Jun and Hu, Pengfei and Zhang, Zhenrong and Zhang, Jianshu and Zhu, Huihui and Liu, Cong}, year={2023}, month={Jun.}, pages={1870-1877} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25277/25049", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25277", + "pdf_size": 3158714, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7190408595008252474&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;iflytek.com;iflytek.com;iflytek.com", + "email": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;iflytek.com;iflytek.com;iflytek.com", + "github": "https://github.com/jfma-USTC/HRDoc", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;1;1", + "aff_unique_norm": "University of Science and Technology of China;iFLYTEK", + "aff_unique_dep": "NERC-SLIP;Research", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.iflytek.com", + "aff_unique_abbr": "USTC;iFLYTEK", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25315", + "title": "HVTSurv: Hierarchical Vision Transformer for Patient-Level Survival Prediction from Whole Slide Image", + "track": "main", + "status": "Technical", + "abstract": "Survival prediction based on whole slide images (WSIs) is a challenging task for patient-level multiple instance learning (MIL). Due to the vast amount of data for a patient (one or multiple gigapixels WSIs) and the irregularly shaped property of WSI, it is difficult to fully explore spatial, contextual, and hierarchical interaction in the patient-level bag. 
Many studies adopt random sampling pre-processing strategy and WSI-level aggregation models, which inevitably lose critical prognostic information in the patient-level bag. In this work, we propose a hierarchical vision Transformer framework named HVTSurv, which can encode the local-level relative spatial information, strengthen WSI-level context-aware communication, and establish patient-level hierarchical interaction. Firstly, we design a feature pre-processing strategy, including feature rearrangement and random window masking. Then, we devise three layers to progressively obtain patient-level representation, including a local-level interaction layer adopting Manhattan distance, a WSI-level interaction layer employing spatial shuffle, and a patient-level interaction layer using attention pooling. Moreover, the design of hierarchical network helps the model become more computationally efficient. Finally, we validate HVTSurv with 3,104 patients and 3,752 WSIs across 6 cancer types from The Cancer Genome Atlas (TCGA). The average C-Index is 2.50-11.30% higher than all the prior weakly supervised methods over 6 TCGA datasets. Ablation study and attention visualization further verify the superiority of the proposed HVTSurv. 
Implementation is available at: https://github.com/szc19990412/HVTSurv.", + "primary_area": "computer vision ii", + "author": "Zhuchen Shao; Yang Chen; Hao Bian; Jian Zhang; Guojun Liu; Yongbing Zhang", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; School of Electronic and Computer Engineering, Peking University; Computer Science and Technology, Harbin Institute of Technology; Computer Science and Technology, Harbin Institute of Technology (Shenzhen)", + "bibtex": "@article{Shao_Chen_Bian_Zhang_Liu_Zhang_2023, title={HVTSurv: Hierarchical Vision Transformer for Patient-Level Survival Prediction from Whole Slide Image}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25315}, DOI={10.1609/aaai.v37i2.25315}, abstractNote={Survival prediction based on whole slide images (WSIs) is a challenging task for patient-level multiple instance learning (MIL). Due to the vast amount of data for a patient (one or multiple gigapixels WSIs) and the irregularly shaped property of WSI, it is difficult to fully explore spatial, contextual, and hierarchical interaction in the patient-level bag. Many studies adopt random sampling pre-processing strategy and WSI-level aggregation models, which inevitably lose critical prognostic information in the patient-level bag. In this work, we propose a hierarchical vision Transformer framework named HVTSurv, which can encode the local-level relative spatial information, strengthen WSI-level context-aware communication, and establish patient-level hierarchical interaction. Firstly, we design a feature pre-processing strategy, including feature rearrangement and random window masking. 
Then, we devise three layers to progressively obtain patient-level representation, including a local-level interaction layer adopting Manhattan distance, a WSI-level interaction layer employing spatial shuffle, and a patient-level interaction layer using attention pooling. Moreover, the design of hierarchical network helps the model become more computationally efficient. Finally, we validate HVTSurv with 3,104 patients and 3,752 WSIs across 6 cancer types from The Cancer Genome Atlas (TCGA). The average C-Index is 2.50-11.30% higher than all the prior weakly supervised methods over 6 TCGA datasets. Ablation study and attention visualization further verify the superiority of the proposed HVTSurv. Implementation is available at: https://github.com/szc19990412/HVTSurv.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shao, Zhuchen and Chen, Yang and Bian, Hao and Zhang, Jian and Liu, Guojun and Zhang, Yongbing}, year={2023}, month={Jun.}, pages={2209-2217} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25315/25087", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25315", + "pdf_size": 2928970, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7265717391609324368&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com; ; ;hit.edu.cn; ; ", + "email": "gmail.com; ; ;hit.edu.cn; ; ", + "github": "https://github.com/szc19990412/HVTSurv", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;2;2", + "aff_unique_norm": "Tsinghua University;Peking University;Harbin Institute of Technology", + "aff_unique_dep": "International Graduate School;School of Electronic and Computer Engineering;Computer Science and Technology", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.pku.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "THU;PKU;HIT", + "aff_campus_unique_index": "0;0;0;2;0", + "aff_campus_unique": 
"Shenzhen;;Harbin", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27052", + "title": "HaPPy: Harnessing the Wisdom from Multi-Perspective Graphs for Protein-Ligand Binding Affinity Prediction (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Gathering information from multi-perspective graphs is an essential issue for many applications especially for proteinligand binding affinity prediction. Most of traditional approaches obtained such information individually with low interpretability. In this paper, we harness the rich information from multi-perspective graphs with a general model, which abstractly represents protein-ligand complexes with better interpretability while achieving excellent predictive performance. In addition, we specially analyze the protein-ligand binding affinity problem, taking into account the heterogeneity of proteins and ligands. Experimental evaluations demonstrate the effectiveness of our data representation strategy on public datasets by fusing information from different perspectives.", + "primary_area": "", + "author": "Xianfeng Zhang; Yanhui Gu; Guandong Xu; Yafei Li; Jinlan Wang; Zhenglu Yang", + "authorids": "", + "aff": "School of Computer and Electronic Information Science, Nanjing Normal University, Nanjing, China; School of Computer and Electronic Information Science, Nanjing Normal University, Nanjing, China; School of Computer Science and Advanced Analytics Institute, University of Technology Sydney, Sydeny, Australia; School of Chemistry and Materials Science, Nanjing Normal University, Nanjing, China; School of Physics, Southeast University, Nanjing, China; College of Computer Science, Nankai University, Tianjin, China", + "bibtex": "@article{Zhang_Gu_Xu_Li_Wang_Yang_2024, title={HaPPy: Harnessing the Wisdom from Multi-Perspective Graphs for Protein-Ligand Binding Affinity Prediction (Student Abstract)}, 
volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27052}, DOI={10.1609/aaai.v37i13.27052}, abstractNote={Gathering information from multi-perspective graphs is an essential issue for many applications especially for protein-ligand binding affinity prediction. Most of traditional approaches obtained such information individually with low interpretability. In this paper, we harness the rich information from multi-perspective graphs with a general model, which abstractly represents protein-ligand complexes with better interpretability while achieving excellent predictive performance. In addition, we specially analyze the protein-ligand binding affinity problem, taking into account the heterogeneity of proteins and ligands. Experimental evaluations demonstrate the effectiveness of our data representation strategy on public datasets by fusing information from different perspectives.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Xianfeng and Gu, Yanhui and Xu, Guandong and Li, Yafei and Wang, Jinlan and Yang, Zhenglu}, year={2024}, month={Jul.}, pages={16384-16385} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27052/26824", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27052", + "pdf_size": 766960, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:dlUvzwt05sYJ:scholar.google.com/&scioq=HaPPy:+Harnessing+the+Wisdom+from+Multi-Perspective+Graphs+for+Protein-Ligand+Binding+Affinity+Prediction+(Student+Abstract)&hl=en&as_sdt=0,23", + "gs_version_total": 3, + "aff_domain": "njnu.edu.cn;njnu.edu.cn;uts.edu.au;njnu.edu.cn;seu.edu.cn;nankai.edu.cn", + "email": "njnu.edu.cn;njnu.edu.cn;uts.edu.au;njnu.edu.cn;seu.edu.cn;nankai.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;3", + "aff_unique_norm": "Nanjing Normal University;University of Technology Sydney;Southeast University;Nankai University", 
+ "aff_unique_dep": "School of Computer and Electronic Information Science;School of Computer Science and Advanced Analytics Institute;School of Physics;College of Computer Science", + "aff_unique_url": "http://www.nju.edu.cn;https://www.uts.edu.au;https://www.seu.edu.cn/;http://www.nankai.edu.cn", + "aff_unique_abbr": "NNU;UTS;SEU;Nankai", + "aff_campus_unique_index": "0;0;1;0;0;2", + "aff_campus_unique": "Nanjing;Sydney;Tianjin", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25928", + "title": "Handling Missing Data via Max-Entropy Regularized Graph Autoencoder", + "track": "main", + "status": "Technical", + "abstract": "Graph neural networks (GNNs) are popular weapons for modeling relational data. Existing GNNs are not specified for attribute-incomplete graphs, making missing attribute imputation a burning issue. Until recently, many works notice that GNNs are coupled with spectral concentration, which means the spectrum obtained by GNNs concentrates on a local part in spectral domain, e.g., low-frequency due to oversmoothing issue. As a consequence, GNNs may be seriously flawed for reconstructing graph attributes as graph spectral concentration tends to cause a low imputation precision. In this work, we present a regularized graph autoencoder for graph attribute imputation, named MEGAE, which aims at mitigating spectral concentration problem by maximizing the graph spectral entropy. Notably, we first present the method for estimating graph spectral entropy without the eigen-decomposition of Laplacian matrix and provide the theoretical upper error bound. A maximum entropy regularization then acts in the latent space, which directly increases the graph spectral entropy. 
Extensive experiments show that MEGAE outperforms all the other state-of-the-art imputation methods on a variety of benchmark datasets.", + "primary_area": "machine learning i", + "author": "Ziqi Gao; Yifan Niu; Jiashun Cheng; Jianheng Tang; Lanqing Li; Tingyang Xu; Peilin Zhao; Fugee Tsung; Jia Li", + "authorids": "", + "aff": "The Hong Kong University of Science and Technology (Guangzhou); The Hong Kong University of Science and Technology; The Hong Kong University of Science and Technology; The Hong Kong University of Science and Technology; AI Lab, Tencent; AI Lab, Tencent; AI Lab, Tencent; The Hong Kong University of Science and Technology+The Hong Kong University of Science and Technology (Guangzhou); The Hong Kong University of Science and Technology+The Hong Kong University of Science and Technology (Guangzhou)", + "bibtex": "@article{Gao_Niu_Cheng_Tang_Li_Xu_Zhao_Tsung_Li_2023, title={Handling Missing Data via Max-Entropy Regularized Graph Autoencoder}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25928}, DOI={10.1609/aaai.v37i6.25928}, abstractNote={Graph neural networks (GNNs) are popular weapons for modeling relational data. Existing GNNs are not specified for attribute-incomplete graphs, making missing attribute imputation a burning issue. Until recently, many works notice that GNNs are coupled with spectral concentration, which means the spectrum obtained by GNNs concentrates on a local part in spectral domain, e.g., low-frequency due to oversmoothing issue. As a consequence, GNNs may be seriously flawed for reconstructing graph attributes as graph spectral concentration tends to cause a low imputation precision. In this work, we present a regularized graph autoencoder for graph attribute imputation, named MEGAE, which aims at mitigating spectral concentration problem by maximizing the graph spectral entropy. 
Notably, we first present the method for estimating graph spectral entropy without the eigen-decomposition of Laplacian matrix and provide the theoretical upper error bound. A maximum entropy regularization then acts in the latent space, which directly increases the graph spectral entropy. Extensive experiments show that MEGAE outperforms all the other state-of-the-art imputation methods on a variety of benchmark datasets.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Ziqi and Niu, Yifan and Cheng, Jiashun and Tang, Jianheng and Li, Lanqing and Xu, Tingyang and Zhao, Peilin and Tsung, Fugee and Li, Jia}, year={2023}, month={Jun.}, pages={7651-7659} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25928/25700", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25928", + "pdf_size": 4122148, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1285020041022519542&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "connect.ust.hk;connect.hkust-gz.edu.cn;connect.ust.hk;connect.ust.hk;tencent.com;tencent.com;tencent.com;ust.hk;ust.hk", + "email": "connect.ust.hk;connect.hkust-gz.edu.cn;connect.ust.hk;connect.ust.hk;tencent.com;tencent.com;tencent.com;ust.hk;ust.hk", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;2;2;2;1+0;1+0", + "aff_unique_norm": "The Hong Kong University of Science and Technology;Hong Kong University of Science and Technology;Tencent", + "aff_unique_dep": ";;AI Lab", + "aff_unique_url": "https://www.ust.hk;https://www.ust.hk;https://www.tencent.com", + "aff_unique_abbr": "HKUST;HKUST;Tencent", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26071", + "title": "Hard Sample Aware Network for Contrastive Deep Graph Clustering", + "track": "main", + 
"status": "Technical", + "abstract": "Contrastive deep graph clustering, which aims to divide nodes into disjoint groups via contrastive mechanisms, is a challenging research spot. Among the recent works, hard sample mining-based algorithms have achieved great attention for their promising performance. However, we find that the existing hard sample mining methods have two problems as follows. 1) In the hardness measurement, the important structural information is overlooked for similarity calculation, degrading the representativeness of the selected hard negative samples. 2) Previous works merely focus on the hard negative sample pairs while neglecting the hard positive sample pairs. Nevertheless, samples within the same cluster but with low similarity should also be carefully learned. To solve the problems, we propose a novel contrastive deep graph clustering method dubbed Hard Sample Aware Network (HSAN) by introducing a comprehensive similarity measure criterion and a general dynamic sample weighing strategy. Concretely, in our algorithm, the similarities between samples are calculated by considering both the attribute embeddings and the structure embeddings, better revealing sample relationships and assisting hardness measurement. Moreover, under the guidance of the carefully collected high-confidence clustering information, our proposed weight modulating function will first recognize the positive and negative samples and then dynamically up-weight the hard sample pairs while down-weighting the easy ones. In this way, our method can mine not only the hard negative samples but also the hard positive sample, thus improving the discriminative capability of the samples further. Extensive experiments and analyses demonstrate the superiority and effectiveness of our proposed method. 
The source code of HSAN is shared at https://github.com/yueliu1999/HSAN and a collection (papers, codes and, datasets) of deep graph clustering is shared at https://github.com/yueliu1999/Awesome-Deep-Graph-Clustering on Github.", + "primary_area": "machine learning ii", + "author": "Yue Liu; Xihong Yang; Sihang Zhou; Xinwang Liu; Zhen Wang; Ke Liang; Wenxuan Tu; Liang Li; Jingcan Duan; Cancan Chen", + "authorids": "", + "aff": "College of Computer, National University of Defense Technology; College of Computer, National University of Defense Technology; College of Intelligence Science and Technology, National University of Defense Technology; College of Computer, National University of Defense Technology; Northwestern Polytechnical University; College of Computer, National University of Defense Technology; College of Computer, National University of Defense Technology; College of Computer, National University of Defense Technology; College of Computer, National University of Defense Technology; Beijing Information Science and Technology University", + "bibtex": "@article{Liu_Yang_Zhou_Liu_Wang_Liang_Tu_Li_Duan_Chen_2023, title={Hard Sample Aware Network for Contrastive Deep Graph Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26071}, DOI={10.1609/aaai.v37i7.26071}, abstractNote={Contrastive deep graph clustering, which aims to divide nodes into disjoint groups via contrastive mechanisms, is a challenging research spot. Among the recent works, hard sample mining-based algorithms have achieved great attention for their promising performance. However, we find that the existing hard sample mining methods have two problems as follows. 1) In the hardness measurement, the important structural information is overlooked for similarity calculation, degrading the representativeness of the selected hard negative samples. 2) Previous works merely focus on the hard negative sample pairs while neglecting the hard positive sample pairs. 
Nevertheless, samples within the same cluster but with low similarity should also be carefully learned. To solve the problems, we propose a novel contrastive deep graph clustering method dubbed Hard Sample Aware Network (HSAN) by introducing a comprehensive similarity measure criterion and a general dynamic sample weighing strategy. Concretely, in our algorithm, the similarities between samples are calculated by considering both the attribute embeddings and the structure embeddings, better revealing sample relationships and assisting hardness measurement. Moreover, under the guidance of the carefully collected high-confidence clustering information, our proposed weight modulating function will first recognize the positive and negative samples and then dynamically up-weight the hard sample pairs while down-weighting the easy ones. In this way, our method can mine not only the hard negative samples but also the hard positive sample, thus improving the discriminative capability of the samples further. Extensive experiments and analyses demonstrate the superiority and effectiveness of our proposed method. 
The source code of HSAN is shared at https://github.com/yueliu1999/HSAN and a collection (papers, codes and, datasets) of deep graph clustering is shared at https://github.com/yueliu1999/Awesome-Deep-Graph-Clustering on Github.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yue and Yang, Xihong and Zhou, Sihang and Liu, Xinwang and Wang, Zhen and Liang, Ke and Tu, Wenxuan and Li, Liang and Duan, Jingcan and Chen, Cancan}, year={2023}, month={Jun.}, pages={8914-8922} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26071/25843", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26071", + "pdf_size": 7432524, + "gs_citation": 135, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1820733522692783615&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "nudt.edu.cn; ; ; ; ; ; ; ; ;", + "email": "nudt.edu.cn; ; ; ; ; ; ; ; ;", + "github": "https://github.com/yueliu1999/HSAN", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;1;0;0;0;0;2", + "aff_unique_norm": "National University of Defense Technology;Northwestern Polytechnical University;Beijing Information Science and Technology University", + "aff_unique_dep": "College of Computer;;", + "aff_unique_url": "http://www.nudt.edu.cn/;https://www.nwpu.edu.cn;http://www.bistu.edu.cn", + "aff_unique_abbr": "NUDT;NWPU;BISTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27008", + "title": "Hardness of Learning AES Key (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We show hardness of learning AES key from pairs of ciphertexts under the assumption of computational closeness of AES to pairwise independence. 
The latter is motivated by a recent result on statistical closeness of AES to pairwise independence.", + "primary_area": "", + "author": "Artur Pak; Sultan Nurmukhamedov; Rustem Takhanov; Zhenisbek Assylbekov", + "authorids": "", + "aff": "Department of Mathematics, Nazarbayev University; Department of Mathematics, Nazarbayev University; Department of Mathematics, Nazarbayev University; Department of Mathematics, Nazarbayev University", + "bibtex": "@article{Pak_Nurmukhamedov_Takhanov_Assylbekov_2024, title={Hardness of Learning AES Key (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27008}, DOI={10.1609/aaai.v37i13.27008}, abstractNote={We show hardness of learning AES key from pairs of ciphertexts under the assumption of computational closeness of AES to pairwise independence. The latter is motivated by a recent result on statistical closeness of AES to pairwise independence.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pak, Artur and Nurmukhamedov, Sultan and Takhanov, Rustem and Assylbekov, Zhenisbek}, year={2024}, month={Jul.}, pages={16296-16297} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27008/26780", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27008", + "pdf_size": 85822, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:sFOgbHnfslYJ:scholar.google.com/&scioq=Hardness+of+Learning+AES+Key+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "nu.edu.kz;nu.edu.kz;nu.edu.kz;nu.edu.kz", + "email": "nu.edu.kz;nu.edu.kz;nu.edu.kz;nu.edu.kz", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nazarbayev University", + "aff_unique_dep": "Department of Mathematics", + "aff_unique_url": "https://www.nu.edu.kz", + "aff_unique_abbr": "NU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0", + "aff_country_unique": "Kazakhstan" + }, + { + "id": "article-25126", + "title": "Head-Free Lightweight Semantic Segmentation with Linear Transformer", + "track": "main", + "status": "Technical", + "abstract": "Existing semantic segmentation works have been mainly focused on designing effective decoders; however, the computational load introduced by the overall structure has long been ignored, which hinders their applications on resource-constrained hardwares. In this paper, we propose a head-free lightweight architecture specifically for semantic segmentation, named Adaptive Frequency Transformer (AFFormer). AFFormer adopts a parallel architecture to leverage prototype representations as specific learnable local descriptions which replaces the decoder and preserves the rich image semantics on high-resolution features. Although removing the decoder compresses most of the computation, the accuracy of the parallel structure is still limited by low computational resources. Therefore, we employ heterogeneous operators (CNN and vision Transformer) for pixel embedding and prototype representations to further save computational costs. Moreover, it is very difficult to linearize the complexity of the vision Transformer from the perspective of spatial domain. Due to the fact that semantic segmentation is very sensitive to frequency information, we construct a lightweight prototype learning block with adaptive frequency filter of complexity O(n) to replace standard self attention with O(n^2). Extensive experiments on widely adopted datasets demonstrate that AFFormer achieves superior accuracy while retaining only 3M parameters. On the ADE20K dataset, AFFormer achieves 41.8 mIoU and 4.6 GFLOPs, which is 4.4 mIoU higher than Segformer, with 45% less GFLOPs. On the Cityscapes dataset, AFFormer achieves 78.7 mIoU and 34.4 GFLOPs, which is 2.5 mIoU higher than Segformer with 72.5% less GFLOPs. 
Code is available at https://github.com/dongbo811/AFFormer.", + "primary_area": "computer vision i", + "author": "Bo Dong; Pichao Wang; Fan Wang", + "authorids": "", + "aff": "Alibaba Group; Alibaba Group; Alibaba Group", + "bibtex": "@article{Dong_Wang_Wang_2023, title={Head-Free Lightweight Semantic Segmentation with Linear Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25126}, DOI={10.1609/aaai.v37i1.25126}, abstractNote={Existing semantic segmentation works have been mainly focused on designing effective decoders; however, the computational load introduced by the overall structure has long been ignored, which hinders their applications on resource-constrained hardwares. In this paper, we propose a head-free lightweight architecture specifically for semantic segmentation, named Adaptive Frequency Transformer (AFFormer). AFFormer adopts a parallel architecture to leverage prototype representations as specific learnable local descriptions which replaces the decoder and preserves the rich image semantics on high-resolution features. Although removing the decoder compresses most of the computation, the accuracy of the parallel structure is still limited by low computational resources. Therefore, we employ heterogeneous operators (CNN and vision Transformer) for pixel embedding and prototype representations to further save computational costs. Moreover, it is very difficult to linearize the complexity of the vision Transformer from the perspective of spatial domain. Due to the fact that semantic segmentation is very sensitive to frequency information, we construct a lightweight prototype learning block with adaptive frequency filter of complexity O(n) to replace standard self attention with O(n^2). Extensive experiments on widely adopted datasets demonstrate that AFFormer achieves superior accuracy while retaining only 3M parameters. 
On the ADE20K dataset, AFFormer achieves 41.8 mIoU and 4.6 GFLOPs, which is 4.4 mIoU higher than Segformer, with 45% less GFLOPs. On the Cityscapes dataset, AFFormer achieves 78.7 mIoU and 34.4 GFLOPs, which is 2.5 mIoU higher than Segformer with 72.5% less GFLOPs. Code is available at https://github.com/dongbo811/AFFormer.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Bo and Wang, Pichao and Wang, Fan}, year={2023}, month={Jun.}, pages={516-524} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25126/24898", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25126", + "pdf_size": 677492, + "gs_citation": 91, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13687827984438130958&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com;alibaba-inc.com", + "email": "gmail.com;gmail.com;alibaba-inc.com", + "github": "https://github.com/dongbo811/AFFormer", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26685", + "title": "Help Me Heal: A Reinforced Polite and Empathetic Mental Health and Legal Counseling Dialogue System for Crime Victims", + "track": "aaai special track", + "status": "Technical", + "abstract": "The potential for conversational agents offering mental health and legal counseling in an autonomous, interactive, and vitally accessible environment is getting highlighted due to the increased access to information through the internet and mobile devices. A counseling conversational agent should be able to offer higher engagement mimicking the real-time counseling sessions. 
The ability to empathize or comprehend and feel another person\u2019s emotions and experiences is a crucial quality that promotes effective therapeutic bonding and rapport-building. Further, the use of polite encoded language in the counseling reflects the nobility and creates a familiar, warm, and comfortable atmosphere to resolve human issues. Therefore, focusing on these two aspects, we propose a Polite and Empathetic Mental Health and Legal Counseling Dialogue System (Po-Em-MHLCDS) for the victims of crimes. To build Po-Em-MHLCDS, we first create a Mental Health and Legal Counseling Dataset (MHLCD) by recruiting six employees who are asked to converse with each other, acting as a victim and the agent interchangeably following a fixed stated guidelines. Second, the MHLCD dataset is annotated with three informative labels, viz. counseling strategies, politeness, and empathy. Lastly, we train the Po-Em-MHLCDS in a reinforcement learning framework by designing an efficient and effective reward function to reinforce correct counseling strategy, politeness and empathy while maintaining contextual-coherence and non-repetitiveness in the generated responses. Our extensive automatic and human evaluation demonstrate the strength of the proposed system. 
Codes and Data can be accessed at https://www.iitp.ac.in/ ai-nlp-ml/resources.html#MHLCD or https://github.com/Mishrakshitij/Po-Em-MHLCDS", + "primary_area": "ai for social impact", + "author": "Kshitij Mishra; Priyanshu Priya; Asif Ekbal", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Indian Institute of Technology Patna, Bihta, India; Department of Computer Science and Engineering, Indian Institute of Technology Patna, Bihta, India; Department of Computer Science and Engineering, Indian Institute of Technology Patna, Bihta, India", + "bibtex": "@article{Mishra_Priya_Ekbal_2023, title={Help Me Heal: A Reinforced Polite and Empathetic Mental Health and Legal Counseling Dialogue System for Crime Victims}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26685}, DOI={10.1609/aaai.v37i12.26685}, abstractNote={The potential for conversational agents offering mental health and legal counseling in an autonomous, interactive, and vitally accessible environment is getting highlighted due to the increased access to information through the internet and mobile devices. A counseling conversational agent should be able to offer higher engagement mimicking the real-time counseling sessions. The ability to empathize or comprehend and feel another person\u2019s emotions and experiences is a crucial quality that promotes effective therapeutic bonding and rapport-building. Further, the use of polite encoded language in the counseling reflects the nobility and creates a familiar, warm, and comfortable atmosphere to resolve human issues. Therefore, focusing on these two aspects, we propose a Polite and Empathetic Mental Health and Legal Counseling Dialogue System (Po-Em-MHLCDS) for the victims of crimes. 
To build Po-Em-MHLCDS, we first create a Mental Health and Legal Counseling Dataset (MHLCD) by recruiting six employees who are asked to converse with each other, acting as a victim and the agent interchangeably following a fixed stated guidelines. Second, the MHLCD dataset is annotated with three informative labels, viz. counseling strategies, politeness, and empathy. Lastly, we train the Po-Em-MHLCDS in a reinforcement learning framework by designing an efficient and effective reward function to reinforce correct counseling strategy, politeness and empathy while maintaining contextual-coherence and non-repetitiveness in the generated responses. Our extensive automatic and human evaluation demonstrate the strength of the proposed system. Codes and Data can be accessed at https://www.iitp.ac.in/ ai-nlp-ml/resources.html#MHLCD or https://github.com/Mishrakshitij/Po-Em-MHLCDS}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mishra, Kshitij and Priya, Priyanshu and Ekbal, Asif}, year={2023}, month={Jun.}, pages={14408-14416} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26685/26457", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26685", + "pdf_size": 211715, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2906314460791048937&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "iitp.ac.in;iitp.ac.in;iitp.ac.in", + "email": "iitp.ac.in;iitp.ac.in;iitp.ac.in", + "github": "https://github.com/Mishrakshitij/Po-Em-MHLCDS", + "project": "https://www.iitp.ac.in/ ai-nlp-ml/resources.html#MHLCD", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Patna", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.iitp.ac.in", + "aff_unique_abbr": "IIT Patna", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Patna", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25643", + "title": "Heterogeneous Graph Learning for Multi-Modal Medical Data Analysis", + "track": "main", + "status": "Technical", + "abstract": "Routine clinical visits of a patient produce not only image data, but also non-image data containing clinical information regarding the patient, i.e., medical data is multi-modal in nature. Such heterogeneous modalities offer different and complementary perspectives on the same patient, resulting in more accurate clinical decisions when they are properly combined. However, despite its significance, how to effectively fuse the multi-modal medical data into a unified framework has received relatively little attention. In this paper, we propose an effective graph-based framework called HetMed (Heterogeneous Graph Learning for Multi-modal Medical Data Analysis) for fusing the multi-modal medical data.\nSpecifically, we construct a multiplex network that incorporates multiple types of non-image features of patients to capture the complex relationship between patients in a systematic way, which leads to more accurate clinical decisions. Extensive experiments on various real-world datasets demonstrate the superiority and practicality of HetMed. The source code for HetMed is available at https://github.com/Sein-Kim/Multimodal-Medical.", + "primary_area": "domain s of application", + "author": "Sein Kim; Namkyeong Lee; Junseok Lee; Dongmin Hyun; Chanyoung Park", + "authorids": "", + "aff": "Dept. of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea; Dept. of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea; Dept. of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea; Institute of Artifical Intelligence, POSTECH, Pohang, Republic of Korea; Dept. 
of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea+Graduate School of Artificial Intelligence, KAIST, Daejeon, Republic of Korea", + "bibtex": "@article{Kim_Lee_Lee_Hyun_Park_2023, title={Heterogeneous Graph Learning for Multi-Modal Medical Data Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25643}, DOI={10.1609/aaai.v37i4.25643}, abstractNote={Routine clinical visits of a patient produce not only image data, but also non-image data containing clinical information regarding the patient, i.e., medical data is multi-modal in nature. Such heterogeneous modalities offer different and complementary perspectives on the same patient, resulting in more accurate clinical decisions when they are properly combined. However, despite its significance, how to effectively fuse the multi-modal medical data into a unified framework has received relatively little attention. In this paper, we propose an effective graph-based framework called HetMed (Heterogeneous Graph Learning for Multi-modal Medical Data Analysis) for fusing the multi-modal medical data.\nSpecifically, we construct a multiplex network that incorporates multiple types of non-image features of patients to capture the complex relationship between patients in a systematic way, which leads to more accurate clinical decisions. Extensive experiments on various real-world datasets demonstrate the superiority and practicality of HetMed. 
The source code for HetMed is available at https://github.com/Sein-Kim/Multimodal-Medical.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Sein and Lee, Namkyeong and Lee, Junseok and Hyun, Dongmin and Park, Chanyoung}, year={2023}, month={Jun.}, pages={5141-5150} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25643/25415", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25643", + "pdf_size": 573949, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4737217715770676259&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;postech.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;postech.ac.kr;kaist.ac.kr", + "github": "https://github.com/Sein-Kim/Multimodal-Medical", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0+0", + "aff_unique_norm": "KAIST;POSTECH", + "aff_unique_dep": "Dept. of Industrial and Systems Engineering;Institute of Artificial Intelligence", + "aff_unique_url": "https://www.kaist.ac.kr;https://www.postech.ac.kr", + "aff_unique_abbr": "KAIST;POSTECH", + "aff_campus_unique_index": "0;0;0;1;0+0", + "aff_campus_unique": "Daejeon;Pohang", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-26192", + "title": "Heterogeneous Graph Masked Autoencoders", + "track": "main", + "status": "Technical", + "abstract": "Generative self-supervised learning (SSL), especially masked autoencoders, has become one of the most exciting learning paradigms and has shown great potential in handling graph data. However, real-world graphs are always heterogeneous, which poses three critical challenges that existing methods ignore: 1) how to capture complex graph structure? 2) how to incorporate various node attributes? and 3) how to encode different node positions? 
In light of this, we study the problem of generative SSL on heterogeneous graphs and propose HGMAE, a novel heterogeneous graph masked autoencoder model to address these challenges. HGMAE captures comprehensive graph information via two innovative masking techniques and three unique training strategies. In particular, we first develop metapath masking and adaptive attribute masking with dynamic mask rate to enable effective and stable learning on heterogeneous graphs. We then design several training strategies including metapath-based edge reconstruction to adopt complex structural information, target attribute restoration to incorporate various node attributes, and positional feature prediction to encode node positional information. Extensive experiments demonstrate that HGMAE outperforms both contrastive and generative state-of-the-art baselines on several tasks across multiple datasets. Codes are available at https://github.com/meettyj/HGMAE.", + "primary_area": "machine learning iii", + "author": "Yijun Tian; Kaiwen Dong; Chunhui Zhang; Chuxu Zhang; Nitesh V. 
Chawla", + "authorids": "", + "aff": "Department of Computer Science and Engineering, University of Notre Dame, USA+Lucy Family Institute for Data and Society, University of Notre Dame, USA; Department of Computer Science and Engineering, University of Notre Dame, USA+Lucy Family Institute for Data and Society, University of Notre Dame, USA; Department of Computer Science, Brandeis University, USA; Department of Computer Science, Brandeis University, USA; Department of Computer Science and Engineering, University of Notre Dame, USA+Lucy Family Institute for Data and Society, University of Notre Dame, USA", + "bibtex": "@article{Tian_Dong_Zhang_Zhang_Chawla_2023, title={Heterogeneous Graph Masked Autoencoders}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26192}, DOI={10.1609/aaai.v37i8.26192}, abstractNote={Generative self-supervised learning (SSL), especially masked autoencoders, has become one of the most exciting learning paradigms and has shown great potential in handling graph data. However, real-world graphs are always heterogeneous, which poses three critical challenges that existing methods ignore: 1) how to capture complex graph structure? 2) how to incorporate various node attributes? and 3) how to encode different node positions? In light of this, we study the problem of generative SSL on heterogeneous graphs and propose HGMAE, a novel heterogeneous graph masked autoencoder model to address these challenges. HGMAE captures comprehensive graph information via two innovative masking techniques and three unique training strategies. In particular, we first develop metapath masking and adaptive attribute masking with dynamic mask rate to enable effective and stable learning on heterogeneous graphs. 
We then design several training strategies including metapath-based edge reconstruction to adopt complex structural information, target attribute restoration to incorporate various node attributes, and positional feature prediction to encode node positional information. Extensive experiments demonstrate that HGMAE outperforms both contrastive and generative state-of-the-art baselines on several tasks across multiple datasets. Codes are available at https://github.com/meettyj/HGMAE.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Yijun and Dong, Kaiwen and Zhang, Chunhui and Zhang, Chuxu and Chawla, Nitesh V.}, year={2023}, month={Jun.}, pages={9997-10005} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26192/25964", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26192", + "pdf_size": 2695646, + "gs_citation": 104, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8047219038479979567&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "nd.edu;nd.edu;brandeis.edu;brandeis.edu;nd.edu", + "email": "nd.edu;nd.edu;brandeis.edu;brandeis.edu;nd.edu", + "github": "https://github.com/meettyj/HGMAE", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;1;1;0+0", + "aff_unique_norm": "University of Notre Dame;Brandeis University", + "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Science", + "aff_unique_url": "https://www.nd.edu;https://www.brandeis.edu", + "aff_unique_abbr": "Notre Dame;Brandeis", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25625", + "title": "Heterogeneous Region Embedding with Prompt Learning", + "track": "main", + "status": "Technical", + "abstract": "The prevalence of region-based urban data has opened new possibilities for exploring correlations among 
regions to improve urban planning and smart-city solutions. Region embedding, which plays a critical role in this endeavor, faces significant challenges related to the varying nature of city data and the effectiveness of downstream applications. In this paper, we propose a novel framework, HREP (Heterogeneous Region Embedding with Prompt learning), which addresses both intra-region and inter-region correlations through two key modules: Heterogeneous Region Embedding (HRE) and prompt learning for different downstream tasks. The HRE module constructs a heterogeneous region graph based on three categories of data, capturing inter-region contexts such as human mobility and geographic neighbors, and intraregion contexts such as POI (Point-of-Interest) information. We use relation-aware graph embedding to learn region and relation embeddings of edge types, and introduce selfattention to capture global correlations among regions. Additionally, we develop an attention-based fusion module to integrate shared information among different types of correlations. To enhance the effectiveness of region embedding in downstream tasks, we incorporate prompt learning, specifically prefix-tuning, which guides the learning of downstream tasks and results in better prediction performance. 
Our experiment results on real-world datasets demonstrate that our proposed model outperforms state-of-the-art methods.", + "primary_area": "data mining and knowledge management", + "author": "Silin Zhou; Dan He; Lisi Chen; Shuo Shang; Peng Han", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; The University of Queensland, Australia; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China", + "bibtex": "@article{Zhou_He_Chen_Shang_Han_2023, title={Heterogeneous Region Embedding with Prompt Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25625}, DOI={10.1609/aaai.v37i4.25625}, abstractNote={The prevalence of region-based urban data has opened new possibilities for exploring correlations among regions to improve urban planning and smart-city solutions. Region embedding, which plays a critical role in this endeavor, faces significant challenges related to the varying nature of city data and the effectiveness of downstream applications. In this paper, we propose a novel framework, HREP (Heterogeneous Region Embedding with Prompt learning), which addresses both intra-region and inter-region correlations through two key modules: Heterogeneous Region Embedding (HRE) and prompt learning for different downstream tasks. The HRE module constructs a heterogeneous region graph based on three categories of data, capturing inter-region contexts such as human mobility and geographic neighbors, and intraregion contexts such as POI (Point-of-Interest) information. We use relation-aware graph embedding to learn region and relation embeddings of edge types, and introduce selfattention to capture global correlations among regions. Additionally, we develop an attention-based fusion module to integrate shared information among different types of correlations. 
To enhance the effectiveness of region embedding in downstream tasks, we incorporate prompt learning, specifically prefix-tuning, which guides the learning of downstream tasks and results in better prediction performance. Our experiment results on real-world datasets demonstrate that our proposed model outperforms state-of-the-art methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Silin and He, Dan and Chen, Lisi and Shang, Shuo and Han, Peng}, year={2023}, month={Jun.}, pages={4981-4989} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25625/25397", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25625", + "pdf_size": 239763, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5992617140860486201&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;uq.edu.au;e.ntu.edu.sg;gmail.com;hotmail.com", + "email": "gmail.com;uq.edu.au;e.ntu.edu.sg;gmail.com;hotmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;The University of Queensland", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.uq.edu.au", + "aff_unique_abbr": "UESTC;UQ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26544", + "title": "Heterogeneous-Branch Collaborative Learning for Dialogue Generation", + "track": "main", + "status": "Technical", + "abstract": "With the development of deep learning, advanced dialogue generation methods usually require a greater amount of computational resources. One promising approach to obtaining a high-performance and lightweight model is knowledge distillation, which relies heavily on the pre-trained powerful teacher. 
Collaborative learning, also known as online knowledge distillation, is an effective way to conduct one-stage group distillation in the absence of a well-trained large teacher model. However, previous work has a severe branch homogeneity problem due to the same training objective and the independent identical training sets. To alleviate this problem, we consider the dialogue attributes in the training of network branches. Each branch learns the attribute-related features based on the selected subset. Furthermore, we propose a dual group-based knowledge distillation method, consisting of positive distillation and negative distillation, to further diversify the features of different branches in a steadily and interpretable way. The proposed approach significantly improves branch heterogeneity and outperforms state-of-the-art collaborative learning methods on two widely used open-domain dialogue datasets.", + "primary_area": "speech natural language processing", + "author": "Yiwei Li; Shaoxiong Feng; Bin Sun; Kan Li", + "authorids": "", + "aff": "School of Computer Science, Beijing Institute of Technology; School of Computer Science, Beijing Institute of Technology; School of Computer Science, Beijing Institute of Technology; School of Computer Science, Beijing Institute of Technology", + "bibtex": "@article{Li_Feng_Sun_Li_2023, title={Heterogeneous-Branch Collaborative Learning for Dialogue Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26544}, DOI={10.1609/aaai.v37i11.26544}, abstractNote={With the development of deep learning, advanced dialogue generation methods usually require a greater amount of computational resources. One promising approach to obtaining a high-performance and lightweight model is knowledge distillation, which relies heavily on the pre-trained powerful teacher. 
Collaborative learning, also known as online knowledge distillation, is an effective way to conduct one-stage group distillation in the absence of a well-trained large teacher model. However, previous work has a severe branch homogeneity problem due to the same training objective and the independent identical training sets. To alleviate this problem, we consider the dialogue attributes in the training of network branches. Each branch learns the attribute-related features based on the selected subset. Furthermore, we propose a dual group-based knowledge distillation method, consisting of positive distillation and negative distillation, to further diversify the features of different branches in a steadily and interpretable way. The proposed approach significantly improves branch heterogeneity and outperforms state-of-the-art collaborative learning methods on two widely used open-domain dialogue datasets.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yiwei and Feng, Shaoxiong and Sun, Bin and Li, Kan}, year={2023}, month={Jun.}, pages={13148-13156} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26544/26316", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26544", + "pdf_size": 187714, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2013714507598047693&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beijing Institute of Technology", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.bit.edu.cn", + "aff_unique_abbr": "BIT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-26409", + "title": "Heuristic Search for Multi-Objective Probabilistic Planning", + "track": "main", + "status": "Technical", + "abstract": "Heuristic search is a powerful approach that has successfully been applied to a broad class of planning problems, including classical planning, multi-objective planning, and probabilistic planning modelled as a stochastic shortest path (SSP) problem. Here, we extend the reach of heuristic search to a more expressive class of problems, namely multi-objective stochastic shortest paths (MOSSPs), which require computing a coverage set of non-dominated policies. We design new heuristic search algorithms MOLAO* and MOLRTDP, which extend well-known SSP algorithms to the multi-objective case. We further construct a spectrum of domain-independent heuristic functions differing in their ability to take into account the stochastic and multi-objective features of the problem to guide the search. Our experiments demonstrate the benefits of these algorithms and the relative merits of the heuristics.", + "primary_area": "planning routing and scheduling", + "author": "Dillon Z. Chen; Felipe Trevizan; Sylvie Thi\u00e9baux", + "authorids": "", + "aff": "School of Computing, The Australian National University; School of Computing, The Australian National University; School of Computing, The Australian National University + LAAS-CNRS, ANITI, Universit\u00e9 de Toulouse", + "bibtex": "@article{Chen_Trevizan_Thi\u00e9baux_2023, title={Heuristic Search for Multi-Objective Probabilistic Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26409}, DOI={10.1609/aaai.v37i10.26409}, abstractNote={Heuristic search is a powerful approach that has successfully been applied to a broad class of planning problems, including classical planning, multi-objective planning, and probabilistic planning modelled as a stochastic shortest path (SSP) problem. 
Here, we extend the reach of heuristic search to a more expressive class of problems, namely multi-objective stochastic shortest paths (MOSSPs), which require computing a coverage set of non-dominated policies. We design new heuristic search algorithms MOLAO* and MOLRTDP, which extend well-known SSP algorithms to the multi-objective case. We further construct a spectrum of domain-independent heuristic functions differing in their ability to take into account the stochastic and multi-objective features of the problem to guide the search. Our experiments demonstrate the benefits of these algorithms and the relative merits of the heuristics.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Dillon Z. and Trevizan, Felipe and Thi\u00e9baux, Sylvie}, year={2023}, month={Jun.}, pages={11945-11954} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26409/26181", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26409", + "pdf_size": 297017, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2604137217729616315&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 11, + "aff_domain": "anu.edu.au;anu.edu.au;anu.edu.au", + "email": "anu.edu.au;anu.edu.au;anu.edu.au", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "The Australian National University;LAAS-CNRS", + "aff_unique_dep": "School of Computing;", + "aff_unique_url": "https://www.anu.edu.au;https://www.laas.fr", + "aff_unique_abbr": "ANU;LAAS-CNRS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1", + "aff_country_unique": "Australia;France" + }, + { + "id": "article-26743", + "title": "Heuristic Search in Dual Space for Constrained Fixed-Horizon POMDPs with Durative Actions", + "track": "aaai special track", + "status": "Technical", + "abstract": "The Partially Observable Markov Decision Process (POMDP) is 
widely used in probabilistic planning for stochastic domains. However, current extensions, such as constrained and chance-constrained POMDPs, have limitations in modeling real-world planning problems because they assume that all actions have a fixed duration. To address this issue, we propose a unified model that encompasses durative POMDP and its constrained extensions. To solve the durative POMDP and its constrained extensions, we first convert them into an Integer Linear Programming (ILP) formulation. This approach leverages existing solvers in the ILP literature and provides a foundation for solving these problems. We then introduce a heuristic search approach that prunes the search space, which is guided by solving successive partial ILP programs. Our empirical evaluation results show that our approach outperforms the current state-of-the-art fixed-horizon chance-constrained POMDP solver.", + "primary_area": "safe and robust ai", + "author": "Majid Khonji; Duoaa Khalifa", + "authorids": "", + "aff": "Khalifa University; Khalifa University", + "bibtex": "@article{Khonji_Khalifa_2023, title={Heuristic Search in Dual Space for Constrained Fixed-Horizon POMDPs with Durative Actions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26743}, DOI={10.1609/aaai.v37i12.26743}, abstractNote={The Partially Observable Markov Decision Process (POMDP) is widely used in probabilistic planning for stochastic domains. However, current extensions, such as constrained and chance-constrained POMDPs, have limitations in modeling real-world planning problems because they assume that all actions have a fixed duration. To address this issue, we propose a unified model that encompasses durative POMDP and its constrained extensions. To solve the durative POMDP and its constrained extensions, we first convert them into an Integer Linear Programming (ILP) formulation. 
This approach leverages existing solvers in the ILP literature and provides a foundation for solving these problems. We then introduce a heuristic search approach that prunes the search space, which is guided by solving successive partial ILP programs. Our empirical evaluation results show that our approach outperforms the current state-of-the-art fixed-horizon chance-constrained POMDP solver.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Khonji, Majid and Khalifa, Duoaa}, year={2023}, month={Jun.}, pages={14927-14936} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26743/26515", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26743", + "pdf_size": 313662, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:EkpytPAK2ioJ:scholar.google.com/&scioq=Heuristic+Search+in+Dual+Space+for+Constrained+Fixed-Horizon+POMDPs+with+Durative+Actions&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "ku.ac.ae;ku.ac.ae", + "email": "ku.ac.ae;ku.ac.ae", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Khalifa University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.khalifa.edu", + "aff_unique_abbr": "KU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Arab Emirates" + }, + { + "id": "article-26937", + "title": "Hey, Siri! Why Are You Biased against Women? (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The intersection of pervasive technology and verbal communication has resulted in the creation of Automatic Speech Recognition Systems (ASRs), which automate the conversion of spontaneous speech into texts. ASR enables human-computer interactions through speech and is rapidly integrated into our daily lives. 
However, the research studies on current ASR technologies have reported unfulfilled social inclusivity and accentuated biases and stereotypes towards minorities. In this work, we provide a review of examples and evidence to demonstrate preexisting sexist behavior in ASR systems through a systematic review of research literature over the past five years. For each article, we also provide the ASR technology used, highlight specific instances of reported bias, discuss the impact of this bias on the female community, and suggest possible methods of mitigation. We believe this paper will provide insights into the harm that unchecked AI-powered technologies can have on a community by contributing to the growing body of research on this topic and underscoring the need for technological inclusivity for all demographics, especially women.", + "primary_area": "", + "author": "Surakshya Aryal; Mikel K. Ngueajio; Saurav Keshari Aryal; Gloria Washington", + "authorids": "", + "aff": "Howard University; Howard University; Howard University; Howard University", + "bibtex": "@article{Aryal_Ngueajio_Aryal_Washington_2024, title={Hey, Siri! Why Are You Biased against Women? (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26937}, DOI={10.1609/aaai.v37i13.26937}, abstractNote={The intersection of pervasive technology and verbal communication has resulted in the creation of Automatic Speech Recognition Systems (ASRs), which automate the conversion of spontaneous speech into texts. ASR enables human-computer interactions through speech and is rapidly integrated into our daily lives. However, the research studies on current ASR technologies have reported unfulfilled social inclusivity and accentuated biases and stereotypes towards minorities. In this work, we provide a review of examples and evidence to demonstrate preexisting sexist behavior in ASR systems through a systematic review of research literature over the past five years. 
For each article, we also provide the ASR technology used, highlight specific instances of reported bias, discuss the impact of this bias on the female community, and suggest possible methods of mitigation. We believe this paper will provide insights into the harm that unchecked AI-powered technologies can have on a community by contributing to the growing body of research on this topic and underscoring the need for technological inclusivity for all demographics, especially women.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aryal, Surakshya and Ngueajio, Mikel K. and Aryal, Saurav Keshari and Washington, Gloria}, year={2024}, month={Jul.}, pages={16154-16155} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26937/26709", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26937", + "pdf_size": 61767, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7637810406733638685&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "bison.howard.edu;gmail.com;howard.edu;howard.edu", + "email": "bison.howard.edu;gmail.com;howard.edu;howard.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Howard University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.howard.edu", + "aff_unique_abbr": "HU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25072", + "title": "Hierarchical ConViT with Attention-Based Relational Reasoner for Visual Analogical Reasoning", + "track": "main", + "status": "Technical", + "abstract": "Raven\u2019s Progressive Matrices (RPMs) have been widely used to evaluate the visual reasoning ability of humans. 
To tackle the challenges of visual perception and logic reasoning on RPMs, we propose a Hierarchical ConViT with Attention-based Relational Reasoner (HCV-ARR). Traditional solution methods often apply relatively shallow convolution networks to visually perceive shape patterns in RPM images, which may not fully model the long-range dependencies of complex pattern combinations in RPMs. The proposed ConViT consists of a convolutional block to capture the low-level attributes of visual patterns, and a transformer block to capture the high-level image semantics such as pattern formations. Furthermore, the proposed hierarchical ConViT captures visual features from multiple receptive fields, where the shallow layers focus on the image fine details while the deeper layers focus on the image semantics. To better model the underlying reasoning rules embedded in RPM images, an Attention-based Relational Reasoner (ARR) is proposed to establish the underlying relations among images. The proposed ARR well exploits the hidden relations among question images through the developed element-wise attentive reasoner. Experimental results on three RPM datasets demonstrate that the proposed HCV-ARR achieves a significant performance gain compared with the state-of-the-art models. 
The source code is available at: https://github.com/wentaoheunnc/HCV-ARR.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Wentao He; Jialu Zhang; Jianfeng Ren; Ruibin Bai; Xudong Jiang", + "authorids": "", + "aff": "The Digital Port Technologies Lab, School of Computer Science, University of Nottingham Ningbo China; The Digital Port Technologies Lab, School of Computer Science, University of Nottingham Ningbo China + Nottingham Ningbo China Beacons of Excellence Research and Innovation Institute, University of Nottingham Ningbo China; The Digital Port Technologies Lab, School of Computer Science, University of Nottingham Ningbo China + Nottingham Ningbo China Beacons of Excellence Research and Innovation Institute, University of Nottingham Ningbo China; The Digital Port Technologies Lab, School of Computer Science, University of Nottingham Ningbo China + Nottingham Ningbo China Beacons of Excellence Research and Innovation Institute, University of Nottingham Ningbo China; School of Electrical & Electronic Engineering, Nanyang Technological University", + "bibtex": "@article{He_Zhang_Ren_Bai_Jiang_2023, title={Hierarchical ConViT with Attention-Based Relational Reasoner for Visual Analogical Reasoning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25072}, DOI={10.1609/aaai.v37i1.25072}, abstractNote={Raven\u2019s Progressive Matrices (RPMs) have been widely used to evaluate the visual reasoning ability of humans. To tackle the challenges of visual perception and logic reasoning on RPMs, we propose a Hierarchical ConViT with Attention-based Relational Reasoner (HCV-ARR). Traditional solution methods often apply relatively shallow convolution networks to visually perceive shape patterns in RPM images, which may not fully model the long-range dependencies of complex pattern combinations in RPMs. 
The proposed ConViT consists of a convolutional block to capture the low-level attributes of visual patterns, and a transformer block to capture the high-level image semantics such as pattern formations. Furthermore, the proposed hierarchical ConViT captures visual features from multiple receptive fields, where the shallow layers focus on the image fine details while the deeper layers focus on the image semantics. To better model the underlying reasoning rules embedded in RPM images, an Attention-based Relational Reasoner (ARR) is proposed to establish the underlying relations among images. The proposed ARR well exploits the hidden relations among question images through the developed element-wise attentive reasoner. Experimental results on three RPM datasets demonstrate that the proposed HCV-ARR achieves a significant performance gain compared with the state-of-the-art models. The source code is available at: https://github.com/wentaoheunnc/HCV-ARR.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Wentao and Zhang, Jialu and Ren, Jianfeng and Bai, Ruibin and Jiang, Xudong}, year={2023}, month={Jun.}, pages={22-30} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25072/24844", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25072", + "pdf_size": 911907, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7429669913174999208&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff_domain": "nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;ntu.edu.sg", + "email": "nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;ntu.edu.sg", + "github": "https://github.com/wentaoheunnc/HCV-ARR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+0;0+0;0+0;1", + "aff_unique_norm": "University of Nottingham Ningbo China;Nanyang Technological University", + "aff_unique_dep": "School of Computer Science;School of 
Electrical & Electronic Engineering", + "aff_unique_url": "https://www.nottingham.edu.cn;https://www.ntu.edu.sg", + "aff_unique_abbr": "UNNC;NTU", + "aff_campus_unique_index": "0;0+0;0+0;0+0", + "aff_campus_unique": "Ningbo;", + "aff_country_unique_index": "0;0+0;0+0;0+0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25451", + "title": "Hierarchical Consistent Contrastive Learning for Skeleton-Based Action Recognition with Growing Augmentations", + "track": "main", + "status": "Technical", + "abstract": "Contrastive learning has been proven beneficial for self-supervised skeleton-based action recognition. Most contrastive learning methods utilize carefully designed augmentations to generate different movement patterns of skeletons for the same semantics. However, it is still a pending issue to apply strong augmentations, which distort the images/skeletons\u2019 structures and cause semantic loss, due to their resulting unstable training. In this paper, we investigate the potential of adopting strong augmentations and propose a general hierarchical consistent contrastive learning framework (HiCLR) for skeleton-based action recognition. Specifically, we first design a gradual growing augmentation policy to generate multiple ordered positive pairs, which guide to achieve the consistency of the learned representation from different views. Then, an asymmetric loss is proposed to enforce the hierarchical consistency via a directional clustering operation in the feature space, pulling the representations from strongly augmented views closer to those from weakly augmented views for better generalizability. Meanwhile, we propose and evaluate three kinds of strong augmentations for 3D skeletons to demonstrate the effectiveness of our method. Extensive experiments show that HiCLR outperforms the state-of-the-art methods notably on three large-scale datasets, i.e., NTU60, NTU120, and PKUMMD. 
Our project is publicly available at: https://jhang2020.github.io/Projects/HiCLR/HiCLR.html.", + "primary_area": "computer vision iii", + "author": "Jiahang Zhang; Lilang Lin; Jiaying Liu", + "authorids": "", + "aff": "Wangxuan Institute of Computer Technology, Peking University, Beijing, China; Wangxuan Institute of Computer Technology, Peking University, Beijing, China; Wangxuan Institute of Computer Technology, Peking University, Beijing, China", + "bibtex": "@article{Zhang_Lin_Liu_2023, title={Hierarchical Consistent Contrastive Learning for Skeleton-Based Action Recognition with Growing Augmentations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25451}, DOI={10.1609/aaai.v37i3.25451}, abstractNote={Contrastive learning has been proven beneficial for self-supervised skeleton-based action recognition. Most contrastive learning methods utilize carefully designed augmentations to generate different movement patterns of skeletons for the same semantics. However, it is still a pending issue to apply strong augmentations, which distort the images/skeletons\u2019 structures and cause semantic loss, due to their resulting unstable training. In this paper, we investigate the potential of adopting strong augmentations and propose a general hierarchical consistent contrastive learning framework (HiCLR) for skeleton-based action recognition. Specifically, we first design a gradual growing augmentation policy to generate multiple ordered positive pairs, which guide to achieve the consistency of the learned representation from different views. Then, an asymmetric loss is proposed to enforce the hierarchical consistency via a directional clustering operation in the feature space, pulling the representations from strongly augmented views closer to those from weakly augmented views for better generalizability. Meanwhile, we propose and evaluate three kinds of strong augmentations for 3D skeletons to demonstrate the effectiveness of our method. 
Extensive experiments show that HiCLR outperforms the state-of-the-art methods notably on three large-scale datasets, i.e., NTU60, NTU120, and PKUMMD. Our project is publicly available at: https://jhang2020.github.io/Projects/HiCLR/HiCLR.html.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jiahang and Lin, Lilang and Liu, Jiaying}, year={2023}, month={Jun.}, pages={3427-3435} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25451/25223", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25451", + "pdf_size": 655092, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12911973028706216165&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "https://jhang2020.github.io/Projects/HiCLR/HiCLR.html", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "Wangxuan Institute of Computer Technology", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25127", + "title": "Hierarchical Contrast for Unsupervised Skeleton-Based Action Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "This paper targets unsupervised skeleton-based action representation learning and proposes a new Hierarchical Contrast (HiCo) framework. Different from the existing contrastive-based solutions that typically represent an input skeleton sequence into instance-level features and perform contrast holistically, our proposed HiCo represents the input into multiple-level features and performs contrast in a hierarchical manner. 
Specifically, given a human skeleton sequence, we represent it into multiple feature vectors of different granularities from both temporal and spatial domains via sequence-to-sequence (S2S) encoders and unified downsampling modules. Besides, the hierarchical contrast is conducted in terms of four levels: instance level, domain level, clip level, and part level. Moreover, HiCo is orthogonal to the S2S encoder, which allows us to flexibly embrace state-of-the-art S2S encoders. Extensive experiments on four datasets, i.e., NTU-60, NTU-120, PKU-I and PKU-II, show that HiCo achieves a new state-of-the-art for unsupervised skeleton-based action representation learning in two downstream tasks including action recognition and retrieval, and its learned action representation is of good transferability. Besides, we also show that our framework is effective for semi-supervised skeleton-based action recognition. Our code is available at https://github.com/HuiGuanLab/HiCo.", + "primary_area": "computer vision i", + "author": "Jianfeng Dong; Shengkai Sun; Zhonglin Liu; Shujie Chen; Baolong Liu; Xun Wang", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang Gongshang University, China+Zhejiang Key Lab of E-Commerce, China; College of Computer Science and Technology, Zhejiang Gongshang University, China; College of Computer Science and Technology, Zhejiang Gongshang University, China; College of Computer Science and Technology, Zhejiang Gongshang University, China+Zhejiang Key Lab of E-Commerce, China; College of Computer Science and Technology, Zhejiang Gongshang University, China+Zhejiang Key Lab of E-Commerce, China; College of Computer Science and Technology, Zhejiang Gongshang University, China+Zhejiang Key Lab of E-Commerce, China", + "bibtex": "@article{Dong_Sun_Liu_Chen_Liu_Wang_2023, title={Hierarchical Contrast for Unsupervised Skeleton-Based Action Representation Learning}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25127}, DOI={10.1609/aaai.v37i1.25127}, abstractNote={This paper targets unsupervised skeleton-based action representation learning and proposes a new Hierarchical Contrast (HiCo) framework. Different from the existing contrastive-based solutions that typically represent an input skeleton sequence into instance-level features and perform contrast holistically, our proposed HiCo represents the input into multiple-level features and performs contrast in a hierarchical manner. Specifically, given a human skeleton sequence, we represent it into multiple feature vectors of different granularities from both temporal and spatial domains via sequence-to-sequence (S2S) encoders and unified downsampling modules. Besides, the hierarchical contrast is conducted in terms of four levels: instance level, domain level, clip level, and part level. Moreover, HiCo is orthogonal to the S2S encoder, which allows us to flexibly embrace state-of-the-art S2S encoders. Extensive experiments on four datasets, i.e., NTU-60, NTU-120, PKU-I and PKU-II, show that HiCo achieves a new state-of-the-art for unsupervised skeleton-based action representation learning in two downstream tasks including action recognition and retrieval, and its learned action representation is of good transferability. Besides, we also show that our framework is effective for semi-supervised skeleton-based action recognition. 
Our code is available at https://github.com/HuiGuanLab/HiCo.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Jianfeng and Sun, Shengkai and Liu, Zhonglin and Chen, Shujie and Liu, Baolong and Wang, Xun}, year={2023}, month={Jun.}, pages={525-533} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25127/24899", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25127", + "pdf_size": 2132279, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9095839310062060587&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/HuiGuanLab/HiCo", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;0+1;0+1;0+1", + "aff_unique_norm": "Zhejiang Gongshang University;Zhejiang Key Lab of E-Commerce", + "aff_unique_dep": "College of Computer Science and Technology;E-Commerce", + "aff_unique_url": ";", + "aff_unique_abbr": ";", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26211", + "title": "Hierarchical Contrastive Learning for Temporal Point Processes", + "track": "main", + "status": "Technical", + "abstract": "As an important sequential model, the temporal point process (TPP) plays a central role in real-world sequence modeling and analysis, whose learning is often based on the maximum likelihood estimation (MLE). However, due to imperfect observations, such as incomplete and sparse sequences that are common in practice, the MLE of TPP models often suffers from overfitting and leads to unsatisfactory generalization power. In this work, we develop a novel hierarchical contrastive (HCL) learning method for temporal point processes, which provides a new regularizer of MLE. 
In principle, our HCL considers the noise contrastive estimation (NCE) problem at the event-level and at the sequence-level jointly. Given a sequence, the event-level NCE maximizes the probability of each observed event given its history while penalizing the conditional probabilities of the unobserved events. At the same time, we generate positive and negative event sequences from the observed sequence and maximize the discrepancy between their likelihoods through the sequence-level NCE. Instead of using time-consuming simulation methods, we generate the positive and negative sequences via a simple but efficient model-guided thinning process. Experimental results show that the MLE method assisted by the HCL regularizer outperforms classic MLE and other contrastive learning methods in learning various TPP models consistently. The code is available at https://github.com/qingmeiwangdaily/HCL_TPP.", + "primary_area": "machine learning iii", + "author": "Qingmei Wang; Minjie Cheng; Shen Yuan; Hongteng Xu", + "authorids": "", + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China + Beijing Key Laboratory of Big Data Management and Analysis Methods", + "bibtex": "@article{Wang_Cheng_Yuan_Xu_2023, title={Hierarchical Contrastive Learning for Temporal Point Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26211}, DOI={10.1609/aaai.v37i8.26211}, abstractNote={As an important sequential model, the temporal point process (TPP) plays a central role in real-world sequence modeling and analysis, whose learning is often based on the maximum likelihood estimation (MLE). 
However, due to imperfect observations, such as incomplete and sparse sequences that are common in practice, the MLE of TPP models often suffers from overfitting and leads to unsatisfactory generalization power. In this work, we develop a novel hierarchical contrastive (HCL) learning method for temporal point processes, which provides a new regularizer of MLE. In principle, our HCL considers the noise contrastive estimation (NCE) problem at the event-level and at the sequence-level jointly. Given a sequence, the event-level NCE maximizes the probability of each observed event given its history while penalizing the conditional probabilities of the unobserved events. At the same time, we generate positive and negative event sequences from the observed sequence and maximize the discrepancy between their likelihoods through the sequence-level NCE. Instead of using time-consuming simulation methods, we generate the positive and negative sequences via a simple but efficient model-guided thinning process. Experimental results show that the MLE method assisted by the HCL regularizer outperforms classic MLE and other contrastive learning methods in learning various TPP models consistently. 
The code is available at https://github.com/qingmeiwangdaily/HCL_TPP.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Qingmei and Cheng, Minjie and Yuan, Shen and Xu, Hongteng}, year={2023}, month={Jun.}, pages={10166-10174} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26211/25983", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26211", + "pdf_size": 237974, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=40303268422191053&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "https://github.com/qingmeiwangdaily/HCL_TPP", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;Big Data Management and Analysis", + "aff_unique_url": "http://www.ruc.edu.cn;", + "aff_unique_abbr": "RUC;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China"
In this work, we present an extension to the event grounding task that requires tackling hierarchical event structures from the KB. Our proposed task involves linking a mention reference to a set of event labels from a subevent hierarchy in the KB. We propose a retrieval methodology that leverages event hierarchy through an auxiliary hierarchical loss. On an automatically created multilingual dataset from Wikipedia and Wikidata, our experiments demonstrate the effectiveness of the hierarchical loss against retrieve and re-rank baselines. Furthermore, we demonstrate the systems' ability to aid hierarchical discovery among unseen events. Code is available at https://github.com/JefferyO/Hierarchical-Event-Grounding", + "primary_area": "speech natural language processing", + "author": "Jiefu Ou; Adithya Pratapa; Rishubh Gupta; Teruko Mitamura", + "authorids": "", + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "bibtex": "@article{Ou_Pratapa_Gupta_Mitamura_2023, title={Hierarchical Event Grounding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26576}, DOI={10.1609/aaai.v37i11.26576}, abstractNote={Event grounding aims at linking mention references in text corpora to events from a knowledge base (KB). Previous work on this task focused primarily on linking to a single KB event, thereby overlooking the hierarchical aspects of events. Events in documents are typically described at various levels of spatio-temporal granularity. These hierarchical relations are utilized in downstream tasks of narrative understanding and schema construction. In this work, we present an extension to the event grounding task that requires tackling hierarchical event structures from the KB. 
Our proposed task involves linking a mention reference to a set of event labels from a subevent hierarchy in the KB. We propose a retrieval methodology that leverages event hierarchy through an auxiliary hierarchical loss. On an automatically created multilingual dataset from Wikipedia and Wikidata, our experiments demonstrate the effectiveness of the hierarchical loss against retrieve and re-rank baselines. Furthermore, we demonstrate the systems\u2019 ability to aid hierarchical discovery among unseen events. Code is available at https://github.com/JefferyO/Hierarchical-Event-Grounding}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ou, Jiefu and Pratapa, Adithya and Gupta, Rishubh and Mitamura, Teruko}, year={2023}, month={Jun.}, pages={13437-13445} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26576/26348", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26576", + "pdf_size": 131512, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13904586395694550555&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "github": "https://github.com/JefferyO/Hierarchical-Event-Grounding", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26387", + "title": "Hierarchical Mean-Field Deep Reinforcement Learning for Large-Scale Multiagent Systems", + "track": "main", + "status": "Technical", + "abstract": "Learning for efficient coordination 
in large-scale multiagent systems suffers from the problem of the curse of dimensionality due to the exponential growth of agent interactions. Mean-Field (MF)-based methods address this issue by transforming the interactions within the whole system into a single agent played with the average effect of its neighbors. However, considering the neighbors merely by their average may ignore the varying influences of each neighbor, and learning with this kind of local average effect would likely lead to inferior system performance due to lack of an efficient coordination mechanism in the whole population level. In this work, we propose a Hierarchical Mean-Field (HMF) learning framework to further improve the performance of existing MF methods. The basic idea is to approximate the average effect for a sub-group of agents by considering their different influences within the sub-group, and realize population-level coordination through the interactions among different sub-groups. Empirical studies show that HMF significantly outperforms existing baselines on both challenging cooperative and mixed cooperative-competitive tasks with different scales of agent populations.", + "primary_area": "multiagent systems", + "author": "Chao Yu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Pengcheng Laboratory, Shenzhen, China", + "bibtex": "@article{Yu_2023, title={Hierarchical Mean-Field Deep Reinforcement Learning for Large-Scale Multiagent Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26387}, DOI={10.1609/aaai.v37i10.26387}, abstractNote={Learning for efficient coordination in large-scale multiagent systems suffers from the problem of the curse of dimensionality due to the exponential growth of agent interactions. Mean-Field (MF)-based methods address this issue by transforming the interactions within the whole system into a single agent played with the average effect of its neighbors. 
However, considering the neighbors merely by their average may ignore the varying influences of each neighbor, and learning with this kind of local average effect would likely lead to inferior system performance due to lack of an efficient coordination mechanism in the whole population level. In this work, we propose a Hierarchical Mean-Field (HMF) learning framework to further improve the performance of existing MF methods. The basic idea is to approximate the average effect for a sub-group of agents by considering their different influences within the sub-group, and realize population-level coordination through the interactions among different sub-groups. Empirical studies show that HMF significantly outperforms existing baselines on both challenging cooperative and mixed cooperative-competitive tasks with different scales of agent populations.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Chao}, year={2023}, month={Jun.}, pages={11744-11752} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26387/26159", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26387", + "pdf_size": 1087893, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12415812167560467643&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.sysu.edu.cn", + "email": "mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+1", + "aff_unique_norm": "Sun Yat-sen University;Pengcheng Laboratory", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.sysu.edu.cn;", + "aff_unique_abbr": "SYSU;", + "aff_campus_unique_index": "0+1", + "aff_campus_unique": "Guangzhou;Shenzhen", + "aff_country_unique_index": "0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26520", + "title": "Hierarchical Text Classification as Sub-hierarchy Sequence Generation", + "track": "main", + 
"status": "Technical", + "abstract": "Hierarchical text classification (HTC) is essential for various real applications. However, HTC models are challenging to develop because they often require processing a large volume of documents and labels with hierarchical taxonomy. Recent HTC models based on deep learning have attempted to incorporate hierarchy information into a model structure. Consequently, these models are challenging to implement when the model parameters increase for a large-scale hierarchy because the model structure depends on the hierarchy size. To solve this problem, we formulate HTC as a sub-hierarchy sequence generation to incorporate hierarchy information into a target label sequence instead of the model structure. Subsequently, we propose the Hierarchy DECoder (HiDEC), which decodes a text sequence into a sub-hierarchy sequence using recursive hierarchy decoding, classifying all parents at the same level into children at once. In addition, HiDEC is trained to use hierarchical path information from a root to each leaf in a sub-hierarchy composed of the labels of a target document via an attention mechanism and hierarchy-aware masking. 
HiDEC achieved state-of-the-art performance with significantly fewer model parameters than existing models on benchmark datasets, such as RCV1-v2, NYT, and EURLEX57K.", + "primary_area": "speech natural language processing", + "author": "SangHun Im; GiBaeg Kim; Heung-Seon Oh; Seongung Jo; Dong Hwan Kim", + "authorids": "", + "aff": "School of Computer Science and Engineering, Korea University of Technology and Education (KOREATECH); School of Computer Science and Engineering, Korea University of Technology and Education (KOREATECH); School of Computer Science and Engineering, Korea University of Technology and Education (KOREATECH); School of Computer Science and Engineering, Korea University of Technology and Education (KOREATECH); School of Computer Science and Engineering, Korea University of Technology and Education (KOREATECH)", + "bibtex": "@article{Im_Kim_Oh_Jo_Kim_2023, title={Hierarchical Text Classification as Sub-hierarchy Sequence Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26520}, DOI={10.1609/aaai.v37i11.26520}, abstractNote={Hierarchical text classification (HTC) is essential for various real applications. However, HTC models are challenging to develop because they often require processing a large volume of documents and labels with hierarchical taxonomy. Recent HTC models based on deep learning have attempted to incorporate hierarchy information into a model structure. Consequently, these models are challenging to implement when the model parameters increase for a large-scale hierarchy because the model structure depends on the hierarchy size. To solve this problem, we formulate HTC as a sub-hierarchy sequence generation to incorporate hierarchy information into a target label sequence instead of the model structure. 
Subsequently, we propose the Hierarchy DECoder (HiDEC), which decodes a text sequence into a sub-hierarchy sequence using recursive hierarchy decoding, classifying all parents at the same level into children at once. In addition, HiDEC is trained to use hierarchical path information from a root to each leaf in a sub-hierarchy composed of the labels of a target document via an attention mechanism and hierarchy-aware masking. HiDEC achieved state-of-the-art performance with significantly fewer model parameters than existing models on benchmark datasets, such as RCV1-v2, NYT, and EURLEX57K.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Im, SangHun and Kim, GiBaeg and Oh, Heung-Seon and Jo, Seongung and Kim, Dong Hwan}, year={2023}, month={Jun.}, pages={12933-12941} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26520/26292", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26520", + "pdf_size": 493250, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6030960791068268791&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "koreatech.ac.kr;koreatech.ac.kr;koreatech.ac.kr;koreatech.ac.kr;koreatech.ac.kr", + "email": "koreatech.ac.kr;koreatech.ac.kr;koreatech.ac.kr;koreatech.ac.kr;koreatech.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Korea University of Technology and Education", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.koreatech.ac.kr", + "aff_unique_abbr": "KOREATECH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26335", + "title": "High-Dimensional Dueling Optimization with Preference Embedding", + "track": "main", + "status": "Technical", + "abstract": "In many scenarios of black-box 
optimization, evaluating the objective function values of solutions is expensive, while comparing a pair of solutions is relatively cheap, which yields the dueling black-box optimization. The side effect of dueling optimization is that it doubles the dimension of solution space and exacerbates the dimensionality scalability issue of black-box optimization, e.g., Bayesian optimization. To address this issue, the existing dueling optimization methods fix one solution when dueling throughout the optimization process, but it may reduce their efficacy. Fortunately, it has been observed that, in recommendation systems, the dueling results are mainly determined by the latent human preferences. In this paper, we abstract this phenomenon as the preferential intrinsic dimension and inject it into the dueling Bayesian optimization, resulting in the preferential embedding dueling Bayesian optimization (PE-DBO). PE-DBO decouples optimization and pairwise comparison via the preferential embedding matrix. Optimization is performed in the preferential intrinsic subspace with much lower dimensionality, while pairwise comparison is completed in the original dueling solution space. Theoretically, we disclose that the preference function can be approximately preserved in the lower-dimensional preferential intrinsic subspace. 
Experiment results verify that, on molecule discovery and web page recommendation dueling optimization tasks, the preferential intrinsic dimension exists and PE-DBO is superior in scalability compared with that of the state-of-the-art (SOTA) methods.", + "primary_area": "machine learning iv", + "author": "Yangwenhui Zhang; Hong Qian; Xiang Shu; Aimin Zhou", + "authorids": "", + "aff": "Shanghai Institute of AI for Education and School of Computer Science and Technology, East China Normal University, Shanghai 200062, China; Shanghai Institute of AI for Education and School of Computer Science and Technology, East China Normal University, Shanghai 200062, China; Shanghai Institute of AI for Education and School of Computer Science and Technology, East China Normal University, Shanghai 200062, China; Shanghai Institute of AI for Education and School of Computer Science and Technology, East China Normal University, Shanghai 200062, China", + "bibtex": "@article{Zhang_Qian_Shu_Zhou_2023, title={High-Dimensional Dueling Optimization with Preference Embedding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26335}, DOI={10.1609/aaai.v37i9.26335}, abstractNote={In many scenarios of black-box optimization, evaluating the objective function values of solutions is expensive, while comparing a pair of solutions is relatively cheap, which yields the dueling black-box optimization. The side effect of dueling optimization is that it doubles the dimension of solution space and exacerbates the dimensionality scalability issue of black-box optimization, e.g., Bayesian optimization. To address this issue, the existing dueling optimization methods fix one solution when dueling throughout the optimization process, but it may reduce their efficacy. Fortunately, it has been observed that, in recommendation systems, the dueling results are mainly determined by the latent human preferences. 
In this paper, we abstract this phenomenon as the preferential intrinsic dimension and inject it into the dueling Bayesian optimization, resulting in the preferential embedding dueling Bayesian optimization (PE-DBO). PE-DBO decouples optimization and pairwise comparison via the preferential embedding matrix. Optimization is performed in the preferential intrinsic subspace with much lower dimensionality, while pairwise comparison is completed in the original dueling solution space. Theoretically, we disclose that the preference function can be approximately preserved in the lower-dimensional preferential intrinsic subspace. Experiment results verify that, on molecule discovery and web page recommendation dueling optimization tasks, the preferential intrinsic dimension exists and PE-DBO is superior in scalability compared with that of the state-of-the-art (SOTA) methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yangwenhui and Qian, Hong and Shu, Xiang and Zhou, Aimin}, year={2023}, month={Jun.}, pages={11280-11288} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26335/26107", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26335", + "pdf_size": 415708, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6357384304835779572&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 3, + "aff_domain": "stu.ecnu.edu.cn;cs.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;cs.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": 
"China" + }, + { + "id": "article-26306", + "title": "High-Level Semantic Feature Matters Few-Shot Unsupervised Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "In few-shot unsupervised domain adaptation (FS-UDA), most existing methods followed the few-shot learning (FSL) methods to leverage the low-level local features (learned from conventional convolutional models, e.g., ResNet) for classification. However, the goal of FS-UDA and FSL are relevant yet distinct, since FS-UDA aims to classify the samples in target domain rather than source domain. We found that the local features are insufficient to FS-UDA, which could introduce noise or bias against classification, and not be used to effectively align the domains. To address the above issues, we aim to refine the local features to be more discriminative and relevant to classification. Thus, we propose a novel task-specific semantic feature learning method (TSECS) for FS-UDA. TSECS learns high-level semantic features for image-to-class similarity measurement. Based on the high-level features, we design a cross-domain self-training strategy to leverage the few labeled samples in source domain to build the classifier in target domain. In addition, we minimize the KL divergence of the high-level feature distributions between source and target domains to shorten the distance of the samples between the two domains. 
Extensive experiments on DomainNet show that the proposed method significantly outperforms SOTA methods in FS-UDA by a large margin (i.e., ~10%).", + "primary_area": "machine learning iv", + "author": "Lei Yu; Wanqi Yang; Shengqi Huang; Lei Wang; Ming Yang", + "authorids": "", + "aff": "School of Computer and Electronic Information, Nanjing Normal University, China; School of Computer and Electronic Information, Nanjing Normal University, China; School of Computer and Electronic Information, Nanjing Normal University, China; School of Computing and Information Technology, University of Wollongong, Australia; School of Computer and Electronic Information, Nanjing Normal University, China", + "bibtex": "@article{Yu_Yang_Huang_Wang_Yang_2023, title={High-Level Semantic Feature Matters Few-Shot Unsupervised Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26306}, DOI={10.1609/aaai.v37i9.26306}, abstractNote={In few-shot unsupervised domain adaptation (FS-UDA), most existing methods followed the few-shot learning (FSL) methods to leverage the low-level local features (learned from conventional convolutional models, e.g., ResNet) for classification. However, the goal of FS-UDA and FSL are relevant yet distinct, since FS-UDA aims to classify the samples in target domain rather than source domain. We found that the local features are insufficient to FS-UDA, which could introduce noise or bias against classification, and not be used to effectively align the domains. To address the above issues, we aim to refine the local features to be more discriminative and relevant to classification. Thus, we propose a novel task-specific semantic feature learning method (TSECS) for FS-UDA. TSECS learns high-level semantic features for image-to-class similarity measurement. Based on the high-level features, we design a cross-domain self-training strategy to leverage the few labeled samples in source domain to build the classifier in target domain. 
In addition, we minimize the KL divergence of the high-level feature distributions between source and target domains to shorten the distance of the samples between the two domains. Extensive experiments on DomainNet show that the proposed method significantly outperforms SOTA methods in FS-UDA by a large margin (i.e., ~10%).}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Lei and Yang, Wanqi and Huang, Shengqi and Wang, Lei and Yang, Ming}, year={2023}, month={Jun.}, pages={11025-11033} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26306/26078", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26306", + "pdf_size": 1552864, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11323758135388318270&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "njnu.edu.cn;njnu.edu.cn;njnu.edu.cn;uow.edu.au;njnu.edu.cn", + "email": "njnu.edu.cn;njnu.edu.cn;njnu.edu.cn;uow.edu.au;njnu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Nanjing Normal University;University of Wollongong", + "aff_unique_dep": "School of Computer and Electronic Information;School of Computing and Information Technology", + "aff_unique_url": "http://www.nju.edu.cn;https://www.uow.edu.au", + "aff_unique_abbr": "NNU;UOW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25371", + "title": "High-Resolution GAN Inversion for Degraded Images in Large Diverse Datasets", + "track": "main", + "status": "Technical", + "abstract": "The last decades are marked by massive and diverse image data, which shows increasingly high resolution and quality. However, some images we obtained may be corrupted, affecting the perception and the application of downstream tasks. 
A generic method for generating a high-quality image from the degraded one is in demand. In this paper, we present a novel GAN inversion framework that utilizes the powerful generative ability of StyleGAN-XL for this problem. To ease the inversion challenge with StyleGAN-XL, Clustering \\& Regularize Inversion (CRI) is proposed. Specifically, the latent space is firstly divided into finer-grained sub-spaces by clustering. Instead of initializing the inversion with the average latent vector, we approximate a centroid latent vector from the clusters, which generates an image close to the input image. Then, an offset with a regularization term is introduced to keep the inverted latent vector within a certain range. We validate our CRI scheme on multiple restoration tasks (i.e., inpainting, colorization, and super-resolution) of complex natural images, and show preferable quantitative and qualitative results. We further demonstrate our technique is robust in terms of data and different GAN models. To our best knowledge, we are the first to adopt StyleGAN-XL for generating high-quality natural images from diverse degraded inputs. 
Code is available at https://github.com/Booooooooooo/CRI.", + "primary_area": "computer vision iii", + "author": "Yanbo Wang; Chuming Lin; Donghao Luo; Ying Tai; Zhizhong Zhang; Yuan Xie", + "authorids": "", + "aff": "School of Computer Science and Technology, East China Normal University; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; School of Computer Science and Technology, East China Normal University; School of Computer Science and Technology, East China Normal University", + "bibtex": "@article{Wang_Lin_Luo_Tai_Zhang_Xie_2023, title={High-Resolution GAN Inversion for Degraded Images in Large Diverse Datasets}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25371}, DOI={10.1609/aaai.v37i3.25371}, abstractNote={The last decades are marked by massive and diverse image data, which shows increasingly high resolution and quality. However, some images we obtained may be corrupted, affecting the perception and the application of downstream tasks. A generic method for generating a high-quality image from the degraded one is in demand. In this paper, we present a novel GAN inversion framework that utilizes the powerful generative ability of StyleGAN-XL for this problem. To ease the inversion challenge with StyleGAN-XL, Clustering \\& Regularize Inversion (CRI) is proposed. Specifically, the latent space is firstly divided into finer-grained sub-spaces by clustering. Instead of initializing the inversion with the average latent vector, we approximate a centroid latent vector from the clusters, which generates an image close to the input image. Then, an offset with a regularization term is introduced to keep the inverted latent vector within a certain range. We validate our CRI scheme on multiple restoration tasks (i.e., inpainting, colorization, and super-resolution) of complex natural images, and show preferable quantitative and qualitative results. We further demonstrate our technique is robust in terms of data and different GAN models. 
To our best knowledge, we are the first to adopt StyleGAN-XL for generating high-quality natural images from diverse degraded inputs. Code is available at https://github.com/Booooooooooo/CRI.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yanbo and Lin, Chuming and Luo, Donghao and Tai, Ying and Zhang, Zhizhong and Xie, Yuan}, year={2023}, month={Jun.}, pages={2716-2723} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25371/25143", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25371", + "pdf_size": 7987185, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11451112072622708973&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "stu.ecnu.edu.cn;tencent.com;tencent.com;tencent.com;cs.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;tencent.com;tencent.com;tencent.com;cs.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "https://github.com/Booooooooooo/CRI", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;0", + "aff_unique_norm": "East China Normal University;Tencent", + "aff_unique_dep": "School of Computer Science and Technology;Youtu Lab", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "ECNU;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25167", + "title": "High-Resolution Iterative Feedback Network for Camouflaged Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Spotting camouflaged objects that are visually assimilated into the background is tricky for both object detection algorithms and humans who are usually confused or cheated by the perfectly intrinsic similarities between the foreground objects and the background surroundings. 
To tackle this challenge, we aim to extract the high-resolution texture details to avoid the detail degradation that causes blurred vision in edges and boundaries. We introduce a novel HitNet to refine the low-resolution representations by high-resolution features in an iterative feedback manner, essentially a global loop-based connection among the multi-scale resolutions. To design better feedback feature \ufb02ow and avoid the feature corruption caused by recurrent path, an iterative feedback strategy is proposed to impose more constraints on each feedback connection. Extensive experiments on four challenging datasets demonstrate that our HitNet breaks the performance bottleneck and achieves significant improvements compared with 29 state-of-the-art methods. In addition, to address the data scarcity in camouflaged scenarios, we provide an application example to convert the salient objects to camouflaged objects, thereby generating more camouflaged training samples from the diverse salient object datasets. Code will be made publicly available.", + "primary_area": "computer vision i", + "author": "Xiaobin Hu; Shuo Wang; Xuebin Qin; Hang Dai; Wenqi Ren; Donghao Luo; Ying Tai; Ling Shao", + "authorids": "", + "aff": "Tencent Youtu Lab; ETH Zurich; Mohamed bin Zayed University of Artificial Intelligence; University of Glasgow; Sun Yat-sen University; Tencent Youtu Lab; Tencent Youtu Lab; Terminus Group", + "bibtex": "@article{Hu_Wang_Qin_Dai_Ren_Luo_Tai_Shao_2023, title={High-Resolution Iterative Feedback Network for Camouflaged Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25167}, DOI={10.1609/aaai.v37i1.25167}, abstractNote={Spotting camouflaged objects that are visually assimilated into the background is tricky for both object detection algorithms and humans who are usually confused or cheated by the perfectly intrinsic similarities between the foreground objects and the background surroundings. 
To tackle this challenge, we aim to extract the high-resolution texture details to avoid the detail degradation that causes blurred vision in edges and boundaries. We introduce a novel HitNet to refine the low-resolution representations by high-resolution features in an iterative feedback manner, essentially a global loop-based connection among the multi-scale resolutions. To design better feedback feature \ufb02ow and avoid the feature corruption caused by recurrent path, an iterative feedback strategy is proposed to impose more constraints on each feedback connection. Extensive experiments on four challenging datasets demonstrate that our HitNet breaks the performance bottleneck and achieves significant improvements compared with 29 state-of-the-art methods. In addition, to address the data scarcity in camouflaged scenarios, we provide an application example to convert the salient objects to camouflaged objects, thereby generating more camouflaged training samples from the diverse salient object datasets. 
Code will be made publicly available.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Xiaobin and Wang, Shuo and Qin, Xuebin and Dai, Hang and Ren, Wenqi and Luo, Donghao and Tai, Ying and Shao, Ling}, year={2023}, month={Jun.}, pages={881-889} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25167/24939", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25167", + "pdf_size": 2905656, + "gs_citation": 128, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18150876621497898971&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "tencent.com;gmail.com;ualberta.ca;glasgow.ac.uk;mail.sysu.edu.cn;tencent.com;tencent.com;ieee.org", + "email": "tencent.com;gmail.com;ualberta.ca;glasgow.ac.uk;mail.sysu.edu.cn;tencent.com;tencent.com;ieee.org", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;4;0;0;5", + "aff_unique_norm": "Tencent;ETH Zurich;Mohamed bin Zayed University of Artificial Intelligence;University of Glasgow;Sun Yat-sen University;Terminus Group", + "aff_unique_dep": "Youtu Lab;;;;;", + "aff_unique_url": "https://www.tencent.com;https://www.ethz.ch;https://www.mbzuai.ac.ae;https://www.gla.ac.uk;http://www.sysu.edu.cn/;", + "aff_unique_abbr": "Tencent;ETHZ;MBZUAI;Glasgow;SYSU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;3;0;0;0", + "aff_country_unique": "China;Switzerland;United Arab Emirates;United Kingdom;" + }, + { + "id": "article-26869", + "title": "High-Throughput, High-Performance Deep Learning-Driven Light Guide Plate Surface Visual Quality Inspection Tailored for Real-World Manufacturing Environments", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Light guide plates are essential optical components widely used in a diverse range of applications ranging from medical lighting fixtures to back-lit TV displays. 
An essential step in the manufacturing of light guide plates is the quality inspection of defects such as scratches, bright/dark spots, and impurities. This is mainly done in industry through manual visual inspection for plate pattern irregularities, which is time-consuming and prone to human error and thus act as a significant barrier to high-throughput production. Advances in deep learning-driven computer vision has led to the exploration of automated visual quality inspection of light guide plates to improve inspection consistency, accuracy, and efficiency. However, given the computational constraints and high-throughput nature of real-world manufacturing environments, the widespread adoption of deep learning-driven visual inspection systems for inspecting light guide plates in real-world manufacturing environments has been greatly limited due to high computational requirements and integration challenges of existing deep learning approaches in research literature. In this work, we introduce a fully-integrated, high-throughput, high-performance deep learning-driven workflow for light guide plate surface visual quality inspection (VQI) tailored for real-world manufacturing environments. To enable automated VQI on the edge computing within the fully-integrated VQI system, a highly compact deep anti-aliased attention condenser neural network (which we name Light-DefectNet) tailored specifically for light guide plate surface defect detection in resource-constrained scenarios was created via machine-driven design exploration with computational and \u201cbest-practices\u201d constraints as well as L1 paired classification discrepancy loss. 
Experiments show that Light-DetectNet achieves a detection accuracy of \u223c98.2% on the LGPSDD benchmark while having just 770K parameters\n(\u223c33\u00d7 and \u223c6.9\u00d7 lower than ResNet-50 and EfficientNet-B0, respectively) and \u223c93M FLOPs (\u223c88\u00d7 and \u223c8.4\u00d7 lower than ResNet-50 and EfficientNet-B0, respectively) and \u223c8.8\u00d7 faster inference speed than EfficientNet-B0 on an embedded ARM processor. As such, the proposed deep learning-driven workflow, integrated with the aforementioned LightDefectNet neural network, is highly suited for high-throughput, high-performance light plate surface VQI within real-world manufacturing environments.", + "primary_area": "emerging applications of ai", + "author": "Carol Xu; Mahmoud Famouri; Gautam Bathla; Mohammad Javad Shafiee; Alexander Wong", + "authorids": "", + "aff": "DarwinAI, Waterloo, Ontario, Canada; DarwinAI, Waterloo, Ontario, Canada; DarwinAI, Waterloo, Ontario, Canada; DarwinAI, Waterloo, Ontario, Canada + University of Waterloo, Waterloo, Ontario, Canada; DarwinAI, Waterloo, Ontario, Canada + University of Waterloo, Waterloo, Ontario, Canada", + "bibtex": "@article{Xu_Famouri_Bathla_Shafiee_Wong_2024, title={High-Throughput, High-Performance Deep Learning-Driven Light Guide Plate Surface Visual Quality Inspection Tailored for Real-World Manufacturing Environments}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26869}, DOI={10.1609/aaai.v37i13.26869}, abstractNote={Light guide plates are essential optical components widely used in a diverse range of applications ranging from medical lighting fixtures to back-lit TV displays. An essential step in the manufacturing of light guide plates is the quality inspection of defects such as scratches, bright/dark spots, and impurities. 
This is mainly done in industry through manual visual inspection for plate pattern irregularities, which is time-consuming and prone to human error and thus act as a significant barrier to high-throughput production. Advances in deep learning-driven computer vision has led to the exploration of automated visual quality inspection of light guide plates to improve inspection consistency, accuracy, and efficiency. However, given the computational constraints and high-throughput nature of real-world manufacturing environments, the widespread adoption of deep learning-driven visual inspection systems for inspecting light guide plates in real-world manufacturing environments has been greatly limited due to high computational requirements and integration challenges of existing deep learning approaches in research literature. In this work, we introduce a fully-integrated, high-throughput, high-performance deep learning-driven workflow for light guide plate surface visual quality inspection (VQI) tailored for real-world manufacturing environments. To enable automated VQI on the edge computing within the fully-integrated VQI system, a highly compact deep anti-aliased attention condenser neural network (which we name Light-DefectNet) tailored specifically for light guide plate surface defect detection in resource-constrained scenarios was created via machine-driven design exploration with computational and \u201cbest-practices\u201d constraints as well as L1 paired classification discrepancy loss. Experiments show that Light-DetectNet achieves a detection accuracy of \u223c98.2% on the LGPSDD benchmark while having just 770K parameters\n(\u223c33\u00d7 and \u223c6.9\u00d7 lower than ResNet-50 and EfficientNet-B0, respectively) and \u223c93M FLOPs (\u223c88\u00d7 and \u223c8.4\u00d7 lower than ResNet-50 and EfficientNet-B0, respectively) and \u223c8.8\u00d7 faster inference speed than EfficientNet-B0 on an embedded ARM processor. 
As such, the proposed deep learning-driven workflow, integrated with the aforementioned LightDefectNet neural network, is highly suited for high-throughput, high-performance light plate surface VQI within real-world manufacturing environments.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Carol and Famouri, Mahmoud and Bathla, Gautam and Shafiee, Mohammad Javad and Wong, Alexander}, year={2024}, month={Jul.}, pages={15745-15751} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26869/26641", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26869", + "pdf_size": 847584, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7846604662523987493&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "uwaterloo.ca; ; ; ; ", + "email": "uwaterloo.ca; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0+1;0+1", + "aff_unique_norm": "DarwinAI;University of Waterloo", + "aff_unique_dep": ";", + "aff_unique_url": ";https://uwaterloo.ca", + "aff_unique_abbr": ";UW", + "aff_campus_unique_index": "0;0;0;0+0;0+0", + "aff_campus_unique": "Waterloo", + "aff_country_unique_index": "0;0;0;0+0;0+0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26797", + "title": "Holistic Adversarial Robustness of Deep Learning Models", + "track": "senior member presentation summary papers", + "status": "Technical", + "abstract": "Adversarial robustness studies the worst-case performance of a machine learning model to ensure safety and reliability. With the proliferation of deep-learning-based technology, the potential risks associated with model development and deployment can be amplified and become dreadful vulnerabilities. 
This paper provides a comprehensive overview of research topics and foundational principles of research methods for adversarial robustness of deep learning models, including attacks, defenses, verification, and novel applications.", + "primary_area": "", + "author": "Pin-Yu Chen; Sijia Liu", + "authorids": "", + "aff": "IBM Research; Michigan State University", + "bibtex": "@article{Chen_Liu_2024, title={Holistic Adversarial Robustness of Deep Learning Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26797}, DOI={10.1609/aaai.v37i13.26797}, abstractNote={Adversarial robustness studies the worst-case performance of a machine learning model to ensure safety and reliability. With the proliferation of deep-learning-based technology, the potential risks associated with model development and deployment can be amplified and become dreadful vulnerabilities. This paper provides a comprehensive overview of research topics and foundational principles of research methods for adversarial robustness of deep learning models, including attacks, defenses, verification, and novel applications.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Pin-Yu and Liu, Sijia}, year={2024}, month={Jul.}, pages={15411-15420} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26797/26569", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26797", + "pdf_size": 468307, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12984681955843661681&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "ibm.com;msu.edu", + "email": "ibm.com;msu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "IBM;Michigan State University", + "aff_unique_dep": "IBM Research;", + "aff_unique_url": "https://www.ibm.com/research;https://www.msu.edu", + "aff_unique_abbr": "IBM;MSU", + "aff_campus_unique_index": "", 
+ "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25705", + "title": "How to Cut a Discrete Cake Fairly", + "track": "main", + "status": "Technical", + "abstract": "Cake-cutting is a fundamental model of dividing a heterogeneous resource, such as land, broadcast time, and advertisement space. In this study, we consider the problem of dividing indivisible goods fairly under the connectivity constraints of a path. We prove that a connected division of indivisible items satisfying a discrete counterpart of envy-freeness, called envy-freeness up to one good (EF1), always exists for any number of agents n with monotone valuations. Our result settles an open question raised by Bil\u00f2 et al. (2019), who proved that an EF1 connected division always exists for four agents with monotone valuations. Moreover, the proof can be extended to show the following (1) ``secretive\" and (2) ``extra\" versions: (1) for n agents with monotone valuations, the path can be divided into n connected bundles such that an EF1 assignment of the remaining bundles can be made to the other agents for any selection made by the \u201csecretive agent\u201d; (2) for n+1 agents with monotone valuations, the path can be divided into n connected bundles such that when any ``extra agent\u201d leaves, an EF1 assignment of the bundles can be made to the remaining agents.", + "primary_area": "game theory and economic paradigms", + "author": "Ayumi Igarashi", + "authorids": "", + "aff": "The University of Tokyo", + "bibtex": "@article{Igarashi_2023, title={How to Cut a Discrete Cake Fairly}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25705}, DOI={10.1609/aaai.v37i5.25705}, abstractNote={Cake-cutting is a fundamental model of dividing a heterogeneous resource, such as land, broadcast time, and advertisement space. 
In this study, we consider the problem of dividing indivisible goods fairly under the connectivity constraints of a path. We prove that a connected division of indivisible items satisfying a discrete counterpart of envy-freeness, called envy-freeness up to one good (EF1), always exists for any number of agents n with monotone valuations. Our result settles an open question raised by Bil\u00f2 et al. (2019), who proved that an EF1 connected division always exists for four agents with monotone valuations. Moreover, the proof can be extended to show the following (1) ``secretive" and (2) ``extra" versions: (1) for n agents with monotone valuations, the path can be divided into n connected bundles such that an EF1 assignment of the remaining bundles can be made to the other agents for any selection made by the \u201csecretive agent\u201d; (2) for n+1 agents with monotone valuations, the path can be divided into n connected bundles such that when any ``extra agent\u201d leaves, an EF1 assignment of the bundles can be made to the remaining agents.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Igarashi, Ayumi}, year={2023}, month={Jun.}, pages={5681-5688} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25705/25477", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25705", + "pdf_size": 152159, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1898545036116533154&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mist.i.u-tokyo.ac.jp", + "email": "mist.i.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Tokyo", + "aff_unique_dep": "", + "aff_unique_url": "https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "UTokyo", + "aff_country_unique_index": "0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26467", + "title": "Human Assisted Learning by Evolutionary 
Multi-Objective Optimization", + "track": "main", + "status": "Technical", + "abstract": "Machine learning models have liberated manpower greatly in many real-world tasks, but their predictions are still worse than humans on some specific instances. To improve the performance, it is natural to optimize machine learning models to take decisions for most instances while delivering a few tricky instances to humans, resulting in the problem of Human Assisted Learning (HAL). Previous works mainly formulated HAL as a constrained optimization problem that tries to find a limited subset of instances for human decision such that the sum of model and human errors can be minimized; and employed the greedy algorithms, whose performance, however, may be limited due to the greedy nature. In this paper, we propose a new framework HAL-EMO based on Evolutionary Multi-objective Optimization, which reformulates HAL as a bi-objective optimization problem that minimizes the number of selected instances for human decision and the total errors simultaneously, and employs a Multi-Objective Evolutionary Algorithm (MOEA) to solve it. We implement HAL-EMO using two MOEAs, the popular NSGA-II as well as the theoretically grounded GSEMO. We also propose a specific MOEA, called BSEMO, with biased selection and balanced mutation for HAL-EMO, and prove that for human assisted regression and classification, HAL-EMO using BSEMO can achieve better and same theoretical guarantees than previous greedy algorithms, respectively. 
Experiments on the tasks of medical diagnosis and content moderation show the superiority of HAL-EMO (with either NSGA-II, GSEMO or BSEMO) over previous algorithms, and that using BSEMO leads to the best performance of HAL-EMO.", + "primary_area": "search and optimization", + "author": "Dan-Xuan Liu; Xin Mu; Chao Qian", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; Peng Cheng Laboratory, Shenzhen 518000, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Liu_Mu_Qian_2023, title={Human Assisted Learning by Evolutionary Multi-Objective Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26467}, DOI={10.1609/aaai.v37i10.26467}, abstractNote={Machine learning models have liberated manpower greatly in many real-world tasks, but their predictions are still worse than humans on some specific instances. To improve the performance, it is natural to optimize machine learning models to take decisions for most instances while delivering a few tricky instances to humans, resulting in the problem of Human Assisted Learning (HAL). Previous works mainly formulated HAL as a constrained optimization problem that tries to find a limited subset of instances for human decision such that the sum of model and human errors can be minimized; and employed the greedy algorithms, whose performance, however, may be limited due to the greedy nature. In this paper, we propose a new framework HAL-EMO based on Evolutionary Multi-objective Optimization, which reformulates HAL as a bi-objective optimization problem that minimizes the number of selected instances for human decision and the total errors simultaneously, and employs a Multi-Objective Evolutionary Algorithm (MOEA) to solve it. We implement HAL-EMO using two MOEAs, the popular NSGA-II as well as the theoretically grounded GSEMO. 
We also propose a specific MOEA, called BSEMO, with biased selection and balanced mutation for HAL-EMO, and prove that for human assisted regression and classification, HAL-EMO using BSEMO can achieve better and same theoretical guarantees than previous greedy algorithms, respectively. Experiments on the tasks of medical diagnosis and content moderation show the superiority of HAL-EMO (with either NSGA-II, GSEMO or BSEMO) over previous algorithms, and that using BSEMO leads to the best performance of HAL-EMO.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Dan-Xuan and Mu, Xin and Qian, Chao}, year={2023}, month={Jun.}, pages={12453-12461} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26467/26239", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26467", + "pdf_size": 3084223, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6841497438403473596&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;pcl.ac.cn;lamda.nju.edu.cn", + "email": "lamda.nju.edu.cn;pcl.ac.cn;lamda.nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Nanjing University;Peng Cheng Laboratory", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;", + "aff_unique_abbr": "Nanjing University;", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Nanjing;Shenzhen", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25754", + "title": "Human Joint Kinematics Diffusion-Refinement for Stochastic Motion Prediction", + "track": "main", + "status": "Technical", + "abstract": "Stochastic human motion prediction aims to forecast multiple plausible future motions given a single pose sequence from the past. 
Most previous works focus on designing elaborate losses to improve the accuracy, while the diversity is typically characterized by randomly sampling a set of latent variables from the latent prior, which is then decoded into possible motions. This joint training of sampling and decoding, however, suffers from posterior collapse as the learned latent variables tend to be ignored by a strong decoder, leading to limited diversity. Alternatively, inspired by the diffusion process in nonequilibrium thermodynamics, we propose MotionDiff, a diffusion probabilistic model to treat the kinematics of human joints as heated particles, which will diffuse from original states to a noise distribution. This process not only offers a natural way to obtain the \"whitened'' latents without any trainable parameters, but also introduces a new noise in each diffusion step, both of which facilitate more diverse motions. Human motion prediction is then regarded as the reverse diffusion process that converts the noise distribution into realistic future motions conditioned on the observed sequence. Specifically, MotionDiff consists of two parts: a spatial-temporal transformer-based diffusion network to generate diverse yet plausible motions, and a flexible refinement network to further enable geometric losses and align with the ground truth. 
Experimental results on two datasets demonstrate that our model yields the competitive performance in terms of both diversity and accuracy.", + "primary_area": "humans and ai", + "author": "Dong Wei; Huaijiang Sun; Bin Li; Jianfeng Lu; Weiqing Li; Xiaoning Sun; Shengxiang Hu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; Tianjin AiForward Science and Technology Co., Ltd., Tianjin, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", + "bibtex": "@article{Wei_Sun_Li_Lu_Li_Sun_Hu_2023, title={Human Joint Kinematics Diffusion-Refinement for Stochastic Motion Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25754}, DOI={10.1609/aaai.v37i5.25754}, abstractNote={Stochastic human motion prediction aims to forecast multiple plausible future motions given a single pose sequence from the past. Most previous works focus on designing elaborate losses to improve the accuracy, while the diversity is typically characterized by randomly sampling a set of latent variables from the latent prior, which is then decoded into possible motions. This joint training of sampling and decoding, however, suffers from posterior collapse as the learned latent variables tend to be ignored by a strong decoder, leading to limited diversity. 
Alternatively, inspired by the diffusion process in nonequilibrium thermodynamics, we propose MotionDiff, a diffusion probabilistic model to treat the kinematics of human joints as heated particles, which will diffuse from original states to a noise distribution. This process not only offers a natural way to obtain the "whitened\u2019\u2019 latents without any trainable parameters, but also introduces a new noise in each diffusion step, both of which facilitate more diverse motions. Human motion prediction is then regarded as the reverse diffusion process that converts the noise distribution into realistic future motions conditioned on the observed sequence. Specifically, MotionDiff consists of two parts: a spatial-temporal transformer-based diffusion network to generate diverse yet plausible motions, and a flexible refinement network to further enable geometric losses and align with the ground truth. Experimental results on two datasets demonstrate that our model yields the competitive performance in terms of both diversity and accuracy.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wei, Dong and Sun, Huaijiang and Li, Bin and Lu, Jianfeng and Li, Weiqing and Sun, Xiaoning and Hu, Shengxiang}, year={2023}, month={Jun.}, pages={6110-6118} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25754/25526", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25754", + "pdf_size": 1288088, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5646116590386622007&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "njust.edu.cn;njust.edu.cn;aiforward.com;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;aiforward.com;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;0", + "aff_unique_norm": "Nanjing University of Science 
and Technology;Tianjin AiForward Science and Technology Co., Ltd.", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.nust.edu.cn;", + "aff_unique_abbr": "NUST;", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Nanjing;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26678", + "title": "Human Mobility Modeling during the COVID-19 Pandemic via Deep Graph Diffusion Infomax", + "track": "aaai special track", + "status": "Technical", + "abstract": "Non-Pharmaceutical Interventions (NPIs), such as social gathering restrictions, have shown effectiveness to slow the transmission of COVID-19 by reducing the contact of people. To support policy-makers, multiple studies have first modelled human mobility via macro indicators (e.g., average daily travel distance) and then study the effectiveness of NPIs. In this work, we focus on mobility modelling and, from a micro perspective, aim to predict locations that will be visited by COVID-19 cases. Since NPIs generally cause economic and societal loss, such a prediction benefits governments when they design and evaluate them. However, in real-world situations, strict privacy data protection regulations result in severe data sparsity problems (i.e., limited case and location information).\nTo address these challenges and jointly model variables including a geometric graph, a set of diffusions and a set of locations, we propose a model named Deep Graph Diffusion Infomax (DGDI). We show the maximization of DGDI can be bounded by two tractable components: a univariate Mutual Information (MI) between geometric graph and diffusion representation, and a univariate MI between diffusion representation and location representation. To facilitate the research of COVID-19 prediction, we present two benchmarks that contain geometric graphs and location histories of COVID-19 cases. 
Extensive experiments on the two benchmarks show that DGDI significantly outperforms other competing methods.", + "primary_area": "ai for social impact", + "author": "Yang Liu; Yu Rong; Zhuoning Guo; Nuo Chen; Tingyang Xu; Fugee Tsung; Jia Li", + "authorids": "", + "aff": "The Hong Kong University of Science and Technology (Guangzhou)+The Hong Kong University of Science and Technology; Tencent AI Lab; The Hong Kong University of Science and Technology (Guangzhou); The Hong Kong University of Science and Technology (Guangzhou); Tencent AI Lab; The Hong Kong University of Science and Technology+The Hong Kong University of Science and Technology (Guangzhou); The Hong Kong University of Science and Technology+The Hong Kong University of Science and Technology (Guangzhou)", + "bibtex": "@article{Liu_Rong_Guo_Chen_Xu_Tsung_Li_2023, title={Human Mobility Modeling during the COVID-19 Pandemic via Deep Graph Diffusion Infomax}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26678}, DOI={10.1609/aaai.v37i12.26678}, abstractNote={Non-Pharmaceutical Interventions (NPIs), such as social gathering restrictions, have shown effectiveness to slow the transmission of COVID-19 by reducing the contact of people. To support policy-makers, multiple studies have first modelled human mobility via macro indicators (e.g., average daily travel distance) and then study the effectiveness of NPIs. In this work, we focus on mobility modelling and, from a micro perspective, aim to predict locations that will be visited by COVID-19 cases. Since NPIs generally cause economic and societal loss, such a prediction benefits governments when they design and evaluate them. 
However, in real-world situations, strict privacy data protection regulations result in severe data sparsity problems (i.e., limited case and location information).\nTo address these challenges and jointly model variables including a geometric graph, a set of diffusions and a set of locations, we propose a model named Deep Graph Diffusion Infomax (DGDI). We show the maximization of DGDI can be bounded by two tractable components: a univariate Mutual Information (MI) between geometric graph and diffusion representation, and a univariate MI between diffusion representation and location representation. To facilitate the research of COVID-19 prediction, we present two benchmarks that contain geometric graphs and location histories of COVID-19 cases. Extensive experiments on the two benchmarks show that DGDI significantly outperforms other competing methods.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yang and Rong, Yu and Guo, Zhuoning and Chen, Nuo and Xu, Tingyang and Tsung, Fugee and Li, Jia}, year={2023}, month={Jun.}, pages={14347-14355} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26678/26450", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26678", + "pdf_size": 332861, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18000810190985147860&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 7, + "aff_domain": "connect.ust.hk;hotmail.com;connect.hkust-gz.edu.cn;connect.hkust-gz.edu.cn;tencent.com;ust.hk;ust.hk", + "email": "connect.ust.hk;hotmail.com;connect.hkust-gz.edu.cn;connect.hkust-gz.edu.cn;tencent.com;ust.hk;ust.hk", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;0;0;2;1+0;1+0", + "aff_unique_norm": "The Hong Kong University of Science and Technology;Hong Kong University of Science and Technology;Tencent", + "aff_unique_dep": ";;Tencent AI Lab", + "aff_unique_url": 
"https://www.ust.hk;https://www.ust.hk;https://ai.tencent.com", + "aff_unique_abbr": "HKUST;HKUST;Tencent AI Lab", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0+0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26822", + "title": "Human-Aware AI \u2013 A Foundational Framework for Human-AI Interaction", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "We are living through a revolutionary moment in AI history. We are seeing the development of impressive new AI systems at a rate that was unimaginable just a few years ago. However, AI's true potential to transform society remains unrealized, in no small part due to the inability of current systems to work effectively with people. A major hurdle to achieving such coordination is the inherent asymmetry between the AI system and its users. In this talk, I will discuss how the framework of Human-Aware AI (HAAI) provides us with the tools required to bridge this gap and support fluent and intuitive coordination between the AI system and its users.", + "primary_area": "", + "author": "Sarath Sreedharan", + "authorids": "", + "aff": "Colorado State University", + "bibtex": "@article{Sreedharan_2024, title={Human-Aware AI \u2013 A Foundational Framework for Human-AI Interaction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26822}, DOI={10.1609/aaai.v37i13.26822}, abstractNote={We are living through a revolutionary moment in AI history. We are seeing the development of impressive new AI systems at a rate that was unimaginable just a few years ago. However, AI\u2019s true potential to transform society remains unrealized, in no small part due to the inability of current systems to work effectively with people. A major hurdle to achieving such coordination is the inherent asymmetry between the AI system and its users. 
In this talk, I will discuss how the framework of Human-Aware AI (HAAI) provides us with the tools required to bridge this gap and support fluent and intuitive coordination between the AI system and its users.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sreedharan, Sarath}, year={2024}, month={Jul.}, pages={15455-15455} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26822/26594", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26822", + "pdf_size": 728408, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12792542931849642895&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "colostate.edu", + "email": "colostate.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Colorado State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.colostate.edu", + "aff_unique_abbr": "CSU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25589", + "title": "Human-Instructed Deep Hierarchical Generative Learning for Automated Urban Planning", + "track": "main", + "status": "Technical", + "abstract": "The essential task of urban planning is to generate the optimal land-use configuration of a target area. However, traditional urban planning is time-consuming and labor-intensive. Deep generative learning gives us hope that we can automate this planning process and come up with the ideal urban plans. While remarkable achievements have been obtained, they have exhibited limitations in lacking awareness of: 1) the hierarchical dependencies between functional zones and spatial grids; 2) the peer dependencies among functional zones; and 3) human regulations to ensure the usability of generated configurations. To address these limitations, we develop a novel human-instructed deep hierarchical generative model. 
We rethink the urban planning generative task from a unique functionality perspective, where we summarize planning requirements into different functionality projections for better urban plan generation. To this end, we develop a three-stage generation process from a target area to zones to grids. The first stage is to label the grids of a target area with latent functionalities to discover functional zones. The second stage is to perceive the planning requirements to form urban functionality projections. We propose a novel module: functionalizer to project the embedding of human instructions and geospatial contexts to the zone-level plan to obtain such projections. Each projection includes the information of land-use portfolios and the structural dependencies across spatial grids in terms of a specific urban function. The third stage is to leverage multi-attentions to model the zone-zone peer dependencies of the functionality projections to generate grid-level land-use configurations. Finally, we present extensive experiments to demonstrate the effectiveness of our framework.", + "primary_area": "data mining and knowledge management", + "author": "Dongjie Wang; Lingfei Wu; Denghui Zhang; Jingbo Zhou; Leilei Sun; Yanjie Fu", + "authorids": "", + "aff": "University of Central Florida; Pinterest; Rutgers University; Baidu Research; Beihang University; University of Central Florida", + "bibtex": "@article{Wang_Wu_Zhang_Zhou_Sun_Fu_2023, title={Human-Instructed Deep Hierarchical Generative Learning for Automated Urban Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25589}, DOI={10.1609/aaai.v37i4.25589}, abstractNote={The essential task of urban planning is to generate the optimal land-use configuration of a target area. However, traditional urban planning is time-consuming and labor-intensive. Deep generative learning gives us hope that we can automate this planning process and come up with the ideal urban plans. 
While remarkable achievements have been obtained, they have exhibited limitations in lacking awareness of: 1) the hierarchical dependencies between functional zones and spatial grids; 2) the peer dependencies among functional zones; and 3) human regulations to ensure the usability of generated configurations. To address these limitations, we develop a novel human-instructed deep hierarchical generative model. We rethink the urban planning generative task from a unique functionality perspective, where we summarize planning requirements into different functionality projections for better urban plan generation. To this end, we develop a three-stage generation process from a target area to zones to grids. The first stage is to label the grids of a target area with latent functionalities to discover functional zones. The second stage is to perceive the planning requirements to form urban functionality projections. We propose a novel module: functionalizer to project the embedding of human instructions and geospatial contexts to the zone-level plan to obtain such projections. Each projection includes the information of land-use portfolios and the structural dependencies across spatial grids in terms of a specific urban function. The third stage is to leverage multi-attentions to model the zone-zone peer dependencies of the functionality projections to generate grid-level land-use configurations. 
Finally, we present extensive experiments to demonstrate the effectiveness of our framework.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Dongjie and Wu, Lingfei and Zhang, Denghui and Zhou, Jingbo and Sun, Leilei and Fu, Yanjie}, year={2023}, month={Jun.}, pages={4660-4667} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25589/25361", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25589", + "pdf_size": 4010569, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16346787086852701477&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "knights.ucf.edu;email.wm.edu;gamil.com;baidu.com;buaa.edu.cn;ucf.edu", + "email": "knights.ucf.edu;email.wm.edu;gamil.com;baidu.com;buaa.edu.cn;ucf.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;0", + "aff_unique_norm": "University of Central Florida;Pinterest;Rutgers University;Baidu;Beihang University", + "aff_unique_dep": ";;;Baidu Research;", + "aff_unique_url": "https://www.ucf.edu;https://www.pinterest.com;https://www.rutgers.edu;https://research.baidu.com;http://www.buaa.edu.cn/", + "aff_unique_abbr": "UCF;Pinterest;Rutgers;Baidu;BUAA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "article-25747", + "title": "Human-in-the-Loop Vehicle ReID", + "track": "main", + "status": "Technical", + "abstract": "Vehicle ReID has been an active topic in computer vision, with a substantial number of deep neural models proposed as end-to-end solutions. In this paper, we solve the problem from a new perspective and present an interesting variant called human-in-the-loop vehicle ReID to leverage interactive (and possibly wrong) human feedback signal for performance enhancement. 
Such human-machine cooperation mode is orthogonal to existing ReID models. To avoid incremental training overhead, we propose an Interaction ReID Network (IRIN) that can directly accept the feedback signal as an input and adjust the embedding of query image in an online fashion. IRIN is offline trained by simulating the human interaction process, with multiple optimization strategies to fully exploit the feedback signal. Experimental results show that even by interacting with flawed feedback generated by non-experts, IRIN still outperforms state-of-the-art ReID models by a considerable margin. If the feedback contains no false positive, IRIN boosts the mAP in Veri776 from 81.6% to 95.2% with only 5 rounds of interaction per query image.", + "primary_area": "humans and ai", + "author": "Zepeng Li; Dongxiang Zhang; Yanyan Shen; Gang Chen", + "authorids": "", + "aff": "Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University; Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University; Department of Computer Science and Engineering, Shanghai Jiao Tong University; Key Lab of Intelligent Computing Based Big Data of Zhejiang Province, Zhejiang University", + "bibtex": "@article{Li_Zhang_Shen_Chen_2023, title={Human-in-the-Loop Vehicle ReID}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25747}, DOI={10.1609/aaai.v37i5.25747}, abstractNote={Vehicle ReID has been an active topic in computer vision, with a substantial number of deep neural models proposed as end-to-end solutions. In this paper, we solve the problem from a new perspective and present an interesting variant called human-in-the-loop vehicle ReID to leverage interactive (and possibly wrong) human feedback signal for performance enhancement. Such human-machine cooperation mode is orthogonal to existing ReID models. 
To avoid incremental training overhead, we propose an Interaction ReID Network (IRIN) that can directly accept the feedback signal as an input and adjust the embedding of query image in an online fashion. IRIN is offline trained by simulating the human interaction process, with multiple optimization strategies to fully exploit the feedback signal. Experimental results show that even by interacting with flawed feedback generated by non-experts, IRIN still outperforms state-of-the-art ReID models by a considerable margin. If the feedback contains no false positive, IRIN boosts the mAP in Veri776 from 81.6% to 95.2% with only 5 rounds of interaction per query image.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zepeng and Zhang, Dongxiang and Shen, Yanyan and Chen, Gang}, year={2023}, month={Jun.}, pages={6048-6055} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25747/25519", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25747", + "pdf_size": 3658337, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4483318563503918356&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;zju.edu.cn;sjtu.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;sjtu.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Zhejiang University;Shanghai Jiao Tong University", + "aff_unique_dep": "Key Lab of Intelligent Computing Based Big Data of Zhejiang Province;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.zju.edu.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "ZJU;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25111", + "title": "Hybrid CNN-Transformer Feature Fusion for Single Image Deraining", + "track": "main", + 
"status": "Technical", + "abstract": "Since rain streaks exhibit diverse geometric appearances and irregular overlapped phenomena, these complex characteristics challenge the design of an effective single image deraining model. To this end, rich local-global information representations are increasingly indispensable for better satisfying rain removal. In this paper, we propose a lightweight Hybrid CNN-Transformer Feature Fusion Network (dubbed as HCT-FFN) in a stage-by-stage progressive manner, which can harmonize these two architectures to help image restoration by leveraging their individual learning strengths. Specifically, we stack a sequence of the degradation-aware mixture of experts (DaMoE) modules in the CNN-based stage, where appropriate local experts adaptively enable the model to emphasize spatially-varying rain distribution features. As for the Transformer-based stage, a background-aware vision Transformer (BaViT) module is employed to complement spatially-long feature dependencies of images, so as to achieve global texture recovery while preserving the required structure. Considering the indeterminate knowledge discrepancy among CNN features and Transformer features, we introduce an interactive fusion branch at adjacent stages to further facilitate the reconstruction of high-quality deraining results. Extensive evaluations show the effectiveness and extensibility of our developed HCT-FFN. 
The source code is available at https://github.com/cschenxiang/HCT-FFN.", + "primary_area": "computer vision i", + "author": "Xiang Chen; Jinshan Pan; Jiyang Lu; Zhentao Fan; Hao Li", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanjing University of Science and Technology; School of Computer Science and Engineering, Nanjing University of Science and Technology; College of Electronic Information Engineering, Shenyang Aerospace University; College of Electronic Information Engineering, Shenyang Aerospace University; School of Computer Science and Engineering, Nanjing University of Science and Technology", + "bibtex": "@article{Chen_Pan_Lu_Fan_Li_2023, title={Hybrid CNN-Transformer Feature Fusion for Single Image Deraining}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25111}, DOI={10.1609/aaai.v37i1.25111}, abstractNote={Since rain streaks exhibit diverse geometric appearances and irregular overlapped phenomena, these complex characteristics challenge the design of an effective single image deraining model. To this end, rich local-global information representations are increasingly indispensable for better satisfying rain removal. In this paper, we propose a lightweight Hybrid CNN-Transformer Feature Fusion Network (dubbed as HCT-FFN) in a stage-by-stage progressive manner, which can harmonize these two architectures to help image restoration by leveraging their individual learning strengths. Specifically, we stack a sequence of the degradation-aware mixture of experts (DaMoE) modules in the CNN-based stage, where appropriate local experts adaptively enable the model to emphasize spatially-varying rain distribution features. As for the Transformer-based stage, a background-aware vision Transformer (BaViT) module is employed to complement spatially-long feature dependencies of images, so as to achieve global texture recovery while preserving the required structure. 
Considering the indeterminate knowledge discrepancy among CNN features and Transformer features, we introduce an interactive fusion branch at adjacent stages to further facilitate the reconstruction of high-quality deraining results. Extensive evaluations show the effectiveness and extensibility of our developed HCT-FFN. The source code is available at https://github.com/cschenxiang/HCT-FFN.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Xiang and Pan, Jinshan and Lu, Jiyang and Fan, Zhentao and Li, Hao}, year={2023}, month={Jun.}, pages={378-386} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25111/24883", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25111", + "pdf_size": 5521977, + "gs_citation": 59, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16298392267643004171&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "njust.edu.cn;njust.edu.cn;stu.sau.edu.cn;stu.sau.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;stu.sau.edu.cn;stu.sau.edu.cn;njust.edu.cn", + "github": "https://github.com/cschenxiang/HCT-FFN", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;1;0", + "aff_unique_norm": "Nanjing University of Science and Technology;Shenyang Aerospace University", + "aff_unique_dep": "School of Computer Science and Engineering;College of Electronic Information Engineering", + "aff_unique_url": "http://www.nust.edu.cn;http://www.syau.edu.cn", + "aff_unique_abbr": "NUST;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25519", + "title": "Hybrid Learning with New Value Function for the Maximum Common Induced Subgraph Problem", + "track": "main", + "status": "Technical", + "abstract": "Maximum Common Induced Subgraph (MCIS) is an important NP-hard problem with wide real-world applications. 
An efficient class of MCIS algorithms uses Branch-and-Bound (BnB), consisting in successively selecting vertices to match and pruning when it is discovered that a solution better than the best solution found so far does not exist. The method of selecting the vertices to match is essential for the performance of BnB. In this paper, we propose a new value function and a hybrid selection strategy used in reinforcement learning to define a new vertex selection method, and propose a new BnB algorithm, called McSplitDAL, for MCIS. Extensive experiments show that McSplitDAL significantly improves the current best BnB algorithms, McSplit+LL and McSplit+RL. An empirical analysis is also performed to illustrate why the new value function and the hybrid selection strategy are effective.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yanli Liu; Jiming Zhao; Chu-Min Li; Hua Jiang; Kun He", + "authorids": "", + "aff": "School of Science, Wuhan University of Science and Technology, China; School of Science, Wuhan University of Science and Technology, China; MIS, Universit\u00e9 de Picardie Jules Verne, France; Engineering Research Center of Cyberspace & School of Software, Yunnan University, China; School of Computer Science and Technology, Huazhong University of Science and Technology, China", + "bibtex": "@article{Liu_Zhao_Li_Jiang_He_2023, title={Hybrid Learning with New Value Function for the Maximum Common Induced Subgraph Problem}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25519}, DOI={10.1609/aaai.v37i4.25519}, abstractNote={Maximum Common Induced Subgraph (MCIS) is an important NP-hard problem with wide real-world applications. An efficient class of MCIS algorithms uses Branch-and-Bound (BnB), consisting in successively selecting vertices to match and pruning when it is discovered that a solution better than the best solution found so far does not exist. 
The method of selecting the vertices to match is essential for the performance of BnB. In this paper, we propose a new value function and a hybrid selection strategy used in reinforcement learning to define a new vertex selection method, and propose a new BnB algorithm, called McSplitDAL, for MCIS. Extensive experiments show that McSplitDAL significantly improves the current best BnB algorithms, McSplit+LL and McSplit+RL. An empirical analysis is also performed to illustrate why the new value function and the hybrid selection strategy are effective.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yanli and Zhao, Jiming and Li, Chu-Min and Jiang, Hua and He, Kun}, year={2023}, month={Jun.}, pages={4044-4051} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25519/25291", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25519", + "pdf_size": 797840, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17642276854346903365&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "wust.edu.cn;wust.edu.cn;u-picardie.fr;ynu.edu.cn;hust.edu.cn", + "email": "wust.edu.cn;wust.edu.cn;u-picardie.fr;ynu.edu.cn;hust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;3", + "aff_unique_norm": "Wuhan University of Science and Technology;Universit\u00e9 de Picardie Jules Verne;Yunnan University;Huazhong University of Science and Technology", + "aff_unique_dep": "School of Science;MIS;School of Software;School of Computer Science and Technology", + "aff_unique_url": ";https://www.univ-ji.fr;http://www.ynu.edu.cn;http://www.hust.edu.cn", + "aff_unique_abbr": ";;;HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;France" + }, + { + "id": "article-25333", + "title": "Hybrid Pixel-Unshuffled Network for Lightweight Image 
Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Convolutional neural network (CNN) has achieved great success on image super-resolution (SR). However, most deep CNN-based SR models take massive computations to obtain high performance. Downsampling features for multi-resolution fusion is an efficient and effective way to improve the performance of visual recognition. Still, it is counter-intuitive in the SR task, which needs to project a low-resolution input to high-resolution. In this paper, we propose a novel Hybrid Pixel-Unshuffled Network (HPUN) by introducing an efficient and effective downsampling module into the SR task. The network contains pixel-unshuffled downsampling and Self-Residual Depthwise Separable Convolutions. Specifically, we utilize pixel-unshuffle operation to downsample the input features and use grouped convolution to reduce the channels. Besides, we enhance the depthwise convolution's performance by adding the input feature to its output. The comparison findings demonstrate that, with fewer parameters and computational costs, our HPUN achieves and surpasses the state-of-the-art performance on SISR. All results are provided in the github https://github.com/Sun1992/HPUN.", + "primary_area": "computer vision ii", + "author": "Bin Sun; Yulun Zhang; Songyao Jiang; Yun Fu", + "authorids": "", + "aff": "Northeastern University, Boston, MA, USA+AInnovation Labs Inc., Boston, MA, USA; ETH Z\u00fcrich, Z\u00fcrich, Switzerland; Northeastern University, Boston, MA, USA; Northeastern University, Boston, MA, USA+AInnovation Labs Inc., Boston, MA, USA", + "bibtex": "@article{Sun_Zhang_Jiang_Fu_2023, title={Hybrid Pixel-Unshuffled Network for Lightweight Image Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25333}, DOI={10.1609/aaai.v37i2.25333}, abstractNote={Convolutional neural network (CNN) has achieved great success on image super-resolution (SR). 
However, most deep CNN-based SR models take massive computations to obtain high performance. Downsampling features for multi-resolution fusion is an efficient and effective way to improve the performance of visual recognition. Still, it is counter-intuitive in the SR task, which needs to project a low-resolution input to high-resolution. In this paper, we propose a novel Hybrid Pixel-Unshuffled Network (HPUN) by introducing an efficient and effective downsampling module into the SR task. The network contains pixel-unshuffled downsampling and Self-Residual Depthwise Separable Convolutions. Specifically, we utilize pixel-unshuffle operation to downsample the input features and use grouped convolution to reduce the channels. Besides, we enhance the depthwise convolution\u2019s performance by adding the input feature to its output. The comparison findings demonstrate that, with fewer parameters and computational costs, our HPUN achieves and surpasses the state-of-the-art performance on SISR. 
All results are provided in the github https://github.com/Sun1992/HPUN.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Bin and Zhang, Yulun and Jiang, Songyao and Fu, Yun}, year={2023}, month={Jun.}, pages={2375-2383} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25333/25105", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25333", + "pdf_size": 827266, + "gs_citation": 82, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4550287002514014844&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "northeastern.edu;gmail.com;gmail.com;ece.neu.edu", + "email": "northeastern.edu;gmail.com;gmail.com;ece.neu.edu", + "github": "https://github.com/Sun1992/HPUN", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0;0+1", + "aff_unique_norm": "Northeastern University;AInnovation Labs Inc.;ETH Z\u00fcrich", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.northeastern.edu;;https://www.ethz.ch", + "aff_unique_abbr": "NEU;;ETHZ", + "aff_campus_unique_index": "0+0;1;0;0+0", + "aff_campus_unique": "Boston;Z\u00fcrich", + "aff_country_unique_index": "0+0;1;0;0+0", + "aff_country_unique": "United States;Switzerland" + }, + { + "id": "article-25240", + "title": "HybridCap: Inertia-Aid Monocular Capture of Challenging Human Motions", + "track": "main", + "status": "Technical", + "abstract": "Monocular 3D motion capture (mocap) is beneficial to many applications. The use of a single camera, however, often fails to handle occlusions of different body parts and hence it is limited to capture relatively simple movements. We present a light-weight, hybrid mocap technique called HybridCap that augments the camera with only 4 Inertial Measurement Units (IMUs) in a novel learning-and-optimization framework. 
We first employ a weakly-supervised and hierarchical motion inference module based on cooperative pure residual recurrent blocks that serve as limb, body and root trackers as well as an inverse kinematics solver. Our network effectively narrows the search space of plausible motions via coarse-to-fine pose estimation and manages to tackle challenging movements with high efficiency. We further develop a hybrid optimization scheme that combines inertial feedback and visual cues to improve tracking accuracy. Extensive experiments on various datasets demonstrate HybridCap can robustly handle challenging movements ranging from fitness actions to Latin dance. It also achieves real-time performance up to 60 fps with state-of-the-art accuracy.", + "primary_area": "computer vision ii", + "author": "Han Liang; Yannan He; Chengfeng Zhao; Mutian Li; Jingya Wang; Jingyi Yu; Lan Xu", + "authorids": "", + "aff": "School of Information Science and Technology, ShanghaiTech University; School of Information Science and Technology, ShanghaiTech University; School of Information Science and Technology, ShanghaiTech University; School of Information Science and Technology, ShanghaiTech University; School of Information Science and Technology, ShanghaiTech University + Shanghai Frontiers Science Center of Human-centered Artificial Intelligence; School of Information Science and Technology, ShanghaiTech University + Shanghai Frontiers Science Center of Human-centered Artificial Intelligence; School of Information Science and Technology, ShanghaiTech University + Shanghai Frontiers Science Center of Human-centered Artificial Intelligence", + "bibtex": "@article{Liang_He_Zhao_Li_Wang_Yu_Xu_2023, title={HybridCap: Inertia-Aid Monocular Capture of Challenging Human Motions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25240}, DOI={10.1609/aaai.v37i2.25240}, abstractNote={Monocular 3D motion capture (mocap) is beneficial to many applications. 
The use of a single camera, however, often fails to handle occlusions of different body parts and hence it is limited to capture relatively simple movements. We present a light-weight, hybrid mocap technique called HybridCap that augments the camera with only 4 Inertial Measurement Units (IMUs) in a novel learning-and-optimization framework. We first employ a weakly-supervised and hierarchical motion inference module based on cooperative pure residual recurrent blocks that serve as limb, body and root trackers as well as an inverse kinematics solver. Our network effectively narrows the search space of plausible motions via coarse-to-fine pose estimation and manages to tackle challenging movements with high efficiency. We further develop a hybrid optimization scheme that combines inertial feedback and visual cues to improve tracking accuracy. Extensive experiments on various datasets demonstrate HybridCap can robustly handle challenging movements ranging from fitness actions to Latin dance. 
It also achieves real-time performance up to 60 fps with state-of-the-art accuracy.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Han and He, Yannan and Zhao, Chengfeng and Li, Mutian and Wang, Jingya and Yu, Jingyi and Xu, Lan}, year={2023}, month={Jun.}, pages={1539-1548} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25240/25012", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25240", + "pdf_size": 19899394, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16933138477544226404&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0+1;0+1;0+1", + "aff_unique_norm": "ShanghaiTech University;Shanghai Frontiers Science Center", + "aff_unique_dep": "School of Information Science and Technology;Human-centered Artificial Intelligence", + "aff_unique_url": "https://www.shanghaitech.edu.cn;http://www.sfsc.ustc.edu.cn", + "aff_unique_abbr": "ShanghaiTech;", + "aff_campus_unique_index": "0;0;0;0;0+0;0+0;0+0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26569", + "title": "HybridPrompt: Bridging Language Models and Human Priors in Prompt Tuning for Visual Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Visual Question Answering (VQA) aims to answer the natural language question about a given image by understanding multimodal content. 
However, the answer quality of most existing visual-language pre-training (VLP) methods is still limited, mainly due to: (1) Incompatibility. Upstream pre-training tasks are generally incompatible with downstream question answering tasks, which makes the knowledge from the language model not well transferable to downstream tasks, and greatly limits their performance in few-shot scenarios; (2) Under-fitting. They generally do not integrate human priors to compensate for universal knowledge from language models, so as to fit the challenging VQA problem and generate reliable answers. To address these issues, we propose HybridPrompt, a cloze- and verify-style hybrid prompt framework with bridging language models and human priors in prompt tuning for VQA. Specifically, we first modify the input questions into the cloze-style prompts to narrow the gap between upstream pre-training tasks and downstream VQA task, which ensures that the universal knowledge in the language model can be better transferred to subsequent human prior-guided prompt tuning. Then, we imitate the cognitive process of human brain to introduce topic and sample related priors to construct a dynamic learnable prompt template for human prior-guided prompt learning. Finally, we add fixed-length learnable free-parameters to further enhance the generalizability and scalability of prompt learning in the VQA model. Experimental results verify the effectiveness of HybridPrompt, showing that it achieves competitive performance against previous methods on widely-used VQAv2 dataset and obtains new state-of-the-art results. 
Our code is released at: https://github.com/zhizhi111/hybrid.", + "primary_area": "speech natural language processing", + "author": "Zhiyuan Ma; Zhihuan Yu; Jianjun Li; Guohui Li", + "authorids": "", + "aff": "Huazhong University of Science and Technology (HUST), China; Huazhong University of Science and Technology (HUST), China; Huazhong University of Science and Technology (HUST), China; Huazhong University of Science and Technology (HUST), China", + "bibtex": "@article{Ma_Yu_Li_Li_2023, title={HybridPrompt: Bridging Language Models and Human Priors in Prompt Tuning for Visual Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26569}, DOI={10.1609/aaai.v37i11.26569}, abstractNote={Visual Question Answering (VQA) aims to answer the natural language question about a given image by understanding multimodal content. However, the answer quality of most existing visual-language pre-training (VLP) methods is still limited, mainly due to: (1) Incompatibility. Upstream pre-training tasks are generally incompatible with downstream question answering tasks, which makes the knowledge from the language model not well transferable to downstream tasks, and greatly limits their performance in few-shot scenarios; (2) Under-fitting. They generally do not integrate human priors to compensate for universal knowledge from language models, so as to fit the challenging VQA problem and generate reliable answers. To address these issues, we propose HybridPrompt, a cloze- and verify-style hybrid prompt framework with bridging language models and human priors in prompt tuning for VQA. Specifically, we first modify the input questions into the cloze-style prompts to narrow the gap between upstream pre-training tasks and downstream VQA task, which ensures that the universal knowledge in the language model can be better transferred to subsequent human prior-guided prompt tuning. 
Then, we imitate the cognitive process of human brain to introduce topic and sample related priors to construct a dynamic learnable prompt template for human prior-guided prompt learning. Finally, we add fixed-length learnable free-parameters to further enhance the generalizability and scalability of prompt learning in the VQA model. Experimental results verify the effectiveness of HybridPrompt, showing that it achieves competitive performance against previous methods on widely-used VQAv2 dataset and obtains new state-of-the-art results. Our code is released at: https://github.com/zhizhi111/hybrid.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Zhiyuan and Yu, Zhihuan and Li, Jianjun and Li, Guohui}, year={2023}, month={Jun.}, pages={13371-13379} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26569/26341", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26569", + "pdf_size": 2310656, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9039047381424543143&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/zhizhi111/hybrid", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26097", + "title": "HyperJump: Accelerating HyperBand via Risk Modelling", + "track": "main", + "status": "Technical", + "abstract": "In the literature on hyper-parameter tuning, a number of recent solutions rely on low-fidelity observations (e.g., training with sub-sampled 
datasets) to identify promising configurations to be tested via high-fidelity observations (e.g., using the full dataset). Among these, HyperBand is arguably one of the most popular solutions, due to its efficiency and theoretically provable robustness. In this work, we introduce HyperJump, a new approach that builds on HyperBand\u2019s robust search strategy and complements it with novel model-based risk analysis techniques that accelerate the search by skipping the evaluation of low risk configurations, i.e., configurations that are likely to be eventually discarded by HyperBand. We evaluate HyperJump on a suite of hyper-parameter optimization problems and show that it provides over one-order of magnitude speed-ups, both in sequential and parallel deployments, on a variety of deep-learning, kernel-based learning and neural architectural search problems when compared to HyperBand and to several state-of-the-art optimizers.", + "primary_area": "machine learning iii", + "author": "Pedro Mendes; Maria Casimiro; Paolo Romano; David Garlan", + "authorids": "", + "aff": "INESC-ID and Instituto Superior T\u00e9cnico, Universidade de Lisboa+Software and Societal Systems Department, Carnegie Mellon University; INESC-ID and Instituto Superior T\u00e9cnico, Universidade de Lisboa+Software and Societal Systems Department, Carnegie Mellon University; INESC-ID and Instituto Superior T\u00e9cnico, Universidade de Lisboa; Software and Societal Systems Department, Carnegie Mellon University", + "bibtex": "@article{Mendes_Casimiro_Romano_Garlan_2023, title={HyperJump: Accelerating HyperBand via Risk Modelling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26097}, DOI={10.1609/aaai.v37i8.26097}, abstractNote={In the literature on hyper-parameter tuning, a number of recent solutions rely on low-fidelity observations (e.g., training with sub-sampled datasets) to identify promising configurations to be tested via high-fidelity observations (e.g., using the 
full dataset). Among these, HyperBand is arguably one of the most popular solutions, due to its efficiency and theoretically provable robustness. In this work, we introduce HyperJump, a new approach that builds on HyperBand\u2019s robust search strategy and complements it with novel model-based risk analysis techniques that accelerate the search by skipping the evaluation of low risk configurations, i.e., configurations that are likely to be eventually discarded by HyperBand. We evaluate HyperJump on a suite of hyper-parameter optimization problems and show that it provides over one-order of magnitude speed-ups, both in sequential and parallel deployments, on a variety of deep-learning, kernel-based learning and neural architectural search problems when compared to HyperBand and to several state-of-the-art optimizers.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mendes, Pedro and Casimiro, Maria and Romano, Paolo and Garlan, David}, year={2023}, month={Jun.}, pages={9143-9152} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26097/25869", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26097", + "pdf_size": 2551706, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=855255415074292022&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;inesc-id.pt;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;inesc-id.pt;andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0;1", + "aff_unique_norm": "Universidade de Lisboa;Carnegie Mellon University", + "aff_unique_dep": "INESC-ID, Instituto Superior T\u00e9cnico;Software and Societal Systems Department", + "aff_unique_url": "https://www.ulusiada.pt;https://www.cmu.edu", + "aff_unique_abbr": "UL;CMU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;0;1", + 
"aff_country_unique": "Portugal;United States" + }, + { + "id": "article-26146", + "title": "Hypernetworks for Zero-Shot Transfer in Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "In this paper, hypernetworks are trained to generate behaviors across a range of unseen task conditions, via a novel TD-based training objective and data from a set of near-optimal RL solutions for training tasks. This work relates to meta RL, contextual RL, and transfer learning, with a particular focus on zero-shot performance at test time, enabled by knowledge of the task parameters (also known as context). Our technical approach is based upon viewing each RL algorithm as a mapping from the MDP specifics to the near-optimal value function and policy and seek to approximate it with a hypernetwork that can generate near-optimal value functions and policies, given the parameters of the MDP. We show that, under certain conditions, this mapping can be considered as a supervised learning problem. We empirically evaluate the effectiveness of our method for zero-shot transfer to new reward and transition dynamics on a series of continuous control tasks from DeepMind Control Suite. Our method demonstrates significant improvements over baselines from multitask and meta RL approaches.", + "primary_area": "machine learning iii", + "author": "Sahand Rezaei-Shoshtari; Charlotte Morissette; Francois R. 
Hogan; Gregory Dudek; David Meger", + "authorids": "", + "aff": "McGill University+Mila - Qu\u00e9bec AI Institute+Samsung AI Center Montreal; McGill University+Mila - Qu\u00e9bec AI Institute+Samsung AI Center Montreal; Samsung AI Center Montreal; McGill University+Mila - Qu\u00e9bec AI Institute+Samsung AI Center Montreal; McGill University+Mila - Qu\u00e9bec AI Institute+Samsung AI Center Montreal", + "bibtex": "@article{Rezaei-Shoshtari_Morissette_Hogan_Dudek_Meger_2023, title={Hypernetworks for Zero-Shot Transfer in Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26146}, DOI={10.1609/aaai.v37i8.26146}, abstractNote={In this paper, hypernetworks are trained to generate behaviors across a range of unseen task conditions, via a novel TD-based training objective and data from a set of near-optimal RL solutions for training tasks. This work relates to meta RL, contextual RL, and transfer learning, with a particular focus on zero-shot performance at test time, enabled by knowledge of the task parameters (also known as context). Our technical approach is based upon viewing each RL algorithm as a mapping from the MDP specifics to the near-optimal value function and policy and seek to approximate it with a hypernetwork that can generate near-optimal value functions and policies, given the parameters of the MDP. We show that, under certain conditions, this mapping can be considered as a supervised learning problem. We empirically evaluate the effectiveness of our method for zero-shot transfer to new reward and transition dynamics on a series of continuous control tasks from DeepMind Control Suite. Our method demonstrates significant improvements over baselines from multitask and meta RL approaches.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rezaei-Shoshtari, Sahand and Morissette, Charlotte and Hogan, Francois R. 
and Dudek, Gregory and Meger, David}, year={2023}, month={Jun.}, pages={9579-9587} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26146/25918", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26146", + "pdf_size": 1314143, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15859417980958071938&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "cim.mcgill.ca; ; ; ; ", + "email": "cim.mcgill.ca; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+2;0+1+2;2;0+1+2;0+1+2", + "aff_unique_norm": "McGill University;Mila - Quebec AI Institute;Samsung AI Center", + "aff_unique_dep": ";AI Institute;AI Center", + "aff_unique_url": "https://www.mcgill.ca;https://mila.quebec;https://www.samsung.com/global/innovation/ai-research-centers/", + "aff_unique_abbr": "McGill;Mila;Samsung AI", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Montreal", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+0+0;0+0+0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25251", + "title": "Hypotheses Tree Building for One-Shot Temporal Sentence Localization", + "track": "main", + "status": "Technical", + "abstract": "Given an untrimmed video, temporal sentence localization (TSL) aims to localize a specific segment according to a given sentence query. Though respectable works have made decent achievements in this task, they severely rely on dense video frame annotations, which require a tremendous amount of human effort to collect. In this paper, we target another more practical and challenging setting: one-shot temporal sentence localization (one-shot TSL), which learns to retrieve the query information among the entire video with only one annotated frame. 
Particularly, we propose an effective and novel tree-structure baseline for one-shot TSL, called Multiple Hypotheses Segment Tree (MHST), to capture the query-aware discriminative frame-wise information under the insufficient annotations. Each video frame is taken as the leaf-node, and the adjacent frames sharing the same visual-linguistic semantics will be merged into the upper non-leaf node for tree building. At last, each root node is an individual segment hypothesis containing the consecutive frames of its leaf-nodes. During the tree construction, we also introduce a pruning strategy to eliminate the interference of query-irrelevant nodes. With our designed self-supervised loss functions, our MHST is able to generate high-quality segment hypotheses for ranking and selection with the query. Experiments on two challenging datasets demonstrate that MHST achieves competitive performance compared to existing methods.", + "primary_area": "computer vision ii", + "author": "Daizong Liu; Xiang Fang; Pan Zhou; Xing Di; Weining Lu; Yu Cheng", + "authorids": "", + "aff": "School of Cyber Science and Engineering, Huazhong University of Science and Technology + Wangxuan Institute of Computer Technology, Peking University; Nanyang Technological University; School of Cyber Science and Engineering, Huazhong University of Science and Technology; ProtagoLabs Inc; Tsinghua University; Microsoft Research", + "bibtex": "@article{Liu_Fang_Zhou_Di_Lu_Cheng_2023, title={Hypotheses Tree Building for One-Shot Temporal Sentence Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25251}, DOI={10.1609/aaai.v37i2.25251}, abstractNote={Given an untrimmed video, temporal sentence localization (TSL) aims to localize a specific segment according to a given sentence query. Though respectable works have made decent achievements in this task, they severely rely on dense video frame annotations, which require a tremendous amount of human effort to collect. 
In this paper, we target another more practical and challenging setting: one-shot temporal sentence localization (one-shot TSL), which learns to retrieve the query information among the entire video with only one annotated frame. Particularly, we propose an effective and novel tree-structure baseline for one-shot TSL, called Multiple Hypotheses Segment Tree (MHST), to capture the query-aware discriminative frame-wise information under the insufficient annotations. Each video frame is taken as the leaf-node, and the adjacent frames sharing the same visual-linguistic semantics will be merged into the upper non-leaf node for tree building. At last, each root node is an individual segment hypothesis containing the consecutive frames of its leaf-nodes. During the tree construction, we also introduce a pruning strategy to eliminate the interference of query-irrelevant nodes. With our designed self-supervised loss functions, our MHST is able to generate high-quality segment hypotheses for ranking and selection with the query. 
Experiments on two challenging datasets demonstrate that MHST achieves competitive performance compared to existing methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Daizong and Fang, Xiang and Zhou, Pan and Di, Xing and Lu, Weining and Cheng, Yu}, year={2023}, month={Jun.}, pages={1640-1648} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25251/25023", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25251", + "pdf_size": 994980, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=266855324827856509&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "hust.edu.cn;gmail.com;hust.edu.cn;protagolabs.com;tsinghua.edu.cn;microsoft.com", + "email": "hust.edu.cn;gmail.com;hust.edu.cn;protagolabs.com;tsinghua.edu.cn;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;0;3;4;5", + "aff_unique_norm": "Huazhong University of Science and Technology;Peking University;Nanyang Technological University;ProtagoLabs;Tsinghua University;Microsoft Corporation", + "aff_unique_dep": "School of Cyber Science and Engineering;Wangxuan Institute of Computer Technology;;;;Microsoft Research", + "aff_unique_url": "http://www.hust.edu.cn;http://www.pku.edu.cn;https://www.ntu.edu.sg;;https://www.tsinghua.edu.cn;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "HUST;PKU;NTU;;THU;MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;0;0;3", + "aff_country_unique": "China;Singapore;;United States" + }, + { + "id": "article-25454", + "title": "IKOL: Inverse Kinematics Optimization Layer for 3D Human Pose and Shape Estimation via Gauss-Newton Differentiation", + "track": "main", + "status": "Technical", + "abstract": "This paper presents an inverse kinematic optimization layer (IKOL) for 3D human pose and shape estimation that leverages the strength of both 
optimization- and regression-based methods within an end-to-end framework. IKOL involves a nonconvex optimization that establishes an implicit mapping from an image\u2019s 3D keypoints and body shapes to the relative body-part rotations. The 3D keypoints and the body shapes are the inputs and the relative body-part rotations are the solutions. However, this procedure is implicit and hard to make differentiable. So, to overcome this issue, we designed a Gauss-Newton differentiation (GN-Diff) procedure to differentiate IKOL. GN-Diff iteratively linearizes the nonconvex objective function to obtain Gauss-Newton directions with closed form solutions. Then, an automatic differentiation procedure is directly applied to generate a Jacobian matrix for end-to-end training. Notably, the GN-Diff procedure works fast because it does not rely on a time-consuming implicit differentiation procedure. The twist rotation and shape parameters are learned from the neural networks and, as a result, IKOL has a much lower computational overhead than most existing optimization-based methods. Additionally, compared to existing regression-based methods, IKOL provides a more accurate mesh-image correspondence. This is because it iteratively reduces the distance between the keypoints and also enhances the reliability of the pose structures. Extensive experiments demonstrate the superiority of our proposed framework over a wide range of 3D human pose and shape estimation methods. 
Code is available at https://github.com/Juzezhang/IKOL", + "primary_area": "computer vision iii", + "author": "Juze Zhang; Ye Shi; Yuexin Ma; Lan Xu; Jingyi Yu; Jingya Wang", + "authorids": "", + "aff": "ShanghaiTech University + Shanghai Advanced Research Institute, Chinese Academy of Sciences + University of Chinese Academy of Sciences + Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "bibtex": "@article{Zhang_Shi_Ma_Xu_Yu_Wang_2023, title={IKOL: Inverse Kinematics Optimization Layer for 3D Human Pose and Shape Estimation via Gauss-Newton Differentiation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25454}, DOI={10.1609/aaai.v37i3.25454}, abstractNote={This paper presents an inverse kinematic optimization layer (IKOL) for 3D human pose and shape estimation that leverages the strength of both optimization- and regression-based methods within an end-to-end framework. IKOL involves a nonconvex optimization that establishes an implicit mapping from an image\u2019s 3D keypoints and body shapes to the relative body-part rotations. The 3D keypoints and the body shapes are the inputs and the relative body-part rotations are the solutions. However, this procedure is implicit and hard to make differentiable. So, to overcome this issue, we designed a Gauss-Newton differentiation (GN-Diff) procedure to differentiate IKOL. GN-Diff iteratively linearizes the nonconvex objective function to obtain Gauss-Newton directions with closed form solutions. 
Then, an automatic differentiation procedure is directly applied to generate a Jacobian matrix for end-to-end training. Notably, the GN-Diff procedure works fast because it does not rely on a time-consuming implicit differentiation procedure. The twist rotation and shape parameters are learned from the neural networks and, as a result, IKOL has a much lower computational overhead than most existing optimization-based methods. Additionally, compared to existing regression-based methods, IKOL provides a more accurate mesh-image correspondence. This is because it iteratively reduces the distance between the keypoints and also enhances the reliability of the pose structures. Extensive experiments demonstrate the superiority of our proposed framework over a wide range of 3D human pose and shape estimation methods. Code is available at https://github.com/Juzezhang/IKOL}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Juze and Shi, Ye and Ma, Yuexin and Xu, Lan and Yu, Jingyi and Wang, Jingya}, year={2023}, month={Jun.}, pages={3454-3462} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25454/25226", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25454", + "pdf_size": 2388545, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11774735750458977950&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "github": "https://github.com/Juzezhang/IKOL", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2+3;0+3;0+3;0+3;0+3;0+3", + "aff_unique_norm": "ShanghaiTech University;Chinese Academy of Sciences;University of Chinese Academy of Sciences;Shanghai Engineering Research Center of 
Intelligent Vision and Imaging", + "aff_unique_dep": ";Shanghai Advanced Research Institute;;", + "aff_unique_url": "https://www.shanghaitech.edu.cn;http://www.sari.cas.cn;http://www.ucas.ac.cn;", + "aff_unique_abbr": "ShanghaiTech;SARI;UCAS;", + "aff_campus_unique_index": "1;;;;;", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0+0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26358", + "title": "ILSGAN: Independent Layer Synthesis for Unsupervised Foreground-Background Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised foreground-background segmentation aims at extracting salient objects from cluttered backgrounds, where Generative Adversarial Network (GAN) approaches, especially layered GANs, show great promise. However, without human annotations, they are typically prone to produce foreground and background layers with non-negligible semantic and visual confusion, dubbed \"information leakage\", resulting in notable degeneration of the generated segmentation mask. To alleviate this issue, we propose a simple-yet-effective explicit layer independence modeling approach, termed Independent Layer Synthesis GAN (ILSGAN), pursuing independent foreground-background layer generation by encouraging their discrepancy. Specifically, it targets minimizing the mutual information between visible and invisible regions of the foreground and background to spur interlayer independence. Through in-depth theoretical and experimental analyses, we justify that explicit layer independence modeling is critical to suppressing information leakage and contributes to impressive segmentation performance gains. 
Also, our ILSGAN achieves strong state-of-the-art generation quality and segmentation performance on complex real-world data.", + "primary_area": "machine learning iv", + "author": "Qiran Zou; Yu Yang; Wing Yin Cheung; Chang Liu; Xiangyang Ji", + "authorids": "", + "aff": "Tsinghua University, BNRist; Tsinghua University, BNRist; Tsinghua University, BNRist; Tsinghua University, BNRist; Tsinghua University, BNRist", + "bibtex": "@article{Zou_Yang_Cheung_Liu_Ji_2023, title={ILSGAN: Independent Layer Synthesis for Unsupervised Foreground-Background Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26358}, DOI={10.1609/aaai.v37i9.26358}, abstractNote={Unsupervised foreground-background segmentation aims at extracting salient objects from cluttered backgrounds, where Generative Adversarial Network (GAN) approaches, especially layered GANs, show great promise. However, without human annotations, they are typically prone to produce foreground and background layers with non-negligible semantic and visual confusion, dubbed "information leakage", resulting in notable degeneration of the generated segmentation mask. To alleviate this issue, we propose a simple-yet-effective explicit layer independence modeling approach, termed Independent Layer Synthesis GAN (ILSGAN), pursuing independent foreground-background layer generation by encouraging their discrepancy. Specifically, it targets minimizing the mutual information between visible and invisible regions of the foreground and background to spur interlayer independence. Through in-depth theoretical and experimental analyses, we justify that explicit layer independence modeling is critical to suppressing information leakage and contributes to impressive segmentation performance gains. 
Also, our ILSGAN achieves strong state-of-the-art generation quality and segmentation performance on complex real-world data.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zou, Qiran and Yang, Yu and Cheung, Wing Yin and Liu, Chang and Ji, Xiangyang}, year={2023}, month={Jun.}, pages={11488-11496} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26358/26130", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26358", + "pdf_size": 5706087, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7816938871898083353&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;foxmail.com;mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "gmail.com;foxmail.com;mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "BNRist", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26942", + "title": "IdProv: Identity-Based Provenance for Synthetic Image Generation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Recent advancements in Generative Adversarial Networks (GANs) have made it possible to obtain high-quality face images of synthetic identities. These networks see large amounts of real faces in order to learn to generate realistic looking synthetic images. However, the concept of a synthetic identity for these images is not very well-defined. 
In this work, we verify identity leakage from the training set containing real images into the latent space and propose a novel method, IdProv, that uses image composition to trace the source of identity signals in the generated image.", + "primary_area": "", + "author": "Harshil Bhatia; Jaisidh Singh; Gaurav Sangwan; Aparna Bharati; Richa Singh; Mayank Vatsa", + "authorids": "", + "aff": "IIT Jodhpur, India; IIT Jodhpur, India; IIT Jodhpur, India; Lehigh University, USA; IIT Jodhpur, India; IIT Jodhpur, India", + "bibtex": "@article{Bhatia_Singh_Sangwan_Bharati_Singh_Vatsa_2024, title={IdProv: Identity-Based Provenance for Synthetic Image Generation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26942}, DOI={10.1609/aaai.v37i13.26942}, abstractNote={Recent advancements in Generative Adversarial Networks (GANs) have made it possible to obtain high-quality face images of synthetic identities. These networks see large amounts of real faces in order to learn to generate realistic looking synthetic images. However, the concept of a synthetic identity for these images is not very well-defined. 
In this work, we verify identity leakage from the training set containing real images into the latent space and propose a novel method, IdProv, that uses image composition to trace the source of identity signals in the generated image.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bhatia, Harshil and Singh, Jaisidh and Sangwan, Gaurav and Bharati, Aparna and Singh, Richa and Vatsa, Mayank}, year={2024}, month={Jul.}, pages={16164-16165} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26942/26714", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26942", + "pdf_size": 1243651, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10809308446246028749&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "iitj.ac.in;iitj.ac.in;iitj.ac.in;lehigh.edu;iitj.ac.in;iitj.ac.in", + "email": "iitj.ac.in;iitj.ac.in;iitj.ac.in;lehigh.edu;iitj.ac.in;iitj.ac.in", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "Indian Institute of Technology Jodhpur;Lehigh University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitj.ac.in;https://www.lehigh.edu", + "aff_unique_abbr": "IIT Jodhpur;Lehigh", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0;0", + "aff_country_unique": "India;United States" + }, + { + "id": "article-26442", + "title": "Identification and Estimation of the Probabilities of Potential Outcome Types Using Covariate Information in Studies with Non-compliance", + "track": "main", + "status": "Technical", + "abstract": "We propose novel identification conditions and a statistical estimation method for the probabilities of potential outcome types using covariate information in randomized trials in which the treatment assignment is randomized but subject compliance is not perfect. 
Different from existing studies, the proposed identification conditions do not require strict assumptions such as the assumption of monotonicity. When the probabilities of potential outcome types are identifiable through the proposed conditions, the problem of estimating the probabilities of potential outcome types is reduced to that of singular models. Thus, the probabilities cannot be evaluated using standard statistical likelihood-based estimation methods. Rather, the proposed identification conditions show that we can derive consistent estimators of the probabilities of potential outcome types via the method of moments, which leads to the asymptotic normality of the proposed estimators through the delta method under regular conditions. We also propose a new statistical estimation method based on the bounded constrained augmented Lagrangian method to derive more efficient estimators than can be derived through the method of moments.", + "primary_area": "reasoning under uncertainty", + "author": "Yuta Kawakami; Ryusei Shingaki; Manabu Kuroki", + "authorids": "", + "aff": "Department of Mathematics, Physics, Electrical Engineering and Computer Science, Graduate School of Engineering Science, Yokohama National University, Yokohama, Japan; Department of Mathematics, Physics, Electrical Engineering and Computer Science, Graduate School of Engineering Science, Yokohama National University, Yokohama, Japan; Department of Mathematics, Physics, Electrical Engineering and Computer Science, Graduate School of Engineering Science, Yokohama National University, Yokohama, Japan", + "bibtex": "@article{Kawakami_Shingaki_Kuroki_2023, title={Identification and Estimation of the Probabilities of Potential Outcome Types Using Covariate Information in Studies with Non-compliance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26442}, DOI={10.1609/aaai.v37i10.26442}, abstractNote={We propose novel identification conditions and a statistical estimation method for 
the probabilities of potential outcome types using covariate information in randomized trials in which the treatment assignment is randomized but subject compliance is not perfect. Different from existing studies, the proposed identification conditions do not require strict assumptions such as the assumption of monotonicity. When the probabilities of potential outcome types are identifiable through the proposed conditions, the problem of estimating the probabilities of potential outcome types is reduced to that of singular models. Thus, the probabilities cannot be evaluated using standard statistical likelihood-based estimation methods. Rather, the proposed identification conditions show that we can derive consistent estimators of the probabilities of potential outcome types via the method of moments, which leads to the asymptotic normality of the proposed estimators through the delta method under regular conditions. We also propose a new statistical estimation method based on the bounded constrained augmented Lagrangian method to derive more efficient estimators than can be derived through the method of moments.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kawakami, Yuta and Shingaki, Ryusei and Kuroki, Manabu}, year={2023}, month={Jun.}, pages={12234-12242} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26442/26214", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26442", + "pdf_size": 245198, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15012849803378924355&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "ynu.jp; ; ", + "email": "ynu.jp; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Yokohama National University", + "aff_unique_dep": "Department of Mathematics, Physics, Electrical Engineering and Computer Science, Graduate School of Engineering Science", + 
"aff_unique_url": "https://www.yokohama-nu.ac.jp", + "aff_unique_abbr": "YNU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Yokohama", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26610", + "title": "Identify Event Causality with Knowledge and Analogy", + "track": "main", + "status": "Technical", + "abstract": "Event causality identification (ECI) aims to identify the\ncausal relationship between events, which plays a crucial role\nin deep text understanding. Due to the diversity of real-world\ncausality events and difficulty in obtaining sufficient training\ndata, existing ECI approaches have poor generalizability and\nstruggle to identify the relation between seldom seen events.\nIn this paper, we propose to utilize both external knowledge\nand internal analogy to improve ECI. On the one hand, we\nutilize a commonsense knowledge graph called ConceptNet\nto enrich the description of an event sample and reveal the\ncommonalities or associations between different events. On\nthe other hand, we retrieve similar events as analogy exam-\nples and glean useful experiences from such analogous neigh-\nbors to better identify the relationship between a new event\npair. By better understanding different events through exter-\nnal knowledge and making an analogy with similar events, we\ncan alleviate the data sparsity issue and improve model gener-\nalizability. 
Extensive evaluations on two benchmark datasets\nshow that our model outperforms other baseline methods by\naround 18% on the F1-value on average", + "primary_area": "speech natural language processing", + "author": "Sifan Wu; Ruihui Zhao; Yefeng Zheng; Jian Pei; Bang Liu", + "authorids": "", + "aff": "RALI & Mila, University of Montreal; Tencent Jarvis Lab; Tencent Jarvis Lab; Duke University; RALI & Mila, University of Montreal", + "bibtex": "@article{Wu_Zhao_Zheng_Pei_Liu_2023, title={Identify Event Causality with Knowledge and Analogy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26610}, DOI={10.1609/aaai.v37i11.26610}, abstractNote={Event causality identification (ECI) aims to identify the\ncausal relationship between events, which plays a crucial role\nin deep text understanding. Due to the diversity of real-world\ncausality events and difficulty in obtaining sufficient training\ndata, existing ECI approaches have poor generalizability and\nstruggle to identify the relation between seldom seen events.\nIn this paper, we propose to utilize both external knowledge\nand internal analogy to improve ECI. On the one hand, we\nutilize a commonsense knowledge graph called ConceptNet\nto enrich the description of an event sample and reveal the\ncommonalities or associations between different events. On\nthe other hand, we retrieve similar events as analogy exam-\nples and glean useful experiences from such analogous neigh-\nbors to better identify the relationship between a new event\npair. By better understanding different events through exter-\nnal knowledge and making an analogy with similar events, we\ncan alleviate the data sparsity issue and improve model gener-\nalizability. 
Extensive evaluations on two benchmark datasets\nshow that our model outperforms other baseline methods by\naround 18% on the F1-value on average}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Sifan and Zhao, Ruihui and Zheng, Yefeng and Pei, Jian and Liu, Bang}, year={2023}, month={Jun.}, pages={13745-13753} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26610/26382", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26610", + "pdf_size": 993371, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12197119034022850459&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "umontreal.ca;ruri.waseda.jp;tencent.com;duke.edu;umontreal.ca", + "email": "umontreal.ca;ruri.waseda.jp;tencent.com;duke.edu;umontreal.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;0", + "aff_unique_norm": "University of Montreal;Tencent;Duke University", + "aff_unique_dep": "RALI & Mila;Jarvis Lab;", + "aff_unique_url": "https://www.mila.quebec;https://www.tencent.com;https://www.duke.edu", + "aff_unique_abbr": "UdeM;Tencent;Duke", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Montreal;", + "aff_country_unique_index": "0;1;1;2;0", + "aff_country_unique": "Canada;China;United States" + }, + { + "id": "article-25987", + "title": "Identifying Selection Bias from Observational Data", + "track": "main", + "status": "Technical", + "abstract": "Access to a representative sample from the population is an assumption that underpins all of machine learning. Selection effects can cause observations to instead come from a subpopulation, by which our inferences may be subject to bias. \nIt is therefore important to know whether or not a sample is affected by selection effects. We study under which conditions we can identify selection bias and give results for both parametric and non-parametric families of distributions. 
Based on these results we develop two practical methods to determine whether or not an observed sample comes from a distribution subject to selection bias. Through extensive evaluation on synthetic and real world data we verify that our methods beat the state of the art both in detecting as well as characterizing selection bias.", + "primary_area": "machine learning ii", + "author": "David Kaltenpoth; Jilles Vreeken", + "authorids": "", + "aff": "CISPA Helmholtz Center for Information Security, Germany; CISPA Helmholtz Center for Information Security, Germany", + "bibtex": "@article{Kaltenpoth_Vreeken_2023, title={Identifying Selection Bias from Observational Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25987}, DOI={10.1609/aaai.v37i7.25987}, abstractNote={Access to a representative sample from the population is an assumption that underpins all of machine learning. Selection effects can cause observations to instead come from a subpopulation, by which our inferences may be subject to bias. It is therefore important to know whether or not a sample is affected by selection effects. We study under which conditions we can identify selection bias and give results for both parametric and non-parametric families of distributions. Based on these results we develop two practical methods to determine whether or not an observed sample comes from a distribution subject to selection bias. 
Through extensive evaluation on synthetic and real world data we verify that our methods beat the state of the art both in detecting as well as characterizing selection bias.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kaltenpoth, David and Vreeken, Jilles}, year={2023}, month={Jun.}, pages={8177-8185} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25987/25759", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25987", + "pdf_size": 283756, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3576310455916737154&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cispa.de;cispa.de", + "email": "cispa.de;cispa.de", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "CISPA Helmholtz Center for Information Security", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cispa.de/", + "aff_unique_abbr": "CISPA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25634", + "title": "Identifying and Eliminating Majority Illusion in Social Networks", + "track": "main", + "status": "Technical", + "abstract": "Majority illusion occurs in a social network when the majority of the network vertices belong to a certain type but the majority of each vertex's neighbours belong to a different type, therefore creating the wrong perception, i.e., the illusion, that the majority type is different from the actual one. From a system engineering point of view, this motivates the search for algorithms to detect and, where possible, correct this undesirable phenomenon. 
In this paper we initiate the computational study of majority illusion in social networks, providing NP-hardness and parametrised complexity results for its occurrence and elimination.", + "primary_area": "domain s of application", + "author": "Umberto Grandi; Lawqueen Kanesh; Grzegorz Lisowski; Ramanujan Sridharan; Paolo Turrini", + "authorids": "", + "aff": "University of Toulouse, France; IIT Jodhpur, India; University of Warwick, UK; University of Warwick, UK; University of Warwick, UK", + "bibtex": "@article{Grandi_Kanesh_Lisowski_Sridharan_Turrini_2023, title={Identifying and Eliminating Majority Illusion in Social Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25634}, DOI={10.1609/aaai.v37i4.25634}, abstractNote={Majority illusion occurs in a social network when the majority of the network vertices belong to a certain type but the majority of each vertex\u2019s neighbours belong to a different type, therefore creating the wrong perception, i.e., the illusion, that the majority type is different from the actual one. From a system engineering point of view, this motivates the search for algorithms to detect and, where possible, correct this undesirable phenomenon. 
In this paper we initiate the computational study of majority illusion in social networks, providing NP-hardness and parametrised complexity results for its occurrence and elimination.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Grandi, Umberto and Kanesh, Lawqueen and Lisowski, Grzegorz and Sridharan, Ramanujan and Turrini, Paolo}, year={2023}, month={Jun.}, pages={5062-5069} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25634/25406", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25634", + "pdf_size": 156123, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4277646165870961097&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "irit.fr;itj.ac.in;warwick.ac.uk;warwick.ac.uk;warwick.ac.uk", + "email": "irit.fr;itj.ac.in;warwick.ac.uk;warwick.ac.uk;warwick.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;2", + "aff_unique_norm": "University of Toulouse;Indian Institute of Technology Jodhpur;University of Warwick", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.univ-toulouse.fr;https://www.iitj.ac.in;https://www.warwick.ac.uk", + "aff_unique_abbr": "UT;IIT Jodhpur;Warwick", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;2;2", + "aff_country_unique": "France;India;United Kingdom" + }, + { + "id": "article-26319", + "title": "ImGCL: Revisiting Graph Contrastive Learning on Imbalanced Node Classification", + "track": "main", + "status": "Technical", + "abstract": "Graph contrastive learning (GCL) has attracted a surge of attention due to its superior performance for learning node/graph representations without labels. However, in practice, the underlying class distribution of unlabeled nodes for the given graph is usually imbalanced. 
This highly imbalanced class distribution inevitably deteriorates the quality of learned node representations in GCL. Indeed, we empirically find that most state-of-the-art GCL methods cannot obtain discriminative representations and exhibit poor performance on imbalanced node classification. Motivated by this observation, we propose a principled GCL framework on Imbalanced node classification (ImGCL), which automatically and adaptively balances the representations learned from GCL without labels. Specifically, we first introduce the online clustering based progressively balanced sampling (PBS) method with theoretical rationale, which balances the training sets based on pseudo-labels obtained from learned representations in GCL. We then develop the node centrality based PBS method to better preserve the intrinsic structure of graphs, by upweighting the important nodes of the given graph. Extensive experiments on multiple imbalanced graph datasets and imbalanced settings demonstrate the effectiveness of our proposed framework, which significantly improves the performance of the recent state-of-the-art GCL methods. 
Further experimental ablations and analyses show that the ImGCL framework consistently improves the representation quality of nodes in under-represented (tail) classes.", + "primary_area": "machine learning iv", + "author": "Liang Zeng; Lanqing Li; Ziqi Gao; Peilin Zhao; Jian Li", + "authorids": "", + "aff": "Institute for Interdisciplinary Information Sciences (IIIS), Tsinghua University; Tencent AI Lab; Hong Kong University of Science and Technology; Tencent AI Lab; Institute for Interdisciplinary Information Sciences (IIIS), Tsinghua University", + "bibtex": "@article{Zeng_Li_Gao_Zhao_Li_2023, title={ImGCL: Revisiting Graph Contrastive Learning on Imbalanced Node Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26319}, DOI={10.1609/aaai.v37i9.26319}, abstractNote={Graph contrastive learning (GCL) has attracted a surge of attention due to its superior performance for learning node/graph representations without labels. However, in practice, the underlying class distribution of unlabeled nodes for the given graph is usually imbalanced. This highly imbalanced class distribution inevitably deteriorates the quality of learned node representations in GCL. Indeed, we empirically find that most state-of-the-art GCL methods cannot obtain discriminative representations and exhibit poor performance on imbalanced node classification. Motivated by this observation, we propose a principled GCL framework on Imbalanced node classification (ImGCL), which automatically and adaptively balances the representations learned from GCL without labels. Specifically, we first introduce the online clustering based progressively balanced sampling (PBS) method with theoretical rationale, which balances the training sets based on pseudo-labels obtained from learned representations in GCL. We then develop the node centrality based PBS method to better preserve the intrinsic structure of graphs, by upweighting the important nodes of the given graph. 
Extensive experiments on multiple imbalanced graph datasets and imbalanced settings demonstrate the effectiveness of our proposed framework, which significantly improves the performance of the recent state-of-the-art GCL methods. Further experimental ablations and analyses show that the ImGCL framework consistently improves the representation quality of nodes in under-represented (tail) classes.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Liang and Li, Lanqing and Gao, Ziqi and Zhao, Peilin and Li, Jian}, year={2023}, month={Jun.}, pages={11138-11146} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26319/26091", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26319", + "pdf_size": 532868, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14509868810446840742&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;tencent.com;tencent.com;connect.ust.hk;mail.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;tencent.com;tencent.com;connect.ust.hk;mail.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;0", + "aff_unique_norm": "Tsinghua University;Tencent;Hong Kong University of Science and Technology", + "aff_unique_dep": "Institute for Interdisciplinary Information Sciences (IIIS);Tencent AI Lab;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://ai.tencent.com;https://www.ust.hk", + "aff_unique_abbr": "Tsinghua;Tencent AI Lab;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25452", + "title": "ImageNet Pre-training Also Transfers Non-robustness", + "track": "main", + "status": "Technical", + "abstract": "ImageNet pre-training has enabled state-of-the-art results on many tasks. 
In spite of its recognized contribution to generalization, we observed in this study that ImageNet pre-training also transfers adversarial non-robustness from pre-trained model into fine-tuned model in the downstream classification tasks. We first conducted experiments on various datasets and network backbones to uncover the adversarial non-robustness in fine-tuned model. Further analysis was conducted on examining the learned knowledge of fine-tuned model and standard model, and revealed that the reason leading to the non-robustness is the non-robust features transferred from ImageNet pre-trained model. Finally, we analyzed the preference for feature learning of the pre-trained model, explored the factors influencing robustness, and introduced a simple robust ImageNet pre-training solution. Our code is available at https://github.com/jiamingzhang94/ImageNet-Pretraining-transfers-non-robustness.", + "primary_area": "computer vision iii", + "author": "Jiaming Zhang; Jitao Sang; Qi Yi; Yunfan Yang; Huiwen Dong; Jian Yu", + "authorids": "", + "aff": "School of Computer and Information Technology, Beijing Jiaotong University; School of Computer and Information Technology, Beijing Jiaotong University + Peng Cheng Lab; School of Computer and Information Technology, Beijing Jiaotong University; School of Computer and Information Technology, Beijing Jiaotong University; BNU-UIC Institute of Artificial Intelligence and Future Networks, Beijing Normal University; School of Computer and Information Technology, Beijing Jiaotong University", + "bibtex": "@article{Zhang_Sang_Yi_Yang_Dong_Yu_2023, title={ImageNet Pre-training Also Transfers Non-robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25452}, DOI={10.1609/aaai.v37i3.25452}, abstractNote={ImageNet pre-training has enabled state-of-the-art results on many tasks. 
In spite of its recognized contribution to generalization, we observed in this study that ImageNet pre-training also transfers adversarial non-robustness from pre-trained model into fine-tuned model in the downstream classification tasks. We first conducted experiments on various datasets and network backbones to uncover the adversarial non-robustness in fine-tuned model. Further analysis was conducted on examining the learned knowledge of fine-tuned model and standard model, and revealed that the reason leading to the non-robustness is the non-robust features transferred from ImageNet pre-trained model. Finally, we analyzed the preference for feature learning of the pre-trained model, explored the factors influencing robustness, and introduced a simple robust ImageNet pre-training solution. Our code is available at https://github.com/jiamingzhang94/ImageNet-Pretraining-transfers-non-robustness.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jiaming and Sang, Jitao and Yi, Qi and Yang, Yunfan and Dong, Huiwen and Yu, Jian}, year={2023}, month={Jun.}, pages={3436-3444} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25452/25224", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25452", + "pdf_size": 694443, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7699133495452728044&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;mail.bnu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;mail.bnu.edu.cn;bjtu.edu.cn", + "github": "https://github.com/jiamingzhang94/ImageNet-Pretraining-transfers-non-robustness", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;2;0", + "aff_unique_norm": "Beijing Jiaotong University;Peng Cheng Lab;Beijing Normal University", + "aff_unique_dep": "School of Computer and Information Technology;;Institute of 
Artificial Intelligence and Future Networks", + "aff_unique_url": "http://www.bjtu.edu.cn;;http://www.bnu.edu.cn", + "aff_unique_abbr": "BJTU;;BNU", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26341", + "title": "Imbalanced Label Distribution Learning", + "track": "main", + "status": "Technical", + "abstract": "Label distribution covers a certain number of labels, representing the degree to which each label describes an instance. The learning process on the instances labeled by label distributions is called Label Distribution Learning (LDL). Although LDL has been applied successfully to many practical applications, one problem with existing LDL methods is that they are limited to data with balanced label information. However, annotation information in real-world data often exhibits imbalanced distributions, which significantly degrades the performance of existing methods. In this paper, we investigate the Imbalanced Label Distribution Learning (ILDL) problem. To handle this challenging problem, we delve into the characteristics of ILDL and empirically find that the representation distribution shift is the underlying reason for the performance degradation of existing methods. Inspired by this finding, we present a novel method named Representation Distribution Alignment (RDA). RDA aligns the distributions of feature representations and label representations to alleviate the impact of the distribution gap between the training set and the test set caused by the imbalance issue. Extensive experiments verify the superior performance of RDA. 
Our work fills the gap in benchmarks and techniques for practical ILDL problems.", + "primary_area": "machine learning iv", + "author": "Xingyu Zhao; Yuexuan An; Ning Xu; Jing Wang; Xin Geng", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University, Nanjing 211189, China+Key Laboratory of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing 211189, China; School of Computer Science and Engineering, Southeast University, Nanjing 211189, China+Key Laboratory of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing 211189, China; School of Computer Science and Engineering, Southeast University, Nanjing 211189, China+Key Laboratory of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing 211189, China; School of Computer Science and Engineering, Southeast University, Nanjing 211189, China+Key Laboratory of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing 211189, China; School of Computer Science and Engineering, Southeast University, Nanjing 211189, China+Key Laboratory of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing 211189, China", + "bibtex": "@article{Zhao_An_Xu_Wang_Geng_2023, title={Imbalanced Label Distribution Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26341}, DOI={10.1609/aaai.v37i9.26341}, abstractNote={Label distribution covers a certain number of labels, representing the degree to which each label describes an instance. The learning process on the instances labeled by label distributions is called Label Distribution Learning (LDL). Although LDL has been applied successfully to many practical applications, one problem with existing LDL methods is that they are limited to data with balanced label information. 
However, annotation information in real-world data often exhibits imbalanced distributions, which significantly degrades the performance of existing methods. In this paper, we investigate the Imbalanced Label Distribution Learning (ILDL) problem. To handle this challenging problem, we delve into the characteristics of ILDL and empirically find that the representation distribution shift is the underlying reason for the performance degradation of existing methods. Inspired by this finding, we present a novel method named Representation Distribution Alignment (RDA). RDA aligns the distributions of feature representations and label representations to alleviate the impact of the distribution gap between the training set and the test set caused by the imbalance issue. Extensive experiments verify the superior performance of RDA. Our work fills the gap in benchmarks and techniques for practical ILDL problems.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Xingyu and An, Yuexuan and Xu, Ning and Wang, Jing and Geng, Xin}, year={2023}, month={Jun.}, pages={11336-11344} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26341/26113", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26341", + "pdf_size": 3981706, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14823454923922905810&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Southeast University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.seu.edu.cn/", + "aff_unique_abbr": "SEU", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_campus_unique": "Nanjing", + 
"aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25115", + "title": "Imperceptible Adversarial Attack via Invertible Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Adding perturbations via utilizing auxiliary gradient information or discarding existing details of the benign images are two common approaches for generating adversarial examples. Though visual imperceptibility is the desired property of adversarial examples, conventional adversarial attacks still generate traceable adversarial perturbations. In this paper, we introduce a novel Adversarial Attack via Invertible Neural Networks (AdvINN) method to produce robust and imperceptible adversarial examples. Specifically, AdvINN fully takes advantage of the information preservation property of Invertible Neural Networks and thereby generates adversarial examples by simultaneously adding class-specific semantic information of the target class and dropping discriminant information of the original class. Extensive experiments on CIFAR-10, CIFAR-100, and ImageNet-1K demonstrate that the proposed AdvINN method can produce less imperceptible adversarial images than the state-of-the-art methods and AdvINN yields more robust adversarial examples with high confidence compared to other adversarial attacks. 
Code is available at https://github.com/jjhuangcs/AdvINN.", + "primary_area": "computer vision i", + "author": "Zihan Chen; Ziyue Wang; Jun-Jie Huang; Wentao Zhao; Xiao Liu; Dejian Guan", + "authorids": "", + "aff": "College of Computer Science, National University of Defense Technology, Changsha, Hunan, China; College of Computer Science, National University of Defense Technology, Changsha, Hunan, China; College of Computer Science, National University of Defense Technology, Changsha, Hunan, China; College of Computer Science, National University of Defense Technology, Changsha, Hunan, China; College of Computer Science, National University of Defense Technology, Changsha, Hunan, China; College of Computer Science, National University of Defense Technology, Changsha, Hunan, China", + "bibtex": "@article{Chen_Wang_Huang_Zhao_Liu_Guan_2023, title={Imperceptible Adversarial Attack via Invertible Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25115}, DOI={10.1609/aaai.v37i1.25115}, abstractNote={Adding perturbations via utilizing auxiliary gradient information or discarding existing details of the benign images are two common approaches for generating adversarial examples. Though visual imperceptibility is the desired property of adversarial examples, conventional adversarial attacks still generate traceable adversarial perturbations. In this paper, we introduce a novel Adversarial Attack via Invertible Neural Networks (AdvINN) method to produce robust and imperceptible adversarial examples. Specifically, AdvINN fully takes advantage of the information preservation property of Invertible Neural Networks and thereby generates adversarial examples by simultaneously adding class-specific semantic information of the target class and dropping discriminant information of the original class. 
Extensive experiments on CIFAR-10, CIFAR-100, and ImageNet-1K demonstrate that the proposed AdvINN method can produce less imperceptible adversarial images than the state-of-the-art methods and AdvINN yields more robust adversarial examples with high confidence compared to other adversarial attacks. Code is available at https://github.com/jjhuangcs/AdvINN.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Zihan and Wang, Ziyue and Huang, Jun-Jie and Zhao, Wentao and Liu, Xiao and Guan, Dejian}, year={2023}, month={Jun.}, pages={414-424} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25115/24887", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25115", + "pdf_size": 12872912, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9199047056414021054&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn", + "email": "nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn;nudt.edu.cn", + "github": "https://github.com/jjhuangcs/AdvINN", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "National University of Defense Technology", + "aff_unique_dep": "College of Computer Science", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Changsha", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25802", + "title": "Implementing Bounded Revision via Lexicographic Revision and C-revision", + "track": "main", + "status": "Technical", + "abstract": "New information in the context of real life settings usually is accompanied by some kind of supplementary information that indicates context, reliability, or expertise of the information's source.\nBounded Revision (BR) displays an iterated belief revision mechanism that 
takes as input a new information accompanied by a reference sentence acting as supplementary information, which specifies the depth with which the new input shall be integrated in the posterior belief state. The reference sentence specifies which worlds in the prior belief state are affected by the change mechanism. We show that Bounded Revision can be characterized by three simple, yet elegant postulates and corresponds to a special case of a lexicographic revision, which inherits all relevant features of BR. Furthermore, we present methodological implementations of BR including conditional revision with c-revisions, making it directly usable for conditional revision tools.", + "primary_area": "knowledge representation and reasoning", + "author": "Meliha Sezgin; Gabriele Kern-Isberner", + "authorids": "", + "aff": "TU Dortmund University, Germany; TU Dortmund University, Germany", + "bibtex": "@article{Sezgin_Kern-Isberner_2023, title={Implementing Bounded Revision via Lexicographic Revision and C-revision}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25802}, DOI={10.1609/aaai.v37i5.25802}, abstractNote={New information in the context of real life settings usually is accompanied by some kind of supplementary information that indicates context, reliability, or expertise of the information\u2019s source.\nBounded Revision (BR) displays an iterated belief revision mechanism that takes as input a new information accompanied by a reference sentence acting as supplementary information, which specifies the depth with which the new input shall be integrated in the posterior belief state. The reference sentence specifies which worlds in the prior belief state are affected by the change mechanism. We show that Bounded Revision can be characterized by three simple, yet elegant postulates and corresponds to a special case of a lexicographic revision, which inherits all relevant features of BR. 
Furthermore, we present methodological implementations of BR including conditional revision with c-revisions, making it directly usable for conditional revision tools.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sezgin, Meliha and Kern-Isberner, Gabriele}, year={2023}, month={Jun.}, pages={6525-6532} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25802/25574", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25802", + "pdf_size": 146008, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4978070376694316900&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "tu-dortmund.de;tu-dortmund.de", + "email": "tu-dortmund.de;tu-dortmund.de", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "TU Dortmund University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tu-dortmund.de", + "aff_unique_abbr": "TUDO", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26716", + "title": "Implicit Bilevel Optimization: Differentiating through Bilevel Optimization Programming", + "track": "aaai special track", + "status": "Technical", + "abstract": "Bilevel Optimization Programming is used to model complex and conflicting interactions between agents, for example in Robust AI or Privacy preserving AI. Integrating bilevel mathematical programming within deep learning is thus an essential objective for the Machine Learning community. \nPreviously proposed approaches only consider single-level programming. In this paper, we extend existing single-level optimization programming approaches and thus propose Differentiating through Bilevel Optimization Programming (BiGrad) for end-to-end learning of models that use Bilevel Programming as a layer. 
\nBiGrad has wide applicability and can be used in modern machine learning frameworks. BiGrad is applicable to both continuous and combinatorial Bilevel optimization problems. We describe a class of gradient estimators for the combinatorial case which reduces the requirements in terms of computation complexity; for the case of the continuous variable, the gradient computation takes advantage of the push-back approach (i.e. vector-jacobian product) for an efficient implementation. Experiments show that the BiGrad successfully extends existing single-level approaches to Bilevel Programming.", + "primary_area": "safe and robust ai", + "author": "Francesco Alesiani", + "authorids": "", + "aff": "NEC Laboratories Europe, Heidelberg, Germany", + "bibtex": "@article{Alesiani_2023, title={Implicit Bilevel Optimization: Differentiating through Bilevel Optimization Programming}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26716}, DOI={10.1609/aaai.v37i12.26716}, abstractNote={Bilevel Optimization Programming is used to model complex and conflicting interactions between agents, for example in Robust AI or Privacy preserving AI. Integrating bilevel mathematical programming within deep learning is thus an essential objective for the Machine Learning community. Previously proposed approaches only consider single-level programming. In this paper, we extend existing single-level optimization programming approaches and thus propose Differentiating through Bilevel Optimization Programming (BiGrad) for end-to-end learning of models that use Bilevel Programming as a layer. BiGrad has wide applicability and can be used in modern machine learning frameworks. BiGrad is applicable to both continuous and combinatorial Bilevel optimization problems. 
We describe a class of gradient estimators for the combinatorial case which reduces the requirements in terms of computation complexity; for the case of the continuous variable, the gradient computation takes advantage of the push-back approach (i.e. vector-jacobian product) for an efficient implementation. Experiments show that the BiGrad successfully extends existing single-level approaches to Bilevel Programming.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Alesiani, Francesco}, year={2023}, month={Jun.}, pages={14683-14691} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26716/26488", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26716", + "pdf_size": 345170, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1057777375525850636&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "neclab.eu", + "email": "neclab.eu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "NEC Laboratories Europe", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nec-labs.eu", + "aff_unique_abbr": "NEC Europe", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Heidelberg", + "aff_country_unique_index": "0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26046", + "title": "Implicit Stochastic Gradient Descent for Training Physics-Informed Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Physics-informed neural networks (PINNs) have effectively been demonstrated in solving forward and inverse differential equation problems,\nbut they are still trapped in training failures when the target functions to be approximated exhibit high-frequency or multi-scale features.\nIn this paper, we propose to employ implicit stochastic gradient descent (ISGD) method to train PINNs for improving the stability of training process.\nWe heuristically analyze how ISGD 
overcome stiffness in the gradient flow dynamics of PINNs, especially for problems with multi-scale solutions.\nWe theoretically prove that for two-layer fully connected neural networks with large hidden nodes, randomly initialized ISGD converges to a globally optimal solution for the quadratic loss function.\nEmpirical results demonstrate that ISGD works well in practice and compares favorably to other gradient-based optimization methods such as SGD and Adam, while can also effectively address the numerical stiffness in training dynamics via gradient descent.", + "primary_area": "machine learning ii", + "author": "Ye Li; Song-Can Chen; Sheng-Jun Huang", + "authorids": "", + "aff": "College of Computer Science and Technology/Artificial Intelligence, Nanjing University of Aeronautics and Astronautics; College of Computer Science and Technology/Artificial Intelligence, Nanjing University of Aeronautics and Astronautics; College of Computer Science and Technology/Artificial Intelligence, Nanjing University of Aeronautics and Astronautics", + "bibtex": "@article{Li_Chen_Huang_2023, title={Implicit Stochastic Gradient Descent for Training Physics-Informed Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26046}, DOI={10.1609/aaai.v37i7.26046}, abstractNote={Physics-informed neural networks (PINNs) have effectively been demonstrated in solving forward and inverse differential equation problems,\nbut they are still trapped in training failures when the target functions to be approximated exhibit high-frequency or multi-scale features.\nIn this paper, we propose to employ implicit stochastic gradient descent (ISGD) method to train PINNs for improving the stability of training process.\nWe heuristically analyze how ISGD overcome stiffness in the gradient flow dynamics of PINNs, especially for problems with multi-scale solutions.\nWe theoretically prove that for two-layer fully connected neural networks with large hidden nodes, randomly 
initialized ISGD converges to a globally optimal solution for the quadratic loss function.\nEmpirical results demonstrate that ISGD works well in practice and compares favorably to other gradient-based optimization methods such as SGD and Adam, while can also effectively address the numerical stiffness in training dynamics via gradient descent.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Ye and Chen, Song-Can and Huang, Sheng-Jun}, year={2023}, month={Jun.}, pages={8692-8700} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26046/25818", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26046", + "pdf_size": 428221, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18216546633700414071&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "nuaa.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "email": "nuaa.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "College of Computer Science and Technology/Artificial Intelligence", + "aff_unique_url": "http://www.nuaa.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26472", + "title": "Improved Algorithm for Regret Ratio Minimization in Multi-Objective Submodular Maximization", + "track": "main", + "status": "Technical", + "abstract": "Submodular maximization has attracted extensive attention due to its numerous applications in machine learning and artificial intelligence. Many real-world problems require maximizing multiple submodular objective functions at the same time. 
In such cases, a common approach is to select a representative subset of Pareto optimal solutions with different trade-offs among multiple objectives. To this end, in this paper, we investigate the regret ratio minimization (RRM) problem in multi-objective submodular maximization, which aims to find at most k solutions to best approximate all Pareto optimal solutions w.r.t. any linear combination of objective functions. We propose a novel HS-RRM algorithm by transforming RRM into HittingSet problems based on the notions of \u03b5-kernel and \u03b4-net, where any \u03b1-approximation algorithm for single-objective submodular maximization is used as an oracle. We improve upon the previous best-known bound on the maximum regret ratio (MRR) of the output of HS-RRM and show that the new bound is nearly asymptotically optimal for any fixed number d of objective functions. Experiments on real-world and synthetic data confirm that HS-RRM achieves lower MRRs than existing algorithms.", + "primary_area": "search and optimization", + "author": "Yanhao Wang; Jiping Zheng; Fanxu Meng", + "authorids": "", + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China + State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China", + "bibtex": "@article{Wang_Zheng_Meng_2023, title={Improved Algorithm for Regret Ratio Minimization in Multi-Objective Submodular Maximization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26472}, DOI={10.1609/aaai.v37i10.26472}, abstractNote={Submodular maximization has attracted extensive attention due to its numerous applications in machine learning and artificial intelligence. 
Many real-world problems require maximizing multiple submodular objective functions at the same time. In such cases, a common approach is to select a representative subset of Pareto optimal solutions with different trade-offs among multiple objectives. To this end, in this paper, we investigate the regret ratio minimization (RRM) problem in multi-objective submodular maximization, which aims to find at most k solutions to best approximate all Pareto optimal solutions w.r.t. any linear combination of objective functions. We propose a novel HS-RRM algorithm by transforming RRM into HittingSet problems based on the notions of \u03b5-kernel and \u03b4-net, where any \u03b1-approximation algorithm for single-objective submodular maximization is used as an oracle. We improve upon the previous best-known bound on the maximum regret ratio (MRR) of the output of HS-RRM and show that the new bound is nearly asymptotically optimal for any fixed number d of objective functions. Experiments on real-world and synthetic data confirm that HS-RRM achieves lower MRRs than existing algorithms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yanhao and Zheng, Jiping and Meng, Fanxu}, year={2023}, month={Jun.}, pages={12500-12508} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26472/26244", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26472", + "pdf_size": 268334, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9560472843365487909&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "dase.ecnu.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "email": "dase.ecnu.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;1", + "aff_unique_norm": "East China Normal University;Nanjing University of Aeronautics and Astronautics;Nanjing University", + "aff_unique_dep": "School of Data Science and 
Engineering;College of Computer Science and Technology;State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.ecnu.edu.cn;http://www.nuaa.edu.cn;http://www.nju.edu.cn", + "aff_unique_abbr": "ECNU;NUAA;NU", + "aff_campus_unique_index": "0;1+1;1", + "aff_campus_unique": "Shanghai;Nanjing", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25503", + "title": "Improved Algorithms for Maximum Satisfiability and Its Special Cases", + "track": "main", + "status": "Technical", + "abstract": "The Maximum Satisfiability (MAXSAT) problem is an optimization version of the Satisfiability problem (SAT) in which one is given a CNF formula with n variables and needs to find the maximum number of simultaneously satisfiable clauses. Recent works achieved significant progress in proving new upper bounds on the worst-case computational complexity of MAXSAT. All these works reduce general MAXSAT to a special case of MAXSAT where each variable appears a small number of times. So, it is important to design fast algorithms for (n,k)-MAXSAT to construct an efficient exact algorithm for MAXSAT. (n,k)-MAXSAT is a special case of MAXSAT where each variable appears at most k times in the input formula. \n\nFor the (n,3)-MAXSAT problem, we design a O*(1.1749^n) algorithm improving on the previous record running time of O*(1.191^n). For the (n,4)-MAXSAT problem, we construct a O*(1.3803^n) algorithm improving on the previous best running time of O*(1.4254^n). 
Using the results, we develop a O*(1.0911^L) algorithm for the MAXSAT where L is a length of the input formula which improves previous algorithm with O*(1.0927^L) running time.", + "primary_area": "constraint satisfaction and optimization", + "author": "Kirill Brilliantov; Vasily Alferov; Ivan Bliznets", + "authorids": "", + "aff": "Constructor University; Independent Researcher; Utrecht University", + "bibtex": "@article{Brilliantov_Alferov_Bliznets_2023, title={Improved Algorithms for Maximum Satisfiability and Its Special Cases}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25503}, DOI={10.1609/aaai.v37i4.25503}, abstractNote={The Maximum Satisfiability (MAXSAT) problem is an optimization version of the Satisfiability problem (SAT) in which one is given a CNF formula with n variables and needs to find the maximum number of simultaneously satisfiable clauses. Recent works achieved significant progress in proving new upper bounds on the worst-case computational complexity of MAXSAT. All these works reduce general MAXSAT to a special case of MAXSAT where each variable appears a small number of times. So, it is important to design fast algorithms for (n,k)-MAXSAT to construct an efficient exact algorithm for MAXSAT. (n,k)-MAXSAT is a special case of MAXSAT where each variable appears at most k times in the input formula. For the (n,3)-MAXSAT problem, we design a O*(1.1749^n) algorithm improving on the previous record running time of O*(1.191^n). For the (n,4)-MAXSAT problem, we construct a O*(1.3803^n) algorithm improving on the previous best running time of O*(1.4254^n). 
Using the results, we develop a O*(1.0911^L) algorithm for the MAXSAT where L is a length of the input formula which improves previous algorithm with O*(1.0927^L) running time.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brilliantov, Kirill and Alferov, Vasily and Bliznets, Ivan}, year={2023}, month={Jun.}, pages={3898-3905} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25503/25275", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25503", + "pdf_size": 148556, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17827064901266409196&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;gmail.com;gmail.com/i.bliznets", + "email": "gmail.com;gmail.com;gmail.com/i.bliznets", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Constructor University;Independent Researcher;Utrecht University", + "aff_unique_dep": ";;", + "aff_unique_url": ";;https://www.uu.nl", + "aff_unique_abbr": ";;UU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1", + "aff_country_unique": ";Netherlands" + }, + { + "id": "article-26035", + "title": "Improved Kernel Alignment Regret Bound for Online Kernel Learning", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we improve the kernel alignment regret bound for online kernel learning in the regime of the Hinge loss function. Previous algorithm achieves a regret of O((A_TT ln T)^{1/4}) at a computational complexity (space and per-round time) of O((A_TT ln T)^{1/2}), where A_T is called kernel alignment. We propose an algorithm whose regret bound and computational complexity are better than previous results. Our results depend on the decay rate of eigenvalues of the kernel matrix. 
If the eigenvalues of the kernel matrix decay exponentially, then our algorithm enjoys a regret of O((A_T)^{1/2}) at a computational complexity of O((ln T)^2). Otherwise, our algorithm enjoys a regret of O((A_TT)^{1/4}) at a computational complexity of O((A_TT)^{1/2}). We extend our algorithm to batch learning and obtain a O(T^{-1}(E[A_T])^{1/2}) excess risk bound which improves the previous O(T^{-1/2}) bound.", + "primary_area": "machine learning ii", + "author": "Junfan Li; Shizhong Liao", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin 300350, China; College of Intelligence and Computing, Tianjin University, Tianjin 300350, China", + "bibtex": "@article{Li_Liao_2023, title={Improved Kernel Alignment Regret Bound for Online Kernel Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26035}, DOI={10.1609/aaai.v37i7.26035}, abstractNote={In this paper, we improve the kernel alignment regret bound for online kernel learning in the regime of the Hinge loss function. Previous algorithm achieves a regret of O((A_TT ln T)^{1/4}) at a computational complexity (space and per-round time) of O((A_TT ln T)^{1/2}), where A_T is called kernel alignment. We propose an algorithm whose regret bound and computational complexity are better than previous results. Our results depend on the decay rate of eigenvalues of the kernel matrix. If the eigenvalues of the kernel matrix decay exponentially, then our algorithm enjoys a regret of O((A_T)^{1/2}) at a computational complexity of O((ln T)^2). Otherwise, our algorithm enjoys a regret of O((A_TT)^{1/4}) at a computational complexity of O((A_TT)^{1/2}). 
We extend our algorithm to batch learning and obtain a O(T^{-1}(E[A_T])^{1/2}) excess risk bound which improves the previous O(T^{-1/2}) bound.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Junfan and Liao, Shizhong}, year={2023}, month={Jun.}, pages={8597-8604} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26035/25807", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26035", + "pdf_size": 200507, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12282334561515201169&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Tianjin University", + "aff_unique_dep": "College of Intelligence and Computing", + "aff_unique_url": "http://www.tju.edu.cn", + "aff_unique_abbr": "Tianjin University", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Tianjin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26398", + "title": "Improvement-Focused Causal Recourse (ICR)", + "track": "main", + "status": "Technical", + "abstract": "Algorithmic recourse recommendations inform stakeholders of how to act to revert unfavorable decisions. However, existing methods may recommend actions that lead to acceptance (i.e., revert the model's decision) but do not lead to improvement (i.e., may not revert the underlying real-world state). To recommend such actions is to recommend fooling the predictor. We introduce a novel method, Improvement-Focused Causal Recourse (ICR), which involves a conceptual shift: Firstly, we require ICR recommendations to guide toward improvement. Secondly, we do not tailor the recommendations to be accepted by a specific predictor. 
Instead, we leverage causal knowledge to design decision systems that predict accurately pre- and post-recourse, such that improvement guarantees translate into acceptance guarantees. Curiously, optimal pre-recourse classifiers are robust to ICR actions and thus suitable post-recourse. In semi-synthetic experiments, we demonstrate that given correct causal knowledge ICR, in contrast to existing approaches, guides toward both acceptance and improvement.", + "primary_area": "philosophy and ethics of ai", + "author": "Gunnar K\u00f6nig; Timo Freiesleben; Moritz Grosse-Wentrup", + "authorids": "", + "aff": "Munich Center for Machine Learning (MCML), LMU Munich+Research Group Neuroinformatics, University of Vienna+Data Science @ Uni Vienna, Vienna CogSciHub; Munich Center for Mathematical Philosophy (MCMP), LMU Munich+Cluster of Excellence Machine Learning, University of T\u00fcbingen+Graduate School of Systemic Neurosciences, LMU Munich; Research Group Neuroinformatics, University of Vienna+Data Science @ Uni Vienna, Vienna CogSciHub", + "bibtex": "@article{K\u00f6nig_Freiesleben_Grosse-Wentrup_2023, title={Improvement-Focused Causal Recourse (ICR)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26398}, DOI={10.1609/aaai.v37i10.26398}, abstractNote={Algorithmic recourse recommendations inform stakeholders of how to act to revert unfavorable decisions. However, existing methods may recommend actions that lead to acceptance (i.e., revert the model\u2019s decision) but do not lead to improvement (i.e., may not revert the underlying real-world state). To recommend such actions is to recommend fooling the predictor. We introduce a novel method, Improvement-Focused Causal Recourse (ICR), which involves a conceptual shift: Firstly, we require ICR recommendations to guide toward improvement. Secondly, we do not tailor the recommendations to be accepted by a specific predictor. 
Instead, we leverage causal knowledge to design decision systems that predict accurately pre- and post-recourse, such that improvement guarantees translate into acceptance guarantees. Curiously, optimal pre-recourse classifiers are robust to ICR actions and thus suitable post-recourse. In semi-synthetic experiments, we demonstrate that given correct causal knowledge ICR, in contrast to existing approaches, guides toward both acceptance and improvement.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={K\u00f6nig, Gunnar and Freiesleben, Timo and Grosse-Wentrup, Moritz}, year={2023}, month={Jun.}, pages={11847-11855} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26398/26170", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26398", + "pdf_size": 418003, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15143730328833492494&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "pm.me; ; ", + "email": "pm.me; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1+1;0+2+0;1+1", + "aff_unique_norm": "LMU Munich;University of Vienna;University of T\u00fcbingen", + "aff_unique_dep": "Munich Center for Machine Learning;Research Group Neuroinformatics;Cluster of Excellence Machine Learning", + "aff_unique_url": "https://www.lmu.de;https://www.univie.ac.at;https://www.uni-tuebingen.de", + "aff_unique_abbr": "LMU;Uni Vienna;", + "aff_campus_unique_index": "0+2;0+0;2", + "aff_campus_unique": "Munich;;Vienna", + "aff_country_unique_index": "0+1+1;0+0+0;1+1", + "aff_country_unique": "Germany;Austria" + }, + { + "id": "article-27006", + "title": "Improving Adversarial Robustness to Sensitivity and Invariance Attacks with Deep Metric Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Intentionally crafted adversarial samples have effectively exploited 
weaknesses in deep neural networks. A standard method in adversarial robustness assumes a framework to defend against samples crafted by minimally perturbing a sample such that its corresponding model output changes. These sensitivity attacks exploit the model's sensitivity toward task-irrelevant features. Another form of adversarial sample can be crafted via invariance attacks, which exploit the model underestimating the importance of relevant features. Previous literature has indicated a tradeoff in defending against both attack types within a strictly L-p bounded defense. To promote robustness toward both types of attacks beyond Euclidean distance metrics, we use metric learning to frame adversarial regularization as an optimal transport problem. Our preliminary results indicate that regularizing over invariant perturbations in our framework improves both invariant and sensitivity defense.", + "primary_area": "", + "author": "Anaelia Ovalle; Evan Czyzycki; Cho-Jui Hsieh", + "authorids": "", + "aff": "University of California, Los Angeles; University of California, Los Angeles; University of California, Los Angeles", + "bibtex": "@article{Ovalle_Czyzycki_Hsieh_2024, title={Improving Adversarial Robustness to Sensitivity and Invariance Attacks with Deep Metric Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27006}, DOI={10.1609/aaai.v37i13.27006}, abstractNote={Intentionally crafted adversarial samples have effectively exploited weaknesses in deep neural networks. A standard method in adversarial robustness assumes a framework to defend against samples crafted by minimally perturbing a sample such that its corresponding model output changes. These sensitivity attacks exploit the model\u2019s sensitivity toward task-irrelevant features. Another form of adversarial sample can be crafted via invariance attacks, which exploit the model underestimating the importance of relevant features. 
Previous literature has indicated a tradeoff in defending against both attack types within a strictly L-p bounded defense. To promote robustness toward both types of attacks beyond Euclidean distance metrics, we use metric learning to frame adversarial regularization as an optimal transport problem. Our preliminary results indicate that regularizing over invariant perturbations in our framework improves both invariant and sensitivity defense.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ovalle, Anaelia and Czyzycki, Evan and Hsieh, Cho-Jui}, year={2024}, month={Jul.}, pages={16292-16293} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27006/26778", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27006", + "pdf_size": 165767, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:g58pBxVEhi8J:scholar.google.com/&scioq=Improving+Adversarial+Robustness+to+Sensitivity+and+Invariance+Attacks+with+Deep+Metric+Learning+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "cs.ucla.edu;cs.ucla.edu;cs.ucla.edu", + "email": "cs.ucla.edu;cs.ucla.edu;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, Los Angeles", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucla.edu", + "aff_unique_abbr": "UCLA", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26738", + "title": "Improving Adversarial Robustness with Self-Paced Hard-Class Pair Reweighting", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deep Neural Networks are vulnerable to adversarial attacks. Among many defense strategies, adversarial training with untargeted attacks is one of the most effective methods. 
Theoretically, adversarial perturbation in untargeted attacks can be added along arbitrary directions and the predicted labels of untargeted attacks should be unpredictable. However, we find that the naturally imbalanced inter-class semantic similarity makes those hard-class pairs become virtual targets of each other. This study investigates the impact of such closely-coupled classes on adversarial attacks and develops a self-paced reweighting strategy in adversarial training accordingly. Specifically, we propose to upweight hard-class pair losses in model optimization, which prompts learning discriminative features from hard classes. We further incorporate a term to quantify hard-class pair consistency in adversarial training, which greatly boosts model robustness. Extensive experiments show that the proposed adversarial training method achieves superior robustness performance over state-of-the-art defenses against a wide range of adversarial attacks. The code of the proposed SPAT is published at https://github.com/puerrrr/Self-Paced-Adversarial-Training.", + "primary_area": "safe and robust ai", + "author": "Pengyue Hou; Jie Han; Xingyu Li", + "authorids": "", + "aff": "University of Alberta; University of Alberta; University of Alberta", + "bibtex": "@article{Hou_Han_Li_2023, title={Improving Adversarial Robustness with Self-Paced Hard-Class Pair Reweighting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26738}, DOI={10.1609/aaai.v37i12.26738}, abstractNote={Deep Neural Networks are vulnerable to adversarial attacks. Among many defense strategies, adversarial training with untargeted attacks is one of the most effective methods. Theoretically, adversarial perturbation in untargeted attacks can be added along arbitrary directions and the predicted labels of untargeted attacks should be unpredictable. 
However, we find that the naturally imbalanced inter-class semantic similarity makes those hard-class pairs become virtual targets of each other. This study investigates the impact of such closely-coupled classes on adversarial attacks and develops a self-paced reweighting strategy in adversarial training accordingly. Specifically, we propose to upweight hard-class pair losses in model optimization, which prompts learning discriminative features from hard classes. We further incorporate a term to quantify hard-class pair consistency in adversarial training, which greatly boosts model robustness. Extensive experiments show that the proposed adversarial training method achieves superior robustness performance over state-of-the-art defenses against a wide range of adversarial attacks. The code of the proposed SPAT is published at https://github.com/puerrrr/Self-Paced-Adversarial-Training.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hou, Pengyue and Han, Jie and Li, Xingyu}, year={2023}, month={Jun.}, pages={14883-14891} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26738/26510", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26738", + "pdf_size": 270419, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13942080190968587616&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ualberta.ca;ualberta.ca;ualberta.ca", + "email": "ualberta.ca;ualberta.ca;ualberta.ca", + "github": "https://github.com/puerrrr/Self-Paced-Adversarial-Training", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Alberta", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ualberta.ca", + "aff_unique_abbr": "UAlberta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26624", + "title": "Improving 
Biomedical Entity Linking with Cross-Entity Interaction", + "track": "main", + "status": "Technical", + "abstract": "Biomedical entity linking (EL) is the task of linking mentions in a biomedical document to corresponding entities in a knowledge base (KB). The challenge in biomedical EL lies in leveraging mention context to select the most appropriate entity among possible candidates. Although some EL models achieve competitive results by retrieving candidate entities and then exploiting context to re-rank them, these re-ranking models concatenate mention context with one candidate at a time. They lack fine-grained interaction among candidates, and potentially cannot handle ambiguous mentions when facing candidates both with high lexical similarity. We cope with this issue using a re-ranking model based on prompt tuning, which represents mention context and all candidates at once, letting candidates in comparison attend to each other. We also propose a KB-enhanced self-supervised pretraining strategy. Instead of large-scale pretraining on biomedical EL data in previous work, we use masked language modeling with synonyms from KB. Our method achieves state-of-the-art results on 3 biomedical EL datasets: NCBI disease, BC5CDR and COMETA, showing the effectiveness of cross-entity interaction and KB-enhanced pretraining strategy. 
Code is available at https://github.com/HITsz-TMG/Prompt-BioEL.", + "primary_area": "speech natural language processing", + "author": "Zhenran Xu; Yulin Chen; Baotian Hu", + "authorids": "", + "aff": "Harbin Institute of Technology (Shenzhen), Shenzhen, China; Harbin Institute of Technology (Shenzhen), Shenzhen, China; Harbin Institute of Technology (Shenzhen), Shenzhen, China", + "bibtex": "@article{Xu_Chen_Hu_2023, title={Improving Biomedical Entity Linking with Cross-Entity Interaction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26624}, DOI={10.1609/aaai.v37i11.26624}, abstractNote={Biomedical entity linking (EL) is the task of linking mentions in a biomedical document to corresponding entities in a knowledge base (KB). The challenge in biomedical EL lies in leveraging mention context to select the most appropriate entity among possible candidates. Although some EL models achieve competitive results by retrieving candidate entities and then exploiting context to re-rank them, these re-ranking models concatenate mention context with one candidate at a time. They lack fine-grained interaction among candidates, and potentially cannot handle ambiguous mentions when facing candidates both with high lexical similarity. We cope with this issue using a re-ranking model based on prompt tuning, which represents mention context and all candidates at once, letting candidates in comparison attend to each other. We also propose a KB-enhanced self-supervised pretraining strategy. Instead of large-scale pretraining on biomedical EL data in previous work, we use masked language modeling with synonyms from KB. Our method achieves state-of-the-art results on 3 biomedical EL datasets: NCBI disease, BC5CDR and COMETA, showing the effectiveness of cross-entity interaction and KB-enhanced pretraining strategy. 
Code is available at https://github.com/HITsz-TMG/Prompt-BioEL.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Zhenran and Chen, Yulin and Hu, Baotian}, year={2023}, month={Jun.}, pages={13869-13877} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26624/26396", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26624", + "pdf_size": 413792, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4858273045382063044&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.hit.edu.cn;stu.hit.edu.cn;hit.edu.cn", + "email": "stu.hit.edu.cn;stu.hit.edu.cn;hit.edu.cn", + "github": "https://github.com/HITsz-TMG/Prompt-BioEL", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://en.hhit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25124", + "title": "Improving Crowded Object Detection via Copy-Paste", + "track": "main", + "status": "Technical", + "abstract": "Crowdedness caused by overlapping among similar objects is a ubiquitous challenge in the field of 2D visual object detection. In this paper, we first underline two main effects of the crowdedness issue: 1) IoU-confidence correlation disturbances (ICD) and 2) confused de-duplication (CDD). Then we explore a pathway of cracking these nuts from the perspective of data augmentation. Primarily, a particular copy- paste scheme is proposed towards making crowded scenes. 
Based on this operation, we first design a \"consensus learning\" method to further resist the ICD problem and then find out the pasting process naturally reveals a pseudo \"depth\" of object in the scene, which can be potentially used for alleviating CDD dilemma. Both methods are derived from magical using of the copy-pasting without extra cost for hand-labeling. Experiments show that our approach can easily improve the state-of-the-art detector in typical crowded detection task by more than 2% without any bells and whistles. Moreover, this work can outperform existing data augmentation strategies in crowded scenario.", + "primary_area": "computer vision i", + "author": "Jiangfan Deng; Dewen Fan; Xiaosong Qiu; Feng Zhou", + "authorids": "", + "aff": "Algorithm Research, Aibee Inc.; Algorithm Research, Aibee Inc.; Algorithm Research, Aibee Inc.; Algorithm Research, Aibee Inc.", + "bibtex": "@article{Deng_Fan_Qiu_Zhou_2023, title={Improving Crowded Object Detection via Copy-Paste}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25124}, DOI={10.1609/aaai.v37i1.25124}, abstractNote={Crowdedness caused by overlapping among similar objects is a ubiquitous challenge in the field of 2D visual object detection. In this paper, we first underline two main effects of the crowdedness issue: 1) IoU-confidence correlation disturbances (ICD) and 2) confused de-duplication (CDD). Then we explore a pathway of cracking these nuts from the perspective of data augmentation. Primarily, a particular copy- paste scheme is proposed towards making crowded scenes. Based on this operation, we first design a "consensus learning" method to further resist the ICD problem and then find out the pasting process naturally reveals a pseudo "depth" of object in the scene, which can be potentially used for alleviating CDD dilemma. Both methods are derived from magical using of the copy-pasting without extra cost for hand-labeling. 
Experiments show that our approach can easily improve the state-of-the-art detector in typical crowded detection task by more than 2% without any bells and whistles. Moreover, this work can outperform existing data augmentation strategies in crowded scenario.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Jiangfan and Fan, Dewen and Qiu, Xiaosong and Zhou, Feng}, year={2023}, month={Jun.}, pages={497-505} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25124/24896", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25124", + "pdf_size": 3670824, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8403335905705760352&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "foxmail.com;aibee.com;aibee.com;aibee.com", + "email": "foxmail.com;aibee.com;aibee.com;aibee.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Aibee Inc.", + "aff_unique_dep": "Algorithm Research", + "aff_unique_url": "https://www.aibee.com", + "aff_unique_abbr": "Aibee", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27043", + "title": "Improving Dialogue Intent Classification with a Knowledge-Enhanced Multifactor Graph Model (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Although current Graph Neural Network (GNN) based models achieved good performances in Dialogue Intent Classification (DIC), they leaf the inherent domain-specific knowledge out of consideration, leading to the lack of ability of acquiring fine-grained semantic information. In this paper, we propose a Knowledge-Enhanced Multifactor Graph (KEMG) Model for DIC. 
We firstly present a knowledge-aware utterance encoder with the help of a domain-specific knowledge graph, fusing token-level and entity-level semantic information, then design a heterogeneous dialogue graph encoder by explicitly modeling several factors that matter to contextual modeling of dialogues. Experiment results show that our proposed method outperforms other GNN-based methods on a dataset collected from a real-world online customer service dialogue system on the e-commerce website, JD.", + "primary_area": "", + "author": "Huinan Xu; Jinhui Pang; Shuangyong Song; Bo Zou", + "authorids": "", + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; Department of Big Data and AI, China Telecom; JD AI Research", + "bibtex": "@article{Xu_Pang_Song_Zou_2024, title={Improving Dialogue Intent Classification with a Knowledge-Enhanced Multifactor Graph Model (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27043}, DOI={10.1609/aaai.v37i13.27043}, abstractNote={Although current Graph Neural Network (GNN) based models achieved good performances in Dialogue Intent Classification (DIC), they leaf the inherent domain-specific knowledge out of consideration, leading to the lack of ability of acquiring fine-grained semantic information. In this paper, we propose a Knowledge-Enhanced Multifactor Graph (KEMG) Model for DIC. We firstly present a knowledge-aware utterance encoder with the help of a domain-specific knowledge graph, fusing token-level and entity-level semantic information, then design a heterogeneous dialogue graph encoder by explicitly modeling several factors that matter to contextual modeling of dialogues. 
Experiment results show that our proposed method outperforms other GNN-based methods on a dataset collected from a real-world online customer service dialogue system on the e-commerce website, JD.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Huinan and Pang, Jinhui and Song, Shuangyong and Zou, Bo}, year={2024}, month={Jul.}, pages={16366-16367} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27043/26815", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27043", + "pdf_size": 217091, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15433923342116080972&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "bit.edu.cn;bit.edu.cn;chinatelecom.cn;jd.com", + "email": "bit.edu.cn;bit.edu.cn;chinatelecom.cn;jd.com", + "github": "", + "project": "https://www.jd.com/", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "Beijing Institute of Technology;China Telecom;JD AI Research", + "aff_unique_dep": ";Department of Big Data and AI;", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.chinatelecom.com.cn;https://www.jd.com", + "aff_unique_abbr": "BIT;CT;JD AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26644", + "title": "Improving Distantly Supervised Relation Extraction by Natural Language Inference", + "track": "main", + "status": "Technical", + "abstract": "To reduce human annotations for relation extraction (RE) tasks, distantly supervised approaches have been proposed, while struggling with low performance. In this work, we propose a novel DSRE-NLI framework, which considers both distant supervision from existing knowledge bases and indirect supervision from pretrained language models for other tasks. 
DSRE-NLI energizes an off-the-shelf natural language inference (NLI) engine with a semi-automatic relation verbalization (SARV) mechanism to provide indirect supervision and further consolidates the distant annotations to benefit multi-classification RE models. The NLI-based indirect supervision acquires only one relation verbalization template from humans as a semantically general template for each relationship, and then the template set is enriched by high-quality textual patterns automatically mined from the distantly annotated corpus. With two simple and effective data consolidation strategies, the quality of training data is substantially improved. Extensive experiments demonstrate that the proposed framework significantly improves the SOTA performance (up to 7.73% of F1) on distantly supervised RE benchmark datasets. Our code is available at https://github.com/kangISU/DSRE-NLI.", + "primary_area": "speech natural language processing", + "author": "Kang Zhou; Qiao Qiao; Yuepei Li; Qi Li", + "authorids": "", + "aff": "Department of Computer Science, Iowa State University, Ames, Iowa, USA; Department of Computer Science, Iowa State University, Ames, Iowa, USA; Department of Computer Science, Iowa State University, Ames, Iowa, USA; Department of Computer Science, Iowa State University, Ames, Iowa, USA", + "bibtex": "@article{Zhou_Qiao_Li_Li_2023, title={Improving Distantly Supervised Relation Extraction by Natural Language Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26644}, DOI={10.1609/aaai.v37i11.26644}, abstractNote={To reduce human annotations for relation extraction (RE) tasks, distantly supervised approaches have been proposed, while struggling with low performance. In this work, we propose a novel DSRE-NLI framework, which considers both distant supervision from existing knowledge bases and indirect supervision from pretrained language models for other tasks. 
DSRE-NLI energizes an off-the-shelf natural language inference (NLI) engine with a semi-automatic relation verbalization (SARV) mechanism to provide indirect supervision and further consolidates the distant annotations to benefit multi-classification RE models. The NLI-based indirect supervision acquires only one relation verbalization template from humans as a semantically general template for each relationship, and then the template set is enriched by high-quality textual patterns automatically mined from the distantly annotated corpus. With two simple and effective data consolidation strategies, the quality of training data is substantially improved. Extensive experiments demonstrate that the proposed framework significantly improves the SOTA performance (up to 7.73% of F1) on distantly supervised RE benchmark datasets. Our code is available at https://github.com/kangISU/DSRE-NLI.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Kang and Qiao, Qiao and Li, Yuepei and Li, Qi}, year={2023}, month={Jun.}, pages={14047-14055} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26644/26416", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26644", + "pdf_size": 651635, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15841685090332365697&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "iastate.edu;iastate.edu;iastate.edu;iastate.edu", + "email": "iastate.edu;iastate.edu;iastate.edu;iastate.edu", + "github": "https://github.com/kangISU/DSRE-NLI", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Iowa State University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.iastate.edu", + "aff_unique_abbr": "ISU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Ames", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + 
}, + { + "id": "article-25107", + "title": "Improving Dynamic HDR Imaging with Fusion Transformer", + "track": "main", + "status": "Technical", + "abstract": "Reconstructing a High Dynamic Range (HDR) image from several Low Dynamic Range (LDR) images with different exposures is a challenging task, especially in the presence of camera and object motion. Though existing models using convolutional neural networks (CNNs) have made great progress, challenges still exist, e.g., ghosting artifacts. Transformers, originating from the field of natural language processing, have shown success in computer vision tasks, due to their ability to address a large receptive field even within a single layer. In this paper, we propose a transformer model for HDR imaging. Our pipeline includes three steps: alignment, fusion, and reconstruction. The key component is the HDR transformer module. Through experiments and ablation studies, we demonstrate that our model outperforms the state-of-the-art by large margins on several popular public datasets.", + "primary_area": "computer vision i", + "author": "Rufeng Chen; Bolun Zheng; Hua Zhang; Quan Chen; Chenggang Yan; Gregory Slabaugh; Shanxin Yuan", + "authorids": "", + "aff": "Hangzhou Dianzi University; Hangzhou Dianzi University; Hangzhou Dianzi University; Hangzhou Dianzi University; Hangzhou Dianzi University; Queen Mary University of London; Queen Mary University of London", + "bibtex": "@article{Chen_Zheng_Zhang_Chen_Yan_Slabaugh_Yuan_2023, title={Improving Dynamic HDR Imaging with Fusion Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25107}, DOI={10.1609/aaai.v37i1.25107}, abstractNote={Reconstructing a High Dynamic Range (HDR) image from several Low Dynamic Range (LDR) images with different exposures is a challenging task, especially in the presence of camera and object motion. 
Though existing models using convolutional neural networks (CNNs) have made great progress, challenges still exist, e.g., ghosting artifacts. Transformers, originating from the field of natural language processing, have shown success in computer vision tasks, due to their ability to address a large receptive field even within a single layer. In this paper, we propose a transformer model for HDR imaging. Our pipeline includes three steps: alignment, fusion, and reconstruction. The key component is the HDR transformer module. Through experiments and ablation studies, we demonstrate that our model outperforms the state-of-the-art by large margins on several popular public datasets.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Rufeng and Zheng, Bolun and Zhang, Hua and Chen, Quan and Yan, Chenggang and Slabaugh, Gregory and Yuan, Shanxin}, year={2023}, month={Jun.}, pages={340-349} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25107/24879", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25107", + "pdf_size": 8613901, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15752454890708516994&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;qmul.ac.uk;qmul.ac.uk", + "email": "hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;hdu.edu.cn;qmul.ac.uk;qmul.ac.uk", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;1", + "aff_unique_norm": "Hangzhou Dianzi University;Queen Mary University of London", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hdu.edu.cn/;https://www.qmul.ac.uk", + "aff_unique_abbr": "HGHDU;QMUL", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";London", + "aff_country_unique_index": "0;0;0;0;0;1;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26637", + "title": "Improving End-to-End 
Speech Translation by Leveraging Auxiliary Speech and Text Data", + "track": "main", + "status": "Technical", + "abstract": "We present a method for introducing a text encoder into pre-trained end-to-end speech translation systems. It enhances the ability of adapting one modality (i.e., source-language speech) to another (i.e., source-language text). Thus, the speech translation model can learn from both unlabeled and labeled data, especially when the source-language text data is abundant. Beyond this, we present a denoising method to build a robust text encoder that can deal with both normal and noisy text data. Our system sets new state-of-the-arts on the MuST-C En-De, En-Fr, and LibriSpeech En-Fr tasks.", + "primary_area": "speech natural language processing", + "author": "Yuhao Zhang; Chen Xu; Bojie Hu; Chunliang Zhang; Tong Xiao; Jingbo Zhu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Northeastern University, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China; Tencent Minority-Mandarin Translation, Beijing, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, China", + "bibtex": "@article{Zhang_Xu_Hu_Zhang_Xiao_Zhu_2023, title={Improving End-to-End Speech Translation by Leveraging Auxiliary Speech and Text Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26637}, DOI={10.1609/aaai.v37i11.26637}, abstractNote={We present a method for introducing a text encoder into pre-trained end-to-end speech translation systems. It enhances the ability of adapting one modality (i.e., source-language speech) to another (i.e., source-language text). 
Thus, the speech translation model can learn from both unlabeled and labeled data, especially when the source-language text data is abundant. Beyond this, we present a denoising method to build a robust text encoder that can deal with both normal and noisy text data. Our system sets new state-of-the-arts on the MuST-C En-De, En-Fr, and LibriSpeech En-Fr tasks.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yuhao and Xu, Chen and Hu, Bojie and Zhang, Chunliang and Xiao, Tong and Zhu, Jingbo}, year={2023}, month={Jun.}, pages={13984-13992} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26637/26409", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26637", + "pdf_size": 155936, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16710328976530011953&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;outlook.com;tencent.com;mail.neu.edu.cn;mail.neu.edu.cn;mail.neu.edu.cn", + "email": "gmail.com;outlook.com;tencent.com;mail.neu.edu.cn;mail.neu.edu.cn;mail.neu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0+2;0+2;0+2", + "aff_unique_norm": "Northeastern University;Tencent;NiuTrans Research", + "aff_unique_dep": "School of Computer Science and Engineering;Minority-Mandarin Translation;", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.tencent.com;", + "aff_unique_abbr": "NEU;Tencent;", + "aff_campus_unique_index": "0;0;1;0;0;0", + "aff_campus_unique": "Shenyang;Beijing;", + "aff_country_unique_index": "0;0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26652", + "title": "Improving Fairness in Information Exposure by Adding Links", + "track": "aaai special track", + "status": "Technical", + "abstract": "Fairness in influence maximization has been a very active research topic recently. 
Most works in this context study the question of how to find seeding strategies (deterministic or probabilistic) such that nodes or communities in the network get their fair share of coverage. Different fairness criteria have been used in this context. All these works assume that the entity that is spreading the information has an inherent interest in spreading the information fairly, otherwise why would they want to use the developed fair algorithms? This assumption may however be flawed in reality -- the spreading entity may be purely efficiency-oriented. In this paper we propose to study two optimization problems with the goal to modify the network structure by adding links in such a way that efficiency-oriented information spreading becomes automatically fair. We study the proposed optimization problems both from a theoretical and experimental perspective, that is, we give several hardness and hardness of approximation results, provide efficient algorithms for some special cases, and more importantly provide heuristics for solving one of the problems in practice. In our experimental study we then first compare the proposed heuristics against each other and establish the most successful one. In a second experiment, we then show that our approach can be very successful in practice. That is, we show that already after adding a few edges to the networks the greedy algorithm that purely maximizes spread surpasses all fairness-tailored algorithms in terms of ex-post fairness. 
Maybe surprisingly, we even show that our approach achieves ex-post fairness values that are comparable or even better than the ex-ante fairness values of the currently most efficient algorithms that optimize ex-ante fairness.", + "primary_area": "ai for social impact", + "author": "Ruben Becker; Gianlorenzo D'Angelo; Sajjad Ghobadi", + "authorids": "", + "aff": "Ca\u2019 Foscari University of Venice, Italy; Gran Sasso Science Institute, L\u2019Aquila, Italy; Gran Sasso Science Institute, L\u2019Aquila, Italy", + "bibtex": "@article{Becker_D\u2019Angelo_Ghobadi_2023, title={Improving Fairness in Information Exposure by Adding Links}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26652}, DOI={10.1609/aaai.v37i12.26652}, abstractNote={Fairness in influence maximization has been a very active research topic recently. Most works in this context study the question of how to find seeding strategies (deterministic or probabilistic) such that nodes or communities in the network get their fair share of coverage. Different fairness criteria have been used in this context. All these works assume that the entity that is spreading the information has an inherent interest in spreading the information fairly, otherwise why would they want to use the developed fair algorithms? This assumption may however be flawed in reality -- the spreading entity may be purely efficiency-oriented. In this paper we propose to study two optimization problems with the goal to modify the network structure by adding links in such a way that efficiency-oriented information spreading becomes automatically fair. We study the proposed optimization problems both from a theoretical and experimental perspective, that is, we give several hardness and hardness of approximation results, provide efficient algorithms for some special cases, and more importantly provide heuristics for solving one of the problems in practice. 
In our experimental study we then first compare the proposed heuristics against each other and establish the most successful one. In a second experiment, we then show that our approach can be very successful in practice. That is, we show that already after adding a few edges to the networks the greedy algorithm that purely maximizes spread surpasses all fairness-tailored algorithms in terms of ex-post fairness. Maybe surprisingly, we even show that our approach achieves ex-post fairness values that are comparable or even better than the ex-ante fairness values of the currently most efficient algorithms that optimize ex-ante fairness.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Becker, Ruben and D\u2019Angelo, Gianlorenzo and Ghobadi, Sajjad}, year={2023}, month={Jun.}, pages={14119-14126} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26652/26424", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26652", + "pdf_size": 657658, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1790831174383451881&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "unive.it;gssi.it;gssi.it", + "email": "unive.it;gssi.it;gssi.it", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Ca\u2019 Foscari University of Venice;Gran Sasso Science Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unive.it;https://www.gssi.it", + "aff_unique_abbr": "Ca\u2019 Foscari;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";L\u2019Aquila", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26661", + "title": "Improving Interpretability of Deep Sequential Knowledge Tracing Models with Question-centric Cognitive Representations", + "track": "aaai special track", + "status": "Technical", + "abstract": "Knowledge tracing (KT) is a crucial 
technique to predict students\u2019 future performance by observing their historical learning processes. Due to the powerful representation ability of deep neural networks, remarkable progress has been made by using deep learning techniques to solve the KT problem. The majority of existing approaches rely on the homogeneous question assumption that questions have equivalent contributions if they share the same set of knowledge components. Unfortunately, this assumption is inaccurate in real-world educational scenarios. Furthermore, it is very challenging to interpret the prediction results from the existing deep learning based KT models. Therefore, in this paper, we present QIKT, a question-centric interpretable KT model to address the above challenges. The proposed QIKT approach explicitly models students\u2019 knowledge state variations at a \ufb01ne-grained level with question-sensitive cognitive representations that are jointly learned from a question-centric knowledge acquisition module and a question-centric problem solving module. Meanwhile, the QIKT utilizes an item response theory based prediction layer to generate interpretable prediction results. The proposed QIKT model is evaluated on three public real-world educational datasets. The results demonstrate that our approach is superior on the KT prediction task, and it outperforms a wide range of deep learning based KT models in terms of prediction accuracy with better model interpretability. 
To encourage reproducible results, we have provided all the datasets and code at https://pykt.org/.", + "primary_area": "ai for social impact", + "author": "Jiahao Chen; Zitao Liu; Shuyan Huang; Qiongqiong Liu; Weiqi Luo", + "authorids": "", + "aff": "TAL Education Group, Beijing, China; Guangdong Institute of Smart Education, Jinan University, Guangzhou, China; TAL Education Group, Beijing, China; TAL Education Group, Beijing, China; Guangdong Institute of Smart Education, Jinan University, Guangzhou, China", + "bibtex": "@article{Chen_Liu_Huang_Liu_Luo_2023, title={Improving Interpretability of Deep Sequential Knowledge Tracing Models with Question-centric Cognitive Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26661}, DOI={10.1609/aaai.v37i12.26661}, abstractNote={Knowledge tracing (KT) is a crucial technique to predict students\u2019 future performance by observing their historical learning processes. Due to the powerful representation ability of deep neural networks, remarkable progress has been made by using deep learning techniques to solve the KT problem. The majority of existing approaches rely on the homogeneous question assumption that questions have equivalent contributions if they share the same set of knowledge components. Unfortunately, this assumption is inaccurate in real-world educational scenarios. Furthermore, it is very challenging to interpret the prediction results from the existing deep learning based KT models. Therefore, in this paper, we present QIKT, a question-centric interpretable KT model to address the above challenges. The proposed QIKT approach explicitly models students\u2019 knowledge state variations at a \ufb01ne-grained level with question-sensitive cognitive representations that are jointly learned from a question-centric knowledge acquisition module and a question-centric problem solving module. 
Meanwhile, the QIKT utilizes an item response theory based prediction layer to generate interpretable prediction results. The proposed QIKT model is evaluated on three public real-world educational datasets. The results demonstrate that our approach is superior on the KT prediction task, and it outperforms a wide range of deep learning based KT models in terms of prediction accuracy with better model interpretability. To encourage reproducible results, we have provided all the datasets and code at https://pykt.org/.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jiahao and Liu, Zitao and Huang, Shuyan and Liu, Qiongqiong and Luo, Weiqi}, year={2023}, month={Jun.}, pages={14196-14204} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26661/26433", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26661", + "pdf_size": 776240, + "gs_citation": 61, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10134574777710973358&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "tal.com;jnu.edu.cn;tal.com;tal.com;jnu.edu.cn", + "email": "tal.com;jnu.edu.cn;tal.com;tal.com;jnu.edu.cn", + "github": "", + "project": "https://pykt.org/", + "author_num": 5, + "aff_unique_index": "0;1;0;0;1", + "aff_unique_norm": "TAL Education Group;Jinan University", + "aff_unique_dep": ";Guangdong Institute of Smart Education", + "aff_unique_url": "https://www.tal.com;http://www.jnu.edu.cn", + "aff_unique_abbr": "TAL;JNU", + "aff_campus_unique_index": "0;1;0;0;1", + "aff_campus_unique": "Beijing;Guangzhou", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26586", + "title": "Improving Interpretability via Explicit Word Interaction Graph Layer", + "track": "main", + "status": "Technical", + "abstract": "Recent NLP literature has seen growing interest in improving model interpretability. 
Along this direction, we propose a trainable neural network layer that learns a global interaction graph between words and then selects more informative words using the learned word interactions. Our layer, we call WIGRAPH, can plug into any neural network-based NLP text classifiers right after its word embedding layer. Across multiple SOTA NLP models and various NLP datasets, we demonstrate that adding the WIGRAPH layer substantially improves NLP models' interpretability and enhances models' prediction performance at the same time.", + "primary_area": "speech natural language processing", + "author": "Arshdeep Sekhon; Hanjie Chen; Aman Shrivastava; Zhe Wang; Yangfeng Ji; Yanjun Qi", + "authorids": "", + "aff": "University of Virginia, Charlottesville, USA; University of Virginia, Charlottesville, USA; University of Virginia, Charlottesville, USA; University of Virginia, Charlottesville, USA; University of Virginia, Charlottesville, USA; University of Virginia, Charlottesville, USA", + "bibtex": "@article{Sekhon_Chen_Shrivastava_Wang_Ji_Qi_2023, title={Improving Interpretability via Explicit Word Interaction Graph Layer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26586}, DOI={10.1609/aaai.v37i11.26586}, abstractNote={Recent NLP literature has seen growing interest in improving model interpretability. Along this direction, we propose a trainable neural network layer that learns a global interaction graph between words and then selects more informative words using the learned word interactions. Our layer, we call WIGRAPH, can plug into any neural network-based NLP text classifiers right after its word embedding layer. 
Across multiple SOTA NLP models and various NLP datasets, we demonstrate that adding the WIGRAPH layer substantially improves NLP models\u2019 interpretability and enhances models\u2019 prediction performance at the same time.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sekhon, Arshdeep and Chen, Hanjie and Shrivastava, Aman and Wang, Zhe and Ji, Yangfeng and Qi, Yanjun}, year={2023}, month={Jun.}, pages={13528-13537} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26586/26358", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26586", + "pdf_size": 288086, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=72007859824363674&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "virginia.edu;virginia.edu;virginia.edu;virginia.edu;virginia.edu;virginia.edu", + "email": "virginia.edu;virginia.edu;virginia.edu;virginia.edu;virginia.edu;virginia.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Virginia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.virginia.edu", + "aff_unique_abbr": "UVA", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Charlottesville", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25951", + "title": "Improving Long-Horizon Imitation through Instruction Prediction", + "track": "main", + "status": "Technical", + "abstract": "Complex, long-horizon planning and its combinatorial nature pose steep challenges for learning-based agents. Difficulties in such settings are exacerbated in low data regimes where over-fitting stifles generalization and compounding errors hurt accuracy. In this work, we explore the use of an often unused source of auxiliary supervision: language. 
Inspired by recent advances in transformer-based models, we train agents with an instruction prediction loss that encourages learning temporally extended representations that operate at a high level of abstraction. Concretely, we demonstrate that instruction modeling significantly improves performance in planning environments when training with a limited number of demonstrations on the BabyAI and Crafter benchmarks. In further analysis we find that instruction modeling is most important for tasks that require complex reasoning, while understandably offering smaller gains in environments that require simple plans. More details and code can be found at \\url{https://github.com/jhejna/instruction-prediction}.", + "primary_area": "machine learning ii", + "author": "Joey Hejna; Pieter Abbeel; Lerrel Pinto", + "authorids": "", + "aff": "Stanford University; University of California, Berkeley; New York University", + "bibtex": "@article{Hejna_Abbeel_Pinto_2023, title={Improving Long-Horizon Imitation through Instruction Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25951}, DOI={10.1609/aaai.v37i7.25951}, abstractNote={Complex, long-horizon planning and its combinatorial nature pose steep challenges for learning-based agents. Difficulties in such settings are exacerbated in low data regimes where over-fitting stifles generalization and compounding errors hurt accuracy. In this work, we explore the use of an often unused source of auxiliary supervision: language. Inspired by recent advances in transformer-based models, we train agents with an instruction prediction loss that encourages learning temporally extended representations that operate at a high level of abstraction. Concretely, we demonstrate that instruction modeling significantly improves performance in planning environments when training with a limited number of demonstrations on the BabyAI and Crafter benchmarks. 
In further analysis we find that instruction modeling is most important for tasks that require complex reasoning, while understandably offering smaller gains in environments that require simple plans. More details and code can be found at \\url{https://github.com/jhejna/instruction-prediction}.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hejna, Joey and Abbeel, Pieter and Pinto, Lerrel}, year={2023}, month={Jun.}, pages={7857-7865} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25951/25723", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25951", + "pdf_size": 287489, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3064485844658227229&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.stanford.edu;berkeley.edu;cs.nyu.edu", + "email": "cs.stanford.edu;berkeley.edu;cs.nyu.edu", + "github": "https://github.com/jhejna/instruction-prediction", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Stanford University;University of California, Berkeley;New York University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.stanford.edu;https://www.berkeley.edu;https://www.nyu.edu", + "aff_unique_abbr": "Stanford;UC Berkeley;NYU", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Stanford;Berkeley;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25953", + "title": "Improving Pareto Front Learning via Multi-Sample Hypernetworks", + "track": "main", + "status": "Technical", + "abstract": "Pareto Front Learning (PFL) was recently introduced as an effective approach to obtain a mapping function from a given trade-off vector to a solution on the Pareto front, which solves the multi-objective optimization (MOO) problem. 
Due to the inherent trade-off between conflicting objectives, PFL offers a flexible approach in many scenarios in which the decision makers can not specify the preference of one Pareto solution over another, and must switch between them depending on the situation. However, existing PFL methods ignore the relationship between the solutions during the optimization process, which hinders the quality of the obtained front. To overcome this issue, we propose a novel PFL framework namely PHN-HVI, which employs a hypernetwork to generate multiple solutions from a set of diverse trade-off preferences and enhance the quality of the Pareto front by maximizing the Hypervolume indicator defined by these solutions. The experimental results on several MOO machine learning tasks show that the proposed framework significantly outperforms the baselines in producing the trade-off Pareto front.", + "primary_area": "machine learning ii", + "author": "Long P. Hoang; Dung D. Le; Tran Anh Tuan; Tran Ngoc Thang", + "authorids": "", + "aff": "College of Engineering and Computer Science, VinUniversity + School of Applied Mathematics and Informatics, Hanoi University of Science and Technology; College of Engineering and Computer Science, VinUniversity; School of Applied Mathematics and Informatics, Hanoi University of Science and Technology; School of Applied Mathematics and Informatics, Hanoi University of Science and Technology", + "bibtex": "@article{Hoang_Le_Anh Tuan_Ngoc Thang_2023, title={Improving Pareto Front Learning via Multi-Sample Hypernetworks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25953}, DOI={10.1609/aaai.v37i7.25953}, abstractNote={Pareto Front Learning (PFL) was recently introduced as an effective approach to obtain a mapping function from a given trade-off vector to a solution on the Pareto front, which solves the multi-objective optimization (MOO) problem. 
Due to the inherent trade-off between conflicting objectives, PFL offers a flexible approach in many scenarios in which the decision makers can not specify the preference of one Pareto solution over another, and must switch between them depending on the situation. However, existing PFL methods ignore the relationship between the solutions during the optimization process, which hinders the quality of the obtained front. To overcome this issue, we propose a novel PFL framework namely PHN-HVI, which employs a hypernetwork to generate multiple solutions from a set of diverse trade-off preferences and enhance the quality of the Pareto front by maximizing the Hypervolume indicator defined by these solutions. The experimental results on several MOO machine learning tasks show that the proposed framework significantly outperforms the baselines in producing the trade-off Pareto front.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hoang, Long P. and Le, Dung D. 
and Anh Tuan, Tran and Ngoc Thang, Tran}, year={2023}, month={Jun.}, pages={7875-7883} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25953/25725", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25953", + "pdf_size": 5026528, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12484183108964265625&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "vinuni.edu.vn;vinuni.edu.vn;sis.hust.edu.vn;hust.edu.vn", + "email": "vinuni.edu.vn;vinuni.edu.vn;sis.hust.edu.vn;hust.edu.vn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;1", + "aff_unique_norm": "VinUniversity;Hanoi University of Science and Technology", + "aff_unique_dep": "College of Engineering and Computer Science;School of Applied Mathematics and Informatics", + "aff_unique_url": "https://vinuni.edu.vn;https://www.hust.edu.vn", + "aff_unique_abbr": "VinUni;HUST", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Hanoi", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "Vietnam" + }, + { + "id": "article-25763", + "title": "Improving Robotic Tactile Localization Super-resolution via Spatiotemporal Continuity Learning and Overlapping Air Chambers", + "track": "main", + "status": "Technical", + "abstract": "Human hand has amazing super-resolution ability in sensing the force and position of contact and this ability can be strengthened by practice. Inspired by this, we propose a method for robotic tactile super-resolution enhancement by learning spatiotemporal continuity of contact position and a tactile sensor composed of overlapping air chambers. Each overlapping air chamber is constructed of soft material and seals the barometer inside to mimic adapting receptors of human skin. Each barometer obtains the global receptive field of the contact surface with the pressure propagation in the hyperelastic seal overlapping air chambers. 
\nNeural networks with causal convolution are employed to resolve the pressure data sampled by barometers and to predict the contact position. The temporal consistency of spatial position contributes to the accuracy and stability of positioning. We obtain an average super-resolution (SR) factor of over 2500 with only four physical sensing nodes on the rubber surface (0.1 mm in the best case on 38 \u00d7 26 mm\u00b2), which outperforms the state-of-the-art. The effect of time series length on the location prediction accuracy of causal convolution is quantitatively analyzed in this article. \nWe show that robots can accomplish challenging tasks such as haptic trajectory following, adaptive grasping, and human-robot interaction with the tactile sensor. This research provides new insight into tactile super-resolution sensing and could be beneficial to various applications in the robotics field.", + "primary_area": "intelligent robotics", + "author": "Xuyang Li; Yipu Zhang; Xuemei Xie; Jiawei Li; Guangming Shi", + "authorids": "", + "aff": "Xidian University; Xidian University; Xidian University + Pazhou Lab; Xidian University; Xidian University + Peng Cheng Laboratory", + "bibtex": "@article{Li_Zhang_Xie_Li_Shi_2023, title={Improving Robotic Tactile Localization Super-resolution via Spatiotemporal Continuity Learning and Overlapping Air Chambers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25763}, DOI={10.1609/aaai.v37i5.25763}, abstractNote={Human hand has amazing super-resolution ability in sensing the force and position of contact and this ability can be strengthened by practice. Inspired by this, we propose a method for robotic tactile super-resolution enhancement by learning spatiotemporal continuity of contact position and a tactile sensor composed of overlapping air chambers. Each overlapping air chamber is constructed of soft material and seals the barometer inside to mimic adapting receptors of human skin. 
Each barometer obtains the global receptive field of the contact surface with the pressure propagation in the hyperelastic seal overlapping air chambers. Neural networks with causal convolution are employed to resolve the pressure data sampled by barometers and to predict the contact position. The temporal consistency of spatial position contributes to the accuracy and stability of positioning. We obtain an average super-resolution (SR) factor of over 2500 with only four physical sensing nodes on the rubber surface (0.1 mm in the best case on 38 \u00d7 26 mm\u00b2), which outperforms the state-of-the-art. The effect of time series length on the location prediction accuracy of causal convolution is quantitatively analyzed in this article. We show that robots can accomplish challenging tasks such as haptic trajectory following, adaptive grasping, and human-robot interaction with the tactile sensor. This research provides new insight into tactile super-resolution sensing and could be beneficial to various applications in the robotics field.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xuyang and Zhang, Yipu and Xie, Xuemei and Li, Jiawei and Shi, Guangming}, year={2023}, month={Jun.}, pages={6192-6199} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25763/25535", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25763", + "pdf_size": 944144, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16288738832091918021&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xidian.edu.cn;stu.xidian.edu.cn;mail.xidian.edu.cn;stu.xidian.edu.cn;xidian.edu.cn", + "email": "stu.xidian.edu.cn;stu.xidian.edu.cn;mail.xidian.edu.cn;stu.xidian.edu.cn;xidian.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;0;0+2", + "aff_unique_norm": "Xidian University;Pazhou Lab;Peng Cheng Laboratory", + "aff_unique_dep": 
";;", + "aff_unique_url": "http://www.xidian.edu.cn/;;http://www.pcl.ac.cn", + "aff_unique_abbr": "Xidian;;PCL", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "article-26769", + "title": "Improving Robust Fariness via Balance Adversarial Training", + "track": "aaai special track", + "status": "Technical", + "abstract": "Adversarial training (AT) methods are effective against adversarial attacks, yet they introduce severe disparity of accuracy and robustness between different classes, known as the robust fairness problem. Previously proposed Fair Robust Learning (FRL) adaptively reweights different classes to improve fairness. However, the performance of the better-performed classes decreases, leading to a strong performance drop. In this paper, we observed two unfair phenomena during adversarial training: different difficulties in generating adversarial examples from each class (source-class fairness) and disparate target class tendencies when generating adversarial examples (target-class fairness). From the observations, we propose Balance Adversarial Training (BAT) to address the robust fairness problem. Regarding source-class fairness, we adjust the attack strength and difficulties of each class to generate samples near the decision boundary for easier and fairer model learning; considering target-class fairness, by introducing a uniform distribution constraint, we encourage the adversarial example generation process for each class with a fair tendency. 
Extensive experiments conducted on multiple datasets (CIFAR-10, CIFAR-100, and ImageNette) demonstrate that our BAT can significantly outperform other baselines in mitigating the robust fairness problem (+5-10\\% on the worst class accuracy)(Our codes can be found at https://github.com/silvercherry/Improving-Robust-Fairness-via-Balance-Adversarial-Training).", + "primary_area": "safe and robust ai", + "author": "Chunyu Sun; Chenye Xu; Chengyuan Yao; Siyuan Liang; Yichao Wu; Ding Liang; Xianglong Liu; Aishan Liu", + "authorids": "", + "aff": "SenseTime Research; SenseTime Research; SenseTime Research; Institute of Information Engineering, Chinese Academy of Sciences; SenseTime Research; SenseTime Research; Zhongguancun Laboratory, Beijing, China+Institute of Dataspace, Hefei, Anhui, China+NLSDE, Beihang University, Beijing, China; NLSDE, Beihang University, Beijing, China", + "bibtex": "@article{Sun_Xu_Yao_Liang_Wu_Liang_Liu_Liu_2023, title={Improving Robust Fariness via Balance Adversarial Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26769}, DOI={10.1609/aaai.v37i12.26769}, abstractNote={Adversarial training (AT) methods are effective against adversarial attacks, yet they introduce severe disparity of accuracy and robustness between different classes, known as the robust fairness problem. Previously proposed Fair Robust Learning (FRL) adaptively reweights different classes to improve fairness. However, the performance of the better-performed classes decreases, leading to a strong performance drop. In this paper, we observed two unfair phenomena during adversarial training: different difficulties in generating adversarial examples from each class (source-class fairness) and disparate target class tendencies when generating adversarial examples (target-class fairness). From the observations, we propose Balance Adversarial Training (BAT) to address the robust fairness problem. 
Regarding source-class fairness, we adjust the attack strength and difficulties of each class to generate samples near the decision boundary for easier and fairer model learning; considering target-class fairness, by introducing a uniform distribution constraint, we encourage the adversarial example generation process for each class with a fair tendency. Extensive experiments conducted on multiple datasets (CIFAR-10, CIFAR-100, and ImageNette) demonstrate that our BAT can significantly outperform other baselines in mitigating the robust fairness problem (+5-10\\% on the worst class accuracy)(Our codes can be found at https://github.com/silvercherry/Improving-Robust-Fairness-via-Balance-Adversarial-Training).}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Chunyu and Xu, Chenye and Yao, Chengyuan and Liang, Siyuan and Wu, Yichao and Liang, Ding and Liu, Xianglong and Liu, Aishan}, year={2023}, month={Jun.}, pages={15161-15169} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26769/26541", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26769", + "pdf_size": 691657, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8689517644745464835&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "sensetime.com;sensetime.com;sensetime.com;iie.ac.cn;sensetime.com;sensetime.com;buaa.edu.cn;buaa.edu.cn", + "email": "sensetime.com;sensetime.com;sensetime.com;iie.ac.cn;sensetime.com;sensetime.com;buaa.edu.cn;buaa.edu.cn", + "github": "https://github.com/silvercherry/Improving-Robust-Fairness-via-Balance-Adversarial-Training", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;0;0;2+3+4;4", + "aff_unique_norm": "SenseTime;Chinese Academy of Sciences;Zhongguancun Laboratory;Institute of Dataspace;Beihang University", + "aff_unique_dep": "SenseTime Research;Institute of Information Engineering;;;NLSDE", + "aff_unique_url": 
"https://www.sensetime.com;http://www.cas.cn;;;http://www.buaa.edu.cn", + "aff_unique_abbr": "SenseTime;CAS;;;BUAA", + "aff_campus_unique_index": "1+2;2", + "aff_campus_unique": ";Hefei;Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25497", + "title": "Improving Scene Text Image Super-resolution via Dual Prior Modulation Network", + "track": "main", + "status": "Technical", + "abstract": "Scene text image super-resolution (STISR) aims to simultaneously increase the resolution and legibility of the text images, and the resulting images will significantly affect the performance of downstream tasks. Although numerous progress has been made, existing approaches raise two crucial issues: (1) They neglect the global structure of the text, which bounds the semantic determinism of the scene text. (2) The priors, e.g., text prior or stroke prior, employed in existing works, are extracted from pre-trained text recognizers. That said, such priors suffer from the domain gap including low resolution and blurriness caused by poor imaging conditions, leading to incorrect guidance. Our work addresses these gaps and proposes a plug-and-play module dubbed Dual Prior Modulation Network (DPMN), which leverages dual image-level priors to bring performance gain over existing approaches. Specifically, two types of prior-guided refinement modules, each using the text mask or graphic recognition result of the low-quality SR image from the preceding layer, are designed to improve the structural clarity and semantic accuracy of the text, respectively. The following attention mechanism hence modulates two quality-enhanced images to attain a superior SR result. Extensive experiments validate that our method improves the image quality and boosts the performance of downstream tasks over five typical approaches on the benchmark. Substantial visualizations and ablation studies demonstrate the advantages of the proposed DPMN. 
Code is available at: https://github.com/jdfxzzy/DPMN.", + "primary_area": "computer vision iii", + "author": "Shipeng Zhu; Zuoyan Zhao; Pengfei Fang; Hui Xue", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University, Nanjing 210096, China+MOE Key Laboratory of Computer Network and Information Integration (Southeast University), China; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China+MOE Key Laboratory of Computer Network and Information Integration (Southeast University), China; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China+MOE Key Laboratory of Computer Network and Information Integration (Southeast University), China; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China+MOE Key Laboratory of Computer Network and Information Integration (Southeast University), China", + "bibtex": "@article{Zhu_Zhao_Fang_Xue_2023, title={Improving Scene Text Image Super-resolution via Dual Prior Modulation Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25497}, DOI={10.1609/aaai.v37i3.25497}, abstractNote={Scene text image super-resolution (STISR) aims to simultaneously increase the resolution and legibility of the text images, and the resulting images will significantly affect the performance of downstream tasks. Although numerous progress has been made, existing approaches raise two crucial issues: (1) They neglect the global structure of the text, which bounds the semantic determinism of the scene text. (2) The priors, e.g., text prior or stroke prior, employed in existing works, are extracted from pre-trained text recognizers. That said, such priors suffer from the domain gap including low resolution and blurriness caused by poor imaging conditions, leading to incorrect guidance. 
Our work addresses these gaps and proposes a plug-and-play module dubbed Dual Prior Modulation Network (DPMN), which leverages dual image-level priors to bring performance gain over existing approaches. Specifically, two types of prior-guided refinement modules, each using the text mask or graphic recognition result of the low-quality SR image from the preceding layer, are designed to improve the structural clarity and semantic accuracy of the text, respectively. The following attention mechanism hence modulates two quality-enhanced images to attain a superior SR result. Extensive experiments validate that our method improves the image quality and boosts the performance of downstream tasks over five typical approaches on the benchmark. Substantial visualizations and ablation studies demonstrate the advantages of the proposed DPMN. Code is available at: https://github.com/jdfxzzy/DPMN.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Shipeng and Zhao, Zuoyan and Fang, Pengfei and Xue, Hui}, year={2023}, month={Jun.}, pages={3843-3851} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25497/25269", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25497", + "pdf_size": 836279, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15662727805890176308&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "https://github.com/jdfxzzy/DPMN", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "Southeast University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.seu.edu.cn/", + "aff_unique_abbr": "SEU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Nanjing;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + 
"aff_country_unique": "China" + }, + { + "id": "article-26497", + "title": "Improving Simultaneous Machine Translation with Monolingual Data", + "track": "main", + "status": "Technical", + "abstract": "Simultaneous machine translation (SiMT) is usually done via sequence-level knowledge distillation (Seq-KD) from a full-sentence neural machine translation (NMT) model. However, there is still a significant performance gap between NMT and SiMT. In this work, we propose to leverage monolingual data to improve SiMT, which trains a SiMT student on the combination of bilingual data and external monolingual data distilled by Seq-KD. Preliminary experiments on En-Zh and En-Ja news domain corpora demonstrate that monolingual data can significantly improve translation quality (e.g., +3.15 BLEU on En-Zh). Inspired by the behavior of human simultaneous interpreters, we propose a novel monolingual sampling strategy for SiMT, considering both chunk length and monotonicity. Experimental results show that our sampling strategy consistently outperforms the random sampling strategy (and other conventional typical NMT monolingual sampling strategies) by avoiding the key problem of SiMT -- hallucination, and has better scalability. We achieve +0.72 BLEU improvements on average against random sampling on En-Zh and En-Ja. Data and codes can be found at https://github.com/hexuandeng/Mono4SiMT.", + "primary_area": "speech natural language processing", + "author": "Hexuan Deng; Liang Ding; Xuebo Liu; Meishan Zhang; Dacheng Tao; Min Zhang", + "authorids": "", + "aff": "Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; JD Explore Academy, JD.com Inc. 
+ Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; JD Explore Academy, JD.com Inc.; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China", + "bibtex": "@article{Deng_Ding_Liu_Zhang_Tao_Zhang_2023, title={Improving Simultaneous Machine Translation with Monolingual Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26497}, DOI={10.1609/aaai.v37i11.26497}, abstractNote={Simultaneous machine translation (SiMT) is usually done via sequence-level knowledge distillation (Seq-KD) from a full-sentence neural machine translation (NMT) model. However, there is still a significant performance gap between NMT and SiMT. In this work, we propose to leverage monolingual data to improve SiMT, which trains a SiMT student on the combination of bilingual data and external monolingual data distilled by Seq-KD. Preliminary experiments on En-Zh and En-Ja news domain corpora demonstrate that monolingual data can significantly improve translation quality (e.g., +3.15 BLEU on En-Zh). Inspired by the behavior of human simultaneous interpreters, we propose a novel monolingual sampling strategy for SiMT, considering both chunk length and monotonicity. Experimental results show that our sampling strategy consistently outperforms the random sampling strategy (and other conventional typical NMT monolingual sampling strategies) by avoiding the key problem of SiMT -- hallucination, and has better scalability. We achieve +0.72 BLEU improvements on average against random sampling on En-Zh and En-Ja. 
Data and codes can be found at https://github.com/hexuandeng/Mono4SiMT.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Hexuan and Ding, Liang and Liu, Xuebo and Zhang, Meishan and Tao, Dacheng and Zhang, Min}, year={2023}, month={Jun.}, pages={12728-12736} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26497/26269", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26497", + "pdf_size": 2669971, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1734483329992192962&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.hit.edu.cn;jd.com;hit.edu.cn;hit.edu.cn;gmail.com;hit.edu.cn", + "email": "stu.hit.edu.cn;jd.com;hit.edu.cn;hit.edu.cn;gmail.com;hit.edu.cn", + "github": "https://github.com/hexuandeng/Mono4SiMT", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+0;0;0;1;0", + "aff_unique_norm": "Harbin Institute of Technology;JD.com Inc.", + "aff_unique_dep": "Institute of Computing and Intelligence;JD Explore Academy", + "aff_unique_url": "http://www.hhit.edu.cn;https://www.jd.com", + "aff_unique_abbr": "HIT;JD.com", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26760", + "title": "Improving Training and Inference of Face Recognition Models via Random Temperature Scaling", + "track": "aaai special track", + "status": "Technical", + "abstract": "Data uncertainty is commonly observed in the images for face recognition (FR). However, deep learning algorithms often make predictions with high confidence even for uncertain or irrelevant inputs. Intuitively, FR algorithms can benefit from both the estimation of uncertainty and the detection of out-of-distribution (OOD) samples. 
Taking a probabilistic view of the current classification model, the temperature scalar is exactly the scale of uncertainty noise implicitly added in the softmax function. Meanwhile, the uncertainty of images in a dataset should follow a prior distribution. Based on the observation, a unified framework for uncertainty modeling and FR, Random Temperature Scaling (RTS), is proposed to learn a reliable FR algorithm. The benefits of RTS are two-fold. (1) In the training phase, it can adjust the learning strength of clean and noisy samples for stability and accuracy. (2) In the test phase, it can provide a score of confidence to detect uncertain, low-quality and even OOD samples, without training on extra labels. Extensive experiments on FR benchmarks demonstrate that the magnitude of variance in RTS, which serves as an OOD detection metric, is closely related to the uncertainty of the input image. RTS can achieve top performance on both the FR and OOD detection tasks. Moreover, the model trained with RTS can perform robustly on datasets with noise. 
The proposed module is light-weight and only adds negligible computation cost to the model.", + "primary_area": "safe and robust ai", + "author": "Lei Shang; Mouxiao Huang; Wu Shi; Yuchen Liu; Yang Liu; Wang Steven; Baigui Sun; Xuansong Xie; Yu Qiao", + "authorids": "", + "aff": "Alibaba Group; The Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; The Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; The Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences + Shanghai Artificial Intelligence Laboratory", + "bibtex": "@article{Shang_Huang_Shi_Liu_Liu_Steven_Sun_Xie_Qiao_2023, title={Improving Training and Inference of Face Recognition Models via Random Temperature Scaling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26760}, DOI={10.1609/aaai.v37i12.26760}, abstractNote={Data uncertainty is commonly observed in the images for face recognition (FR). However, deep learning algorithms often make predictions with high confidence even for uncertain or irrelevant inputs. Intuitively, FR algorithms can benefit from both the estimation of uncertainty and the detection of out-of-distribution (OOD) samples. Taking a probabilistic view of the current classification model, the temperature scalar is exactly the scale of uncertainty noise implicitly added in the softmax function. Meanwhile, the uncertainty of images in a dataset should follow a prior distribution. Based on the observation, a unified framework for uncertainty modeling and FR, Random Temperature Scaling (RTS), is proposed to learn a reliable FR algorithm. 
The benefits of RTS are two-fold. (1) In the training phase, it can adjust the learning strength of clean and noisy samples for stability and accuracy. (2) In the test phase, it can provide a score of confidence to detect uncertain, low-quality and even OOD samples, without training on extra labels. Extensive experiments on FR benchmarks demonstrate that the magnitude of variance in RTS, which serves as an OOD detection metric, is closely related to the uncertainty of the input image. RTS can achieve top performance on both the FR and OOD detection tasks. Moreover, the model trained with RTS can perform robustly on datasets with noise. The proposed module is light-weight and only adds negligible computation cost to the model.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shang, Lei and Huang, Mouxiao and Shi, Wu and Liu, Yuchen and Liu, Yang and Steven, Wang and Sun, Baigui and Xie, Xuansong and Qiao, Yu}, year={2023}, month={Jun.}, pages={15082-15090} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26760/26532", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26760", + "pdf_size": 3775639, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6358877232652107362&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "alibaba-inc.com;siat.ac.cn;siat.ac.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;siat.ac.cn", + "email": "alibaba-inc.com;siat.ac.cn;siat.ac.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;siat.ac.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1+2;1;0;0;0;0;0;1+3", + "aff_unique_norm": "Alibaba Group;Chinese Academy of Sciences;University of Chinese Academy of Sciences;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": ";Provincial Key Laboratory of Computer Vision and Virtual Reality Technology;;", + 
"aff_unique_url": "https://www.alibaba.com;http://www.cas.cn;http://www.ucas.ac.cn;http://www.shailab.org/", + "aff_unique_abbr": "Alibaba;CAS;UCAS;Shanghai AI Lab", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0+0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25936", + "title": "Improving Uncertainty Quantification of Deep Classifiers via Neighborhood Conformal Prediction: Novel Algorithm and Theoretical Analysis", + "track": "main", + "status": "Technical", + "abstract": "Safe deployment of deep neural networks in high-stake real-world applications require theoretically sound uncertainty quantification. Conformal prediction (CP) is a principled framework for uncertainty quantification of deep models in the form of prediction set for classification tasks with a user-specified coverage (i.e., true class label is contained with high probability). This paper proposes a novel algorithm referred to as Neighborhood Conformal Prediction (NCP) to improve the efficiency of uncertainty quantification from CP for deep classifiers (i.e., reduce prediction set size). The key idea behind NCP is to use the learned representation of the neural network to identify k nearest-neighbor calibration examples for a given testing input and assign them importance weights proportional to their distance to create adaptive prediction sets. We theoretically show that if the learned data representation of the neural network satisfies some mild conditions, NCP will produce smaller prediction sets than traditional CP algorithms. 
Our comprehensive experiments on CIFAR-10, CIFAR-100, and ImageNet datasets using diverse deep neural networks strongly demonstrate that NCP leads to significant reduction in prediction set size over prior CP methods.", + "primary_area": "machine learning i", + "author": "Subhankar Ghosh; Taha Belkhouja; Yan Yan; Janardhan Rao Doppa", + "authorids": "", + "aff": "School of EECS, Washington State University; School of EECS, Washington State University; School of EECS, Washington State University; School of EECS, Washington State University", + "bibtex": "@article{Ghosh_Belkhouja_Yan_Doppa_2023, title={Improving Uncertainty Quantification of Deep Classifiers via Neighborhood Conformal Prediction: Novel Algorithm and Theoretical Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25936}, DOI={10.1609/aaai.v37i6.25936}, abstractNote={Safe deployment of deep neural networks in high-stake real-world applications require theoretically sound uncertainty quantification. Conformal prediction (CP) is a principled framework for uncertainty quantification of deep models in the form of prediction set for classification tasks with a user-specified coverage (i.e., true class label is contained with high probability). This paper proposes a novel algorithm referred to as Neighborhood Conformal Prediction (NCP) to improve the efficiency of uncertainty quantification from CP for deep classifiers (i.e., reduce prediction set size). The key idea behind NCP is to use the learned representation of the neural network to identify k nearest-neighbor calibration examples for a given testing input and assign them importance weights proportional to their distance to create adaptive prediction sets. We theoretically show that if the learned data representation of the neural network satisfies some mild conditions, NCP will produce smaller prediction sets than traditional CP algorithms. 
Our comprehensive experiments on CIFAR-10, CIFAR-100, and ImageNet datasets using diverse deep neural networks strongly demonstrate that NCP leads to significant reduction in prediction set size over prior CP methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ghosh, Subhankar and Belkhouja, Taha and Yan, Yan and Doppa, Janardhan Rao}, year={2023}, month={Jun.}, pages={7722-7730} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25936/25708", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25936", + "pdf_size": 391699, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4677965558903183829&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "wsu.edu;wsu.edu;wsu.edu;wsu.edu", + "email": "wsu.edu;wsu.edu;wsu.edu;wsu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Washington State University", + "aff_unique_dep": "School of Electrical Engineering and Computer Science", + "aff_unique_url": "https://eece.wsu.edu", + "aff_unique_abbr": "WSU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Pullman", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26574", + "title": "Improving the Cross-Lingual Generalisation in Visual Question Answering", + "track": "main", + "status": "Technical", + "abstract": "While several benefits were realized for multilingual vision-language pretrained models, recent benchmarks across various tasks and languages showed poor cross-lingual generalisation when multilingually pre-trained vision-language models are applied to non-English data, with a large gap between (supervised) English performance and (zero-shot) cross-lingual transfer. 
In this work, we explore the poor performance of these models on a zero-shot cross-lingual visual question answering (VQA) task, where models are fine-tuned on English visual-question data and evaluated on 7 typologically diverse languages. We improve cross-lingual transfer with three strategies: (1) we introduce a linguistic prior objective to augment the cross-entropy loss with a similarity-based loss to guide the model during training, (2) we learn a task-specific subnetwork that improves cross-lingual generalisation and reduces variance without model modification, (3) we augment training examples using synthetic code-mixing to promote alignment of embeddings between source and target languages. Our experiments on xGQA using the pretrained multilingual multimodal transformers UC2 and M3P demonstrates the consistent effectiveness of the proposed fine-tuning strategy for 7 languages, outperforming existing transfer methods with sparse models.", + "primary_area": "speech natural language processing", + "author": "Farhad Nooralahzadeh; Rico Sennrich", + "authorids": "", + "aff": "Department of Computational Linguistics, University of Zurich; Department of Computational Linguistics, University of Zurich", + "bibtex": "@article{Nooralahzadeh_Sennrich_2023, title={Improving the Cross-Lingual Generalisation in Visual Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26574}, DOI={10.1609/aaai.v37i11.26574}, abstractNote={While several benefits were realized for multilingual vision-language pretrained models, recent benchmarks across various tasks and languages showed poor cross-lingual generalisation when multilingually pre-trained vision-language models are applied to non-English data, with a large gap between (supervised) English performance and (zero-shot) cross-lingual transfer. 
In this work, we explore the poor performance of these models on a zero-shot cross-lingual visual question answering (VQA) task, where models are fine-tuned on English visual-question data and evaluated on 7 typologically diverse languages. We improve cross-lingual transfer with three strategies: (1) we introduce a linguistic prior objective to augment the cross-entropy loss with a similarity-based loss to guide the model during training, (2) we learn a task-specific subnetwork that improves cross-lingual generalisation and reduces variance without model modification, (3) we augment training examples using synthetic code-mixing to promote alignment of embeddings between source and target languages. Our experiments on xGQA using the pretrained multilingual multimodal transformers UC2 and M3P demonstrates the consistent effectiveness of the proposed fine-tuning strategy for 7 languages, outperforming existing transfer methods with sparse models.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nooralahzadeh, Farhad and Sennrich, Rico}, year={2023}, month={Jun.}, pages={13419-13427} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26574/26346", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26574", + "pdf_size": 1031395, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12718084968765760124&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "uzh.ch;cl.uzh.ch", + "email": "uzh.ch;cl.uzh.ch", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Zurich", + "aff_unique_dep": "Department of Computational Linguistics", + "aff_unique_url": "https://www.unizh.ch", + "aff_unique_abbr": "UZH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "article-26979", + "title": "In-Game Toxic 
Language Detection: Shared Task and Attention Residuals (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In-game toxic language becomes the hot potato in the gaming industry and community. There have been several online game toxicity analysis frameworks and models proposed. However, it is still challenging to detect toxicity due to the nature of in-game chat, which has extremely short length. In this paper, we describe how the in-game toxic language shared task has been established using the real-world in-game chat data. In addition, we propose and introduce the model/framework for toxic language token tagging (slot filling) from the in-game chat. The data and code will be released.", + "primary_area": "", + "author": "Yuanzhe Jia; Weixuan Wu; Feiqi Cao; Soyeon Caren Han", + "authorids": "", + "aff": "School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia", + "bibtex": "@article{Jia_Wu_Cao_Han_2024, title={In-Game Toxic Language Detection: Shared Task and Attention Residuals (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26979}, DOI={10.1609/aaai.v37i13.26979}, abstractNote={In-game toxic language becomes the hot potato in the gaming industry and community. There have been several online game toxicity analysis frameworks and models proposed. However, it is still challenging to detect toxicity due to the nature of in-game chat, which has extremely short length. In this paper, we describe how the in-game toxic language shared task has been established using the real-world in-game chat data. In addition, we propose and introduce the model/framework for toxic language token tagging (slot filling) from the in-game chat. 
The data and code will be released.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Yuanzhe and Wu, Weixuan and Cao, Feiqi and Han, Soyeon Caren}, year={2024}, month={Jul.}, pages={16238-16239} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26979/26751", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26979", + "pdf_size": 101917, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1945160623554343232&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "uni.sydney.edu.au;uni.sydney.edu.au;uni.sydney.edu.au;sydney.edu.au", + "email": "uni.sydney.edu.au;uni.sydney.edu.au;uni.sydney.edu.au;sydney.edu.au", + "github": "", + "project": "https://www.kaggle.com/competitions/2022-comp5046-a2", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Sydney", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.sydney.edu.au", + "aff_unique_abbr": "USYD", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Sydney", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25845", + "title": "InParformer: Evolutionary Decomposition Transformers with Interactive Parallel Attention for Long-Term Time Series Forecasting", + "track": "main", + "status": "Technical", + "abstract": "Long-term time series forecasting (LTSF) provides substantial benefits for numerous real-world applications, whereas places essential demands on the model capacity to capture long-range dependencies. Recent Transformer-based models have significantly improved LTSF performance. It is worth noting that Transformer with the self-attention mechanism was originally proposed to model language sequences whose tokens (i.e., words) are discrete and highly semantic. However, unlike language sequences, most time series are sequential and continuous numeric points. 
Time steps with temporal redundancy are weakly semantic, and only leveraging time-domain tokens is hard to depict the overall properties of time series (e.g., the overall trend and periodic variations). To address these problems, we propose a novel Transformer-based forecasting model named InParformer with an Interactive Parallel Attention (InPar Attention) mechanism. The InPar Attention is proposed to learn long-range dependencies comprehensively in both frequency and time domains. To improve its learning capacity and efficiency, we further design several mechanisms, including query selection, key-value pair compression, and recombination. Moreover, InParformer is constructed with evolutionary seasonal-trend decomposition modules to enhance intricate temporal pattern extraction. Extensive experiments on six real-world benchmarks show that InParformer outperforms the state-of-the-art forecasting Transformers.", + "primary_area": "machine learning i", + "author": "Haizhou Cao; Zhenhao Huang; Tiechui Yao; Jue Wang; Hui He; Yangang Wang", + "authorids": "", + "aff": "Computer Network Information Center, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; North China Electric Power University, Beijing, China; Computer Network Information Center, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; Computer Network Information Center, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; North China Electric Power University, Beijing, China; Computer Network Information Center, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Cao_Huang_Yao_Wang_He_Wang_2023, title={InParformer: Evolutionary Decomposition Transformers with Interactive Parallel Attention for Long-Term Time Series Forecasting}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25845}, DOI={10.1609/aaai.v37i6.25845}, abstractNote={Long-term time series forecasting (LTSF) provides substantial benefits for numerous real-world applications, whereas places essential demands on the model capacity to capture long-range dependencies. Recent Transformer-based models have significantly improved LTSF performance. It is worth noting that Transformer with the self-attention mechanism was originally proposed to model language sequences whose tokens (i.e., words) are discrete and highly semantic. However, unlike language sequences, most time series are sequential and continuous numeric points. Time steps with temporal redundancy are weakly semantic, and only leveraging time-domain tokens is hard to depict the overall properties of time series (e.g., the overall trend and periodic variations). To address these problems, we propose a novel Transformer-based forecasting model named InParformer with an Interactive Parallel Attention (InPar Attention) mechanism. The InPar Attention is proposed to learn long-range dependencies comprehensively in both frequency and time domains. To improve its learning capacity and efficiency, we further design several mechanisms, including query selection, key-value pair compression, and recombination. Moreover, InParformer is constructed with evolutionary seasonal-trend decomposition modules to enhance intricate temporal pattern extraction. 
Extensive experiments on six real-world benchmarks show that InParformer outperforms the state-of-the-art forecasting Transformers.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Haizhou and Huang, Zhenhao and Yao, Tiechui and Wang, Jue and He, Hui and Wang, Yangang}, year={2023}, month={Jun.}, pages={6906-6915} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25845/25617", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25845", + "pdf_size": 238616, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17027964803981986515&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "cnic.cn;ncepu.edu.cn;cnic.cn;sccas.cn;ncepu.edu.cn;sccas.cn", + "email": "cnic.cn;ncepu.edu.cn;cnic.cn;sccas.cn;ncepu.edu.cn;sccas.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;0+1;0+1;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;North China Electric Power University", + "aff_unique_dep": "Computer Network Information Center;;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;http://www.ncepu.edu.cn", + "aff_unique_abbr": "CAS;UCAS;NCEPU", + "aff_campus_unique_index": "0+0;0;0+0;0+0;0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0;0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25744", + "title": "Incentive-Boosted Federated Crowdsourcing", + "track": "main", + "status": "Technical", + "abstract": "Crowdsourcing is a favorable computing paradigm for processing computer-hard tasks by harnessing human intelligence. However, generic crowdsourcing systems may lead to privacy-leakage through the sharing of worker data. To tackle this problem, we propose a novel approach, called iFedCrowd (incentive-boosted Federated Crowdsourcing), to manage the privacy and quality of crowdsourcing projects. 
iFedCrowd allows participants to locally process sensitive data and only upload encrypted training models, and then aggregates the model parameters to build a shared server model to protect data privacy. To motivate workers to build a high-quality global model in an efficacy way, we introduce an incentive mechanism that encourages workers to constantly collect fresh data to train accurate client models and boosts the global model training. We model the incentive-based interaction between the crowdsourcing platform and participating workers as a Stackelberg game, in which each side maximizes its own profit. We derive the Nash Equilibrium of the game to find the optimal solutions for the two sides. Experimental results confirm that iFedCrowd can complete secure crowdsourcing projects with high quality and efficiency.", + "primary_area": "humans and ai", + "author": "Xiangping Kang; Guoxian Yu; Jun Wang; Wei Guo; Carlotta Domeniconi; Jinglin Zhang", + "authorids": "", + "aff": "School of Software, Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China + SDU-NTU Joint Centre for AI Research, Shandong University, Jinan, China; SDU-NTU Joint Centre for AI Research, Shandong University, Jinan, China; SDU-NTU Joint Centre for AI Research, Shandong University, Jinan, China; Department of Computer Science, George Mason University, Fairfax, V A, USA; School of Control Science and Engineering, Shandong University, Jinan, China", + "bibtex": "@article{Kang_Yu_Wang_Guo_Domeniconi_Zhang_2023, title={Incentive-Boosted Federated Crowdsourcing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25744}, DOI={10.1609/aaai.v37i5.25744}, abstractNote={Crowdsourcing is a favorable computing paradigm for processing computer-hard tasks by harnessing human intelligence. However, generic crowdsourcing systems may lead to privacy-leakage through the sharing of worker data. 
To tackle this problem, we propose a novel approach, called iFedCrowd (incentive-boosted Federated Crowdsourcing), to manage the privacy and quality of crowdsourcing projects. iFedCrowd allows participants to locally process sensitive data and only upload encrypted training models, and then aggregates the model parameters to build a shared server model to protect data privacy. To motivate workers to build a high-quality global model in an efficacy way, we introduce an incentive mechanism that encourages workers to constantly collect fresh data to train accurate client models and boosts the global model training. We model the incentive-based interaction between the crowdsourcing platform and participating workers as a Stackelberg game, in which each side maximizes its own profit. We derive the Nash Equilibrium of the game to find the optimal solutions for the two sides. Experimental results confirm that iFedCrowd can complete secure crowdsourcing projects with high quality and efficiency.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kang, Xiangping and Yu, Guoxian and Wang, Jun and Guo, Wei and Domeniconi, Carlotta and Zhang, Jinglin}, year={2023}, month={Jun.}, pages={6021-6029} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25744/25516", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25744", + "pdf_size": 316602, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5009125685914567157&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;cs.gmu.edu;sdu.edu.cn", + "email": "mail.sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;cs.gmu.edu;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+0;0;0;1;0", + "aff_unique_norm": "Shandong University;George Mason University", + "aff_unique_dep": "School of Software;Department of Computer Science", + 
"aff_unique_url": "http://www.sdu.edu.cn;https://www.gmu.edu", + "aff_unique_abbr": ";GMU", + "aff_campus_unique_index": "0;0+0;0;0;1;0", + "aff_campus_unique": "Jinan;Fairfax", + "aff_country_unique_index": "0;0+0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26060", + "title": "Incomplete Multi-View Multi-Label Learning via Label-Guided Masked View- and Category-Aware Transformers", + "track": "main", + "status": "Technical", + "abstract": "As we all know, multi-view data is more expressive than single-view data and multi-label annotation enjoys richer supervision information than single-label, which makes multi-view multi-label learning widely applicable for various pattern recognition tasks. In this complex representation learning problem, three main challenges can be characterized as follows: i) How to learn consistent representations of samples across all views? ii) How to exploit and utilize category correlations of multi-label to guide inference? iii) How to avoid the negative impact resulting from the incompleteness of views or labels? To cope with these problems, we propose a general multi-view multi-label learning framework named label-guided masked view- and category-aware transformers in this paper. First, we design two transformer-style based modules for cross-view features aggregation and multi-label classification, respectively. The former aggregates information from different views in the process of extracting view-specific features, and the latter learns subcategory embedding to improve classification performance. Second, considering the imbalance of expressive power among views, an adaptively weighted view fusion module is proposed to obtain view-consistent embedding features. Third, we impose a label manifold constraint in sample-level representation learning to maximize the utilization of supervised information. 
Last but not least, all the modules are designed under the premise of incomplete views and labels, which makes our method adaptable to arbitrary multi-view and multi-label data. Extensive experiments on five datasets confirm that our method has clear advantages over other state-of-the-art methods.", + "primary_area": "machine learning ii", + "author": "Chengliang Liu; Jie Wen; Xiaoling Luo; Yong Xu", + "authorids": "", + "aff": "Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China + Pengcheng Laboratory, Shenzhen, China", + "bibtex": "@article{Liu_Wen_Luo_Xu_2023, title={Incomplete Multi-View Multi-Label Learning via Label-Guided Masked View- and Category-Aware Transformers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26060}, DOI={10.1609/aaai.v37i7.26060}, abstractNote={As we all know, multi-view data is more expressive than single-view data and multi-label annotation enjoys richer supervision information than single-label, which makes multi-view multi-label learning widely applicable for various pattern recognition tasks. In this complex representation learning problem, three main challenges can be characterized as follows: i) How to learn consistent representations of samples across all views? ii) How to exploit and utilize category correlations of multi-label to guide inference? iii) How to avoid the negative impact resulting from the incompleteness of views or labels? 
To cope with these problems, we propose a general multi-view multi-label learning framework named label-guided masked view- and category-aware transformers in this paper. First, we design two transformer-style based modules for cross-view features aggregation and multi-label classification, respectively. The former aggregates information from different views in the process of extracting view-specific features, and the latter learns subcategory embedding to improve classification performance. Second, considering the imbalance of expressive power among views, an adaptively weighted view fusion module is proposed to obtain view-consistent embedding features. Third, we impose a label manifold constraint in sample-level representation learning to maximize the utilization of supervised information. Last but not least, all the modules are designed under the premise of incomplete views and labels, which makes our method adaptable to arbitrary multi-view and multi-label data. Extensive experiments on five datasets confirm that our method has clear advantages over other state-of-the-art methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Chengliang and Wen, Jie and Luo, Xiaoling and Xu, Yong}, year={2023}, month={Jun.}, pages={8816-8824} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26060/25832", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26060", + "pdf_size": 1049404, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11886895989016818271&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "163.com;126.com;outlook.com;ymail.com", + "email": "163.com;126.com;outlook.com;ymail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Pengcheng Laboratory", + "aff_unique_dep": "Shenzhen Key Laboratory of Visual Object Detection and Recognition;", 
+ "aff_unique_url": "http://www.hit.edu.cn/;", + "aff_unique_abbr": "HIT;", + "aff_campus_unique_index": "0;0;0;0+0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25783", + "title": "Inconsistent Cores for ASP: The Perks and Perils of Non-monotonicity", + "track": "main", + "status": "Technical", + "abstract": "Answer Set Programming (ASP) is a prominent modeling and solving framework. An inconsistent core (IC) of an ASP program is an inconsistent subset of rules. In the case of inconsistent programs, a smallest or subset-minimal IC contains crucial rules for the inconsistency. In this work, we study fnding minimal ICs of ASP programs and key fragments from a complexity-theoretic perspective. Interestingly, due to ASP\u2019s non-monotonic behavior, also consistent programs admit ICs. It turns out that there is an entire landscape of problems involving ICs with a diverse range of complexities up to the fourth level of the Polynomial Hierarchy. Deciding the existence of an IC is, already for tight programs, on the second level of the Polynomial Hierarchy. Furthermore, we give encodings for IC-related problems on the fragment of tight programs and illustrate feasibility on small instance sets.", + "primary_area": "knowledge representation and reasoning", + "author": "Johannes K. 
Fichte; Markus Hecher; Stefan Szeider", + "authorids": "", + "aff": "TU Wien, Research Unit Databases and AI, Vienna, Austria; Computer Science and Artificial Intelligence Lab, Massachusetts Institute of Technology, Cambridge, MA, United States; TU Wien, Research Unit Algorithms and Complexity, Vienna, Austria", + "bibtex": "@article{Fichte_Hecher_Szeider_2023, title={Inconsistent Cores for ASP: The Perks and Perils of Non-monotonicity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25783}, DOI={10.1609/aaai.v37i5.25783}, abstractNote={Answer Set Programming (ASP) is a prominent modeling and solving framework. An inconsistent core (IC) of an ASP program is an inconsistent subset of rules. In the case of inconsistent programs, a smallest or subset-minimal IC contains crucial rules for the inconsistency. In this work, we study fnding minimal ICs of ASP programs and key fragments from a complexity-theoretic perspective. Interestingly, due to ASP\u2019s non-monotonic behavior, also consistent programs admit ICs. It turns out that there is an entire landscape of problems involving ICs with a diverse range of complexities up to the fourth level of the Polynomial Hierarchy. Deciding the existence of an IC is, already for tight programs, on the second level of the Polynomial Hierarchy. Furthermore, we give encodings for IC-related problems on the fragment of tight programs and illustrate feasibility on small instance sets.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fichte, Johannes K. 
and Hecher, Markus and Szeider, Stefan}, year={2023}, month={Jun.}, pages={6363-6371} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25783/25555", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25783", + "pdf_size": 206903, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:-JjpVfMY6HMJ:scholar.google.com/&scioq=Inconsistent+Cores+for+ASP:+The+Perks+and+Perils+of+Non-monotonicity&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "tuwien.ac.at;mit.edu;ac.tuwien.ac.at", + "email": "tuwien.ac.at;mit.edu;ac.tuwien.ac.at", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "TU Wien;Massachusetts Institute of Technology", + "aff_unique_dep": "Research Unit Databases and AI;Computer Science and Artificial Intelligence Lab", + "aff_unique_url": "https://www.tuwien.ac.at;https://web.mit.edu", + "aff_unique_abbr": "TU Wien;MIT", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Vienna;Cambridge", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Austria;United States" + }, + { + "id": "article-26849", + "title": "Increasing Impact of Mobile Health Programs: SAHELI for Maternal and Child Care", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Underserved communities face critical health challenges due to lack of access to timely and reliable information. Nongovernmental organizations are leveraging the widespread use of cellphones to combat these healthcare challenges and spread preventative awareness. The health workers at these organizations reach out individually to beneficiaries; however such programs still suffer from declining engagement. We have deployed SAHELI, a system to efficiently utilize the limited availability of health workers for improving maternal and child health in India. SAHELI uses the Restless Multiarmed Bandit (RMAB) framework to identify beneficiaries for outreach. 
It is the first deployed application for RMABs in public health, and is already in continuous use by our partner NGO, ARMMAN. We have already reached ~100K beneficiaries with SAHELI, and are on track to serve 1 million beneficiaries by the end of 2023. This scale and impact has been achieved through multiple innovations in the RMAB model and its development, in preparation of real world data, and in deployment practices; and through careful consideration of responsible AI practices. Specifically, in this paper, we describe our approach to learn from past data to improve the performance of SAHELI\u2019s RMAB model, the real-world challenges faced during deployment and adoption of SAHELI, and\nthe end-to-end pipeline.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Shresth Verma; Gargi Singh; Aditya Mate; Paritosh Verma; Sruthi Gorantla; Neha Madhiwalla; Aparna Hegde; Divy Thakkar; Manish Jain; Milind Tambe; Aparna Taneja", + "authorids": "", + "aff": "Google Research India; Google Research India; Google Research India+Harvard University; Google Research India+Purdue University; Google Research India+Indian Institute of Science, Bangalore; ARMMAN; ARMMAN; Google Research India; Google Research India; Google Research India; Google Research India", + "bibtex": "@article{Verma_Singh_Mate_Verma_Gorantla_Madhiwalla_Hegde_Thakkar_Jain_Tambe_Taneja_2024, title={Increasing Impact of Mobile Health Programs: SAHELI for Maternal and Child Care}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26849}, DOI={10.1609/aaai.v37i13.26849}, abstractNote={Underserved communities face critical health challenges due to lack of access to timely and reliable information. Nongovernmental organizations are leveraging the widespread use of cellphones to combat these healthcare challenges and spread preventative awareness. 
The health workers at these organizations reach out individually to beneficiaries; however such programs still suffer from declining engagement. We have deployed SAHELI, a system to efficiently utilize the limited availability of health workers for improving maternal and child health in India. SAHELI uses the Restless Multiarmed Bandit (RMAB) framework to identify beneficiaries for outreach. It is the first deployed application for RMABs in public health, and is already in continuous use by our partner NGO, ARMMAN. We have already reached ~100K beneficiaries with SAHELI, and are on track to serve 1 million beneficiaries by the end of 2023. This scale and impact has been achieved through multiple innovations in the RMAB model and its development, in preparation of real world data, and in deployment practices; and through careful consideration of responsible AI practices. Specifically, in this paper, we describe our approach to learn from past data to improve the performance of SAHELI\u2019s RMAB model, the real-world challenges faced during deployment and adoption of SAHELI, and\nthe end-to-end pipeline.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Verma, Shresth and Singh, Gargi and Mate, Aditya and Verma, Paritosh and Gorantla, Sruthi and Madhiwalla, Neha and Hegde, Aparna and Thakkar, Divy and Jain, Manish and Tambe, Milind and Taneja, Aparna}, year={2024}, month={Jul.}, pages={15594-15602} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26849/26621", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26849", + "pdf_size": 3985262, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10570476371661824223&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "google.com;google.com;g.harvard.edu;cs.purdue.edu;iisc.ac.in;armman.org;armman.org;google.com;google.com;google.com;google.com", + "email": 
"google.com;google.com;g.harvard.edu;cs.purdue.edu;iisc.ac.in;armman.org;armman.org;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0+1;0+2;0+3;4;4;0;0;0;0", + "aff_unique_norm": "Google;Harvard University;Purdue University;Indian Institute of Science;ARMMAN", + "aff_unique_dep": "Google Research;;;;", + "aff_unique_url": "https://research.google;https://www.harvard.edu;https://www.purdue.edu;https://www.iisc.ac.in;", + "aff_unique_abbr": "Google Research India;Harvard;Purdue;IISc;", + "aff_campus_unique_index": "0;0;0;0;0+0;0;0;0;0", + "aff_campus_unique": "Bangalore;", + "aff_country_unique_index": "0;0;0+1;0+1;0+0;0;0;0;0", + "aff_country_unique": "India;United States;" + }, + { + "id": "article-26981", + "title": "Incremental Density-Based Clustering with Grid Partitioning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "DBSCAN is widely used in various fields, but it requires computational costs similar to those of re-clustering from scratch to update clusters when new data is inserted. To solve this, we propose an incremental density-based clustering method that rapidly updates clusters by identifying in advance regions where cluster updates will occur. 
Also, through extensive experiments, we show that our method provides clustering results similar to those of DBSCAN.", + "primary_area": "", + "author": "Jeong-Hun Kim; Tserenpurev Chuluunsaikhan; Jong-Hyeok Choi; Aziz Nasridinov", + "authorids": "", + "aff": "Department of Computer Science, Chungbuk National University; Department of Computer Science, Chungbuk National University; Bigdata Research Institute, Chungbuk National University; Department of Computer Science, Chungbuk National University", + "bibtex": "@article{Kim_Chuluunsaikhan_Choi_Nasridinov_2024, title={Incremental Density-Based Clustering with Grid Partitioning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26981}, DOI={10.1609/aaai.v37i13.26981}, abstractNote={DBSCAN is widely used in various fields, but it requires computational costs similar to those of re-clustering from scratch to update clusters when new data is inserted. To solve this, we propose an incremental density-based clustering method that rapidly updates clusters by identifying in advance regions where cluster updates will occur. 
Also, through extensive experiments, we show that our method provides clustering results similar to those of DBSCAN.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Jeong-Hun and Chuluunsaikhan, Tserenpurev and Choi, Jong-Hyeok and Nasridinov, Aziz}, year={2024}, month={Jul.}, pages={16242-16243} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26981/26753", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26981", + "pdf_size": 270013, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11676270657282926808&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "chungbuk.ac.kr;chungbuk.ac.kr;chungbuk.ac.kr;chungbuk.ac.kr", + "email": "chungbuk.ac.kr;chungbuk.ac.kr;chungbuk.ac.kr;chungbuk.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Chungbuk National University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "http://www.cbnu.ac.kr", + "aff_unique_abbr": "CBNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25145", + "title": "Incremental Image De-raining via Associative Memory", + "track": "main", + "status": "Technical", + "abstract": "While deep learning models have achieved the state-of-the-art performance on single-image rain removal, most methods only consider learning fixed mapping rules on the single synthetic dataset for lifetime. This limits the real-life application as iterative optimization may change mapping rules and training samples. However, when models learn a sequence of datasets in multiple incremental steps, they are susceptible to catastrophic forgetting that adapts to new incremental episodes while failing to preserve previously acquired mapping rules. 
In this paper, we argue the importance of sample diversity in the episodes on the iterative optimization, and propose a novel memory management method, Associative Memory, to achieve incremental image de-raining. It bridges connections between current and past episodes for feature reconstruction by sampling domain mappings of past learning steps, and guides the learning to trace the current pathway back to the historical environment without storing extra data. Experiments demonstrate that our method can achieve better performance than existing approaches on both inhomogeneous and incremental datasets within the spectrum of highly compact systems.", + "primary_area": "computer vision i", + "author": "Yi Gu; Chao Wang; Jie Li", + "authorids": "", + "aff": "Alibaba Cloud Computing Ltd.; Department of Computer Science and Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University", + "bibtex": "@article{Gu_Wang_Li_2023, title={Incremental Image De-raining via Associative Memory}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25145}, DOI={10.1609/aaai.v37i1.25145}, abstractNote={While deep learning models have achieved the state-of-the-art performance on single-image rain removal, most methods only consider learning fixed mapping rules on the single synthetic dataset for lifetime. This limits the real-life application as iterative optimization may change mapping rules and training samples. However, when models learn a sequence of datasets in multiple incremental steps, they are susceptible to catastrophic forgetting that adapts to new incremental episodes while failing to preserve previously acquired mapping rules. In this paper, we argue the importance of sample diversity in the episodes on the iterative optimization, and propose a novel memory management method, Associative Memory, to achieve incremental image de-raining. 
It bridges connections between current and past episodes for feature reconstruction by sampling domain mappings of past learning steps, and guides the learning to trace the current pathway back to the historical environment without storing extra data. Experiments demonstrate that our method can achieve better performance than existing approaches on both inhomogeneous and incremental datasets within the spectrum of highly compact systems.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gu, Yi and Wang, Chao and Li, Jie}, year={2023}, month={Jun.}, pages={685-693} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25145/24917", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25145", + "pdf_size": 1573999, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10790695631704013982&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 2, + "aff_domain": "alibaba-inc.com; ;sjtu.edu.cn", + "email": "alibaba-inc.com; ;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Alibaba Cloud Computing;Shanghai Jiao Tong University", + "aff_unique_dep": ";Department of Computer Science and Engineering", + "aff_unique_url": "https://www.alibabacloud.com;https://www.sjtu.edu.cn", + "aff_unique_abbr": "Alibaba Cloud;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25899", + "title": "Incremental Reinforcement Learning with Dual-Adaptive \u03b5-Greedy Exploration", + "track": "main", + "status": "Technical", + "abstract": "Reinforcement learning (RL) has achieved impressive performance in various domains. However, most RL frameworks oversimplify the problem by assuming a fixed-yet-known environment and often have difficulty being generalized to real-world scenarios. 
In this paper, we address a new challenge with a more realistic setting, Incremental Reinforcement Learning, where the search space of the Markov Decision Process continually expands. While previous methods usually suffer from the lack of efficiency in exploring the unseen transitions, especially with increasing search space, we present a new exploration framework named Dual-Adaptive \u03f5-greedy Exploration (DAE) to address the challenge of Incremental RL. Specifically, DAE employs a Meta Policy and an Explorer to avoid redundant computation on those sufficiently\nlearned samples. Furthermore, we release a testbed based on a synthetic environment and the Atari benchmark to validate the effectiveness of any exploration algorithms under Incremental RL. Experimental results demonstrate that the proposed framework can efficiently learn the unseen transitions in new environments, leading to notable performance improvement, i.e., an average of more than 80%, over eight baselines examined.", + "primary_area": "machine learning i", + "author": "Wei Ding; Siyang Jiang; Hsi-Wen Chen; Ming-Syan Chen", + "authorids": "", + "aff": "Graduate Institute of Electrical Engineering, National Taiwan University, Taiwan; Graduate Institute of Electrical Engineering, National Taiwan University, Taiwan; Graduate Institute of Electrical Engineering, National Taiwan University, Taiwan; Graduate Institute of Electrical Engineering, National Taiwan University, Taiwan", + "bibtex": "@article{Ding_Jiang_Chen_Chen_2023, title={Incremental Reinforcement Learning with Dual-Adaptive \u03b5-Greedy Exploration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25899}, DOI={10.1609/aaai.v37i6.25899}, abstractNote={Reinforcement learning (RL) has achieved impressive performance in various domains. However, most RL frameworks oversimplify the problem by assuming a fixed-yet-known environment and often have difficulty being generalized to real-world scenarios. 
In this paper, we address a new challenge with a more realistic setting, Incremental Reinforcement Learning, where the search space of the Markov Decision Process continually expands. While previous methods usually suffer from the lack of efficiency in exploring the unseen transitions, especially with increasing search space, we present a new exploration framework named Dual-Adaptive \u03f5-greedy Exploration (DAE) to address the challenge of Incremental RL. Specifically, DAE employs a Meta Policy and an Explorer to avoid redundant computation on those sufficiently\nlearned samples. Furthermore, we release a testbed based on a synthetic environment and the Atari benchmark to validate the effectiveness of any exploration algorithms under Incremental RL. Experimental results demonstrate that the proposed framework can efficiently learn the unseen transitions in new environments, leading to notable performance improvement, i.e., an average of more than 80%, over eight baselines examined.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Wei and Jiang, Siyang and Chen, Hsi-Wen and Chen, Ming-Syan}, year={2023}, month={Jun.}, pages={7387-7395} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25899/25671", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25899", + "pdf_size": 849213, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18412165857619021822&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "arbor.ee.ntu.edu.tw;arbor.ee.ntu.edu.tw;arbor.ee.ntu.edu.tw;ntu.edu.tw", + "email": "arbor.ee.ntu.edu.tw;arbor.ee.ntu.edu.tw;arbor.ee.ntu.edu.tw;ntu.edu.tw", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "National Taiwan University", + "aff_unique_dep": "Graduate Institute of Electrical Engineering", + "aff_unique_url": "https://www.ntu.edu.tw", + "aff_unique_abbr": "NTU", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-25129", + "title": "Incremental-DETR: Incremental Few-Shot Object Detection via Self-Supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "Incremental few-shot object detection aims at detecting novel classes without forgetting knowledge of the base classes with only a few labeled training data from the novel classes. Most related prior works are on incremental object detection that rely on the availability of abundant training samples per novel class that substantially limits the scalability to real-world setting where novel data can be scarce. In this paper, we propose the Incremental-DETR that does incremental few-shot object detection via fine-tuning and self-supervised learning on the DETR object detector. To alleviate severe over-fitting with few novel class data, we first fine-tune the class-specific components of DETR with self-supervision from additional object proposals generated using Selective Search as pseudo labels. We further introduce an incremental few-shot fine-tuning strategy with knowledge distillation on the class-specific components of DETR to encourage the network in detecting novel classes without forgetting the base classes. Extensive experiments conducted on standard incremental object detection and incremental few-shot object detection settings show that our approach significantly outperforms state-of-the-art methods by a large margin. 
Our source code is available at https://github.com/dongnana777/Incremental-DETR.", + "primary_area": "computer vision i", + "author": "Na Dong; Yongqiang Zhang; Mingli Ding; Gim Hee Lee", + "authorids": "", + "aff": "Department of Computer Science, National University of Singapore + School of Instrument Science and Engineering, Harbin Institute of Technology; School of Instrument Science and Engineering, Harbin Institute of Technology; School of Instrument Science and Engineering, Harbin Institute of Technology; Department of Computer Science, National University of Singapore", + "bibtex": "@article{Dong_Zhang_Ding_Lee_2023, title={Incremental-DETR: Incremental Few-Shot Object Detection via Self-Supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25129}, DOI={10.1609/aaai.v37i1.25129}, abstractNote={Incremental few-shot object detection aims at detecting novel classes without forgetting knowledge of the base classes with only a few labeled training data from the novel classes. Most related prior works are on incremental object detection that rely on the availability of abundant training samples per novel class that substantially limits the scalability to real-world setting where novel data can be scarce. In this paper, we propose the Incremental-DETR that does incremental few-shot object detection via fine-tuning and self-supervised learning on the DETR object detector. To alleviate severe over-fitting with few novel class data, we first fine-tune the class-specific components of DETR with self-supervision from additional object proposals generated using Selective Search as pseudo labels. We further introduce an incremental few-shot fine-tuning strategy with knowledge distillation on the class-specific components of DETR to encourage the network in detecting novel classes without forgetting the base classes. 
Extensive experiments conducted on standard incremental object detection and incremental few-shot object detection settings show that our approach significantly outperforms state-of-the-art methods by a large margin. Our source code is available at https://github.com/dongnana777/Incremental-DETR.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Na and Zhang, Yongqiang and Ding, Mingli and Lee, Gim Hee}, year={2023}, month={Jun.}, pages={543-551} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25129/24901", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25129", + "pdf_size": 824403, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8625771028148797234&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hit.edu.cn;hit.edu.cn;hit.edu.cn;comp.nus.edu.sg", + "email": "hit.edu.cn;hit.edu.cn;hit.edu.cn;comp.nus.edu.sg", + "github": "https://github.com/dongnana777/Incremental-DETR", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0", + "aff_unique_norm": "National University of Singapore;Harbin Institute of Technology", + "aff_unique_dep": "Department of Computer Science;School of Instrument Science and Engineering", + "aff_unique_url": "https://www.nus.edu.sg;http://www.hit.edu.cn/", + "aff_unique_abbr": "NUS;HIT", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0+1;1;1;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-26521", + "title": "IndicSUPERB: A Speech Processing Universal Performance Benchmark for Indian Languages", + "track": "main", + "status": "Technical", + "abstract": "A cornerstone in AI research has been the creation and adoption of standardized training and test datasets to earmark the progress of state-of-the-art models. 
A particularly successful example is the GLUE dataset for training and evaluating Natural Language Understanding (NLU) models for English. The large body of research around self-supervised BERT-based language models revolved around performance improvements on NLU tasks in GLUE. To evaluate language models in other languages, several language-specific GLUE datasets were created. The area of speech language understanding (SLU) has followed a similar trajectory. The success of large self-supervised models such as wav2vec2 enable creation of speech models with relatively easy to access unlabelled data. These models can then be evaluated on SLU tasks, such as the SUPERB benchmark. In this work, we extend this to Indic languages by releasing the IndicSUPERB benchmark. Specifically, we make the following three contributions. (i) We collect Kathbath containing 1,684 hours of labelled speech data across 12 Indian languages from 1,218 contributors located in 203 districts in India. (ii) Using Kathbath, we create benchmarks across 6 speech tasks: Automatic Speech Recognition, Speaker Verification, Speaker Identification (mono/multi), Language Identification, Query By Example, and Keyword Spotting for 12 languages. (iii) On the released benchmarks, we train and evaluate different self-supervised models alongside the a commonly used baseline FBANK. We show that language-specific fine-tuned models are more accurate than baseline on most of the tasks, including a large gap of 76% for Language Identification task. However, for speaker identification, self-supervised models trained on large datasets demonstrate an advantage. We hope IndicSUPERB contributes to the progress of developing speech language understanding models for Indian languages.", + "primary_area": "speech natural language processing", + "author": "Tahir Javed; Kaushal Bhogale; Abhigyan Raman; Pratyush Kumar; Anoop Kunchukuttan; Mitesh M. 
Khapra", + "authorids": "", + "aff": "Indian Institute of Technology Madras+AI4Bharat; Indian Institute of Technology Madras+AI4Bharat; AI4Bharat; AI4Bharat+Microsoft; AI4Bharat+Microsoft; Indian Institute of Technology Madras+AI4Bharat", + "bibtex": "@article{Javed_Bhogale_Raman_Kumar_Kunchukuttan_Khapra_2023, title={IndicSUPERB: A Speech Processing Universal Performance Benchmark for Indian Languages}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26521}, DOI={10.1609/aaai.v37i11.26521}, abstractNote={A cornerstone in AI research has been the creation and adoption of standardized training and test datasets to earmark the progress of state-of-the-art models. A particularly successful example is the GLUE dataset for training and evaluating Natural Language Understanding (NLU) models for English. The large body of research around self-supervised BERT-based language models revolved around performance improvements on NLU tasks in GLUE. To evaluate language models in other languages, several language-specific GLUE datasets were created. The area of speech language understanding (SLU) has followed a similar trajectory. The success of large self-supervised models such as wav2vec2 enable creation of speech models with relatively easy to access unlabelled data. These models can then be evaluated on SLU tasks, such as the SUPERB benchmark. In this work, we extend this to Indic languages by releasing the IndicSUPERB benchmark. Specifically, we make the following three contributions. (i) We collect Kathbath containing 1,684 hours of labelled speech data across 12 Indian languages from 1,218 contributors located in 203 districts in India. (ii) Using Kathbath, we create benchmarks across 6 speech tasks: Automatic Speech Recognition, Speaker Verification, Speaker Identification (mono/multi), Language Identification, Query By Example, and Keyword Spotting for 12 languages. 
(iii) On the released benchmarks, we train and evaluate different self-supervised models alongside the a commonly used baseline FBANK. We show that language-specific fine-tuned models are more accurate than baseline on most of the tasks, including a large gap of 76% for Language Identification task. However, for speaker identification, self-supervised models trained on large datasets demonstrate an advantage. We hope IndicSUPERB contributes to the progress of developing speech language understanding models for Indian languages.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Javed, Tahir and Bhogale, Kaushal and Raman, Abhigyan and Kumar, Pratyush and Kunchukuttan, Anoop and Khapra, Mitesh M.}, year={2023}, month={Jun.}, pages={12942-12950} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26521/26293", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26521", + "pdf_size": 730445, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3606772293106032129&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cse.iitm.ac.in;cse.iitm.ac.in;gmail.com;cse.iitm.ac.in;microsoft.com;cse.iitm.ac.in", + "email": "cse.iitm.ac.in;cse.iitm.ac.in;gmail.com;cse.iitm.ac.in;microsoft.com;cse.iitm.ac.in", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;1;1+2;1+2;0+1", + "aff_unique_norm": "Indian Institute of Technology Madras;AI4Bharat;Microsoft Corporation", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.iitm.ac.in;;https://www.microsoft.com", + "aff_unique_abbr": "IIT Madras;;Microsoft", + "aff_campus_unique_index": "0;0;;;0", + "aff_campus_unique": "Madras;", + "aff_country_unique_index": "0+0;0+0;0;0+1;0+1;0+0", + "aff_country_unique": "India;United States" + }, + { + "id": "article-26847", + "title": "Industry-Scale Orchestrated Federated Learning for Drug Discovery", + "track": "iaai technical track", + 
"status": "Technical", + "abstract": "To apply federated learning to drug discovery we developed a novel platform in the context of European Innovative Medicines Initiative (IMI) project MELLODDY (grant n\u00b0831472), which was comprised of 10 pharmaceutical companies, academic research labs, large industrial companies and startups. The MELLODDY platform was the first industry-scale platform to enable the creation of a global federated model for drug discovery without sharing the confidential data sets of the individual partners. The federated model was trained on the platform by aggregating the gradients of all contributing partners in a cryptographic, secure way following each training iteration. The platform was deployed on an Amazon Web Services (AWS) multi-account architecture running Kubernetes clusters in private subnets. Organisationally, the roles of the different partners were codified as different rights and permissions on the platform and administrated in a decentralized way. 
The MELLODDY platform generated new scientific discoveries which are described in a companion paper.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Martijn Oldenhof; Gergely \u00c1cs; Bal\u00e1zs Pej\u00f3; Ansgar Schuffenhauer; Nicholas Holway; No\u00e9 Sturm; Arne Dieckmann; Oliver Fortmeier; Eric Boniface; Cl\u00e9ment Mayer; Arnaud Gohier; Peter Schmidtke; Ritsuya Niwayama; Dieter Kopecky; Lewis Mervin; Prakash Chandra Rathi; Lukas Friedrich; Andr\u00e1s Formanek; Peter Antal; Jordon Rahaman; Adam Zalewski; Wouter Heyndrickx; Ezron Oluoch; Manuel St\u00f6\u00dfel; Michal Van\u010do; David Endico; Fabien Gelus; Tha\u00efs de Boisfoss\u00e9; Adrien Darbier; Ashley Nicollet; Matthieu Blotti\u00e8re; Maria Telenczuk; Van Tien Nguyen; Thibaud Martinez; Camille Boillet; Kelvin Moutet; Alexandre Picosson; Aur\u00e9lien Gasser; Inal Djafar; Antoine Simon; \u00c1d\u00e1m Arany; Jaak Simm; Yves Moreau; Ola Engkvist; Hugo Ceulemans; Camille Marini; Mathieu Galtier", + "authorids": "", + "aff": "KU Leuven, ESAT-STADIUS, Kasteelpark Arenberg 10, 3001 Heverlee, Belgium; BME-HIT, CrySyS Lab, Budapest, Hungary; BME-HIT, CrySyS Lab, Budapest, Hungary; Novartis Institutes for BioMedical Research, Basel, Switzerland; Novartis Institutes for BioMedical Research, Basel, Switzerland; Novartis Institutes for BioMedical Research, Basel, Switzerland; Bayer AG, Leverkusen, Germany; Bayer AG, Leverkusen, Germany; Substra Foundation \u2013 Labelia Labs, Nantes, France; Substra Foundation \u2013 Labelia Labs, Nantes, France; Institut de recherches Servier, Croissy-sur-Seine, France; Discngine, Paris, France; Institut de recherches Servier, Croissy-sur-Seine, France; Boehringer Ingelheim RCV GmbH & Co KG, Vienna, Austria; Molecular AI, Discovery Sciences, R&D, AstraZeneca, Cambridge, UK; R&D IT, AstraZeneca, Cambridge, UK; Merck KGaA, Global R&D, Darmstadt, Germany; KU Leuven, ESAT-STADIUS, Kasteelpark Arenberg 10, 3001 Heverlee, Belgium+BME-MIT, Budapest, 
Hungary; BME-MIT, Budapest, Hungary; Pillar Biosciences, Natick MA, USA; Amgen Research (Munich) GmbH, Munich, Germany; Janssen Pharmaceutica NV, Beerse, Belgium; Kubermatic, Hamburg, Germany; Kubermatic, Hamburg, Germany; Kubermatic, Hamburg, Germany; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; Owkin, Paris, France; KU Leuven, ESAT-STADIUS, Kasteelpark Arenberg 10, 3001 Heverlee, Belgium; KU Leuven, ESAT-STADIUS, Kasteelpark Arenberg 10, 3001 Heverlee, Belgium; KU Leuven, ESAT-STADIUS, Kasteelpark Arenberg 10, 3001 Heverlee, Belgium; Molecular AI, Discovery Sciences, R&D, AstraZeneca, Gothenburg, Sweden+Department of Computer Science and Engineering, Chalmers University of Technology, Gothenburg, Sweden; Janssen Pharmaceutica NV, Beerse, Belgium; Owkin, Paris, France; Owkin, Paris, France", + "bibtex": "@article{Oldenhof_\u00c1cs_Pej\u00f3_Schuffenhauer_Holway_Sturm_Dieckmann_Fortmeier_Boniface_Mayer_Gohier_Schmidtke_Niwayama_Kopecky_Mervin_Rathi_Friedrich_Formanek_Antal_Rahaman_Zalewski_Heyndrickx_Oluoch_St\u00f6\u00dfel_Van\u010do_Endico_Gelus_de Boisfoss\u00e9_Darbier_Nicollet_Blotti\u00e8re_Telenczuk_Nguyen_Martinez_Boillet_Moutet_Picosson_Gasser_Djafar_Simon_Arany_Simm_Moreau_Engkvist_Ceulemans_Marini_Galtier_2024, title={Industry-Scale Orchestrated Federated Learning for Drug Discovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26847}, DOI={10.1609/aaai.v37i13.26847}, abstractNote={To apply federated learning to drug discovery we developed a novel platform in the context of European Innovative Medicines Initiative (IMI) project MELLODDY (grant n\u00b0831472), which was comprised of 10 pharmaceutical companies, academic research labs, large industrial companies and startups. 
The MELLODDY platform was the first industry-scale platform to enable the creation of a global federated model for drug discovery without sharing the confidential data sets of the individual partners. The federated model was trained on the platform by aggregating the gradients of all contributing partners in a cryptographic, secure way following each training iteration. The platform was deployed on an Amazon Web Services (AWS) multi-account architecture running Kubernetes clusters in private subnets. Organisationally, the roles of the different partners were codified as different rights and permissions on the platform and administrated in a decentralized way. The MELLODDY platform generated new scientific discoveries which are described in a companion paper.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Oldenhof, Martijn and \u00c1cs, Gergely and Pej\u00f3, Bal\u00e1zs and Schuffenhauer, Ansgar and Holway, Nicholas and Sturm, No\u00e9 and Dieckmann, Arne and Fortmeier, Oliver and Boniface, Eric and Mayer, Cl\u00e9ment and Gohier, Arnaud and Schmidtke, Peter and Niwayama, Ritsuya and Kopecky, Dieter and Mervin, Lewis and Rathi, Prakash Chandra and Friedrich, Lukas and Formanek, Andr\u00e1s and Antal, Peter and Rahaman, Jordon and Zalewski, Adam and Heyndrickx, Wouter and Oluoch, Ezron and St\u00f6\u00dfel, Manuel and Van\u010do, Michal and Endico, David and Gelus, Fabien and de Boisfoss\u00e9, Tha\u00efs and Darbier, Adrien and Nicollet, Ashley and Blotti\u00e8re, Matthieu and Telenczuk, Maria and Nguyen, Van Tien and Martinez, Thibaud and Boillet, Camille and Moutet, Kelvin and Picosson, Alexandre and Gasser, Aur\u00e9lien and Djafar, Inal and Simon, Antoine and Arany, \u00c1d\u00e1m and Simm, Jaak and Moreau, Yves and Engkvist, Ola and Ceulemans, Hugo and Marini, Camille and Galtier, Mathieu}, year={2024}, month={Jul.}, pages={15576-15584} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26847/26619", 
+ "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26847", + "pdf_size": 706022, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=980016685377458663&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kuleuven.be; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ;owkin.com", + "email": "kuleuven.be; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ;owkin.com", + "github": "", + "project": "", + "author_num": 47, + "aff_unique_index": "0;1;1;2;2;2;3;3;4;4;5;6;5;7;8;8;9;0+1;1;10;11;12;13;13;13;14;14;14;14;14;14;14;14;14;14;14;14;14;14;14;0;0;0;8+15;12;14;14", + "aff_unique_norm": "KU Leuven;Budapest University of Technology and Economics;Novartis Institutes for BioMedical Research;Bayer AG;Substra Foundation;Institut de recherches Servier;Discngine;Boehringer Ingelheim RCV GmbH & Co KG;AstraZeneca;Merck KGaA;Pillar Biosciences;Amgen Research;Janssen Pharmaceutica NV;Kubermatic;Owkin;Chalmers University of Technology", + "aff_unique_dep": "ESAT-STADIUS;CrySyS Lab;;;Labelia Labs;;;;Molecular AI, Discovery Sciences, R&D;Global R&D;;;;;;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.kuleuven.be;https://www.bme.hu;https://www.nibr.com;https://www.bayer.com;;;;https://www.boehringer-ingelheim.com;https://www.astrazeneca.com;https://www.merckgroup.com;;https://www.amgen.com/;https://www.janssen.com;;https://www.owkin.io;https://www.chalmers.se", + "aff_unique_abbr": "KU Leuven;BME;NIBR;Bayer;;;;;AZ;Merck;;Amgen;;;;Chalmers", + "aff_campus_unique_index": "0;1;1;2;2;2;3;3;5;6;6;0+1;1;7;8;0;0;0;9+9", + "aff_campus_unique": "Heverlee;Budapest;Basel;Leverkusen;;Paris;Cambridge;Natick;Munich;Gothenburg", + "aff_country_unique_index": "0;1;1;2;2;2;3;3;4;4;4;4;4;5;6;6;3;0+1;1;7;3;0;3;3;3;4;4;4;4;4;4;4;4;4;4;4;4;4;4;4;0;0;0;8+8;0;4;4", + "aff_country_unique": 
"Belgium;Hungary;Switzerland;Germany;France;Austria;United Kingdom;United States;Sweden" + }, + { + "id": "article-26570", + "title": "Inferential Knowledge-Enhanced Integrated Reasoning for Video Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Recently, video question answering has attracted growing attention. It involves answering a question based on a fine-grained understanding of video multi-modal information. Most existing methods have successfully explored the deep understanding of visual modality. We argue that a deep understanding of linguistic modality is also essential for answer reasoning, especially for videos that contain character dialogues. To this end, we propose an Inferential Knowledge-Enhanced Integrated Reasoning method. Our method consists of two main components: 1) an Inferential Knowledge Reasoner to generate inferential knowledge for linguistic modality inputs that reveals deeper semantics, including the implicit causes, effects, mental states, etc. 2) an Integrated Reasoning Mechanism to enhance video content understanding and answer reasoning by leveraging the generated inferential knowledge. Experimental results show that our method achieves significant improvement on two mainstream datasets. 
The ablation study further demonstrates the effectiveness of each component of our approach.", + "primary_area": "speech natural language processing", + "author": "Jianguo Mao; Wenbin Jiang; Hong Liu; Xiangdong Wang; Yajuan Lyu", + "authorids": "", + "aff": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China + University of Chinese Academy of Sciences, Beijing, China; Baidu Inc., Beijing, China; Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; Baidu Inc., Beijing, China", + "bibtex": "@article{Mao_Jiang_Liu_Wang_Lyu_2023, title={Inferential Knowledge-Enhanced Integrated Reasoning for Video Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26570}, DOI={10.1609/aaai.v37i11.26570}, abstractNote={Recently, video question answering has attracted growing attention. It involves answering a question based on a fine-grained understanding of video multi-modal information. Most existing methods have successfully explored the deep understanding of visual modality. We argue that a deep understanding of linguistic modality is also essential for answer reasoning, especially for videos that contain character dialogues. To this end, we propose an Inferential Knowledge-Enhanced Integrated Reasoning method. Our method consists of two main components: 1) an Inferential Knowledge Reasoner to generate inferential knowledge for linguistic modality inputs that reveals deeper semantics, including the implicit causes, effects, mental states, etc. 2) an Integrated Reasoning Mechanism to enhance video content understanding and answer reasoning by leveraging the generated inferential knowledge. 
Experimental results show that our method achieves significant improvement on two mainstream datasets. The ablation study further demonstrates the effectiveness of each component of our approach.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Jianguo and Jiang, Wenbin and Liu, Hong and Wang, Xiangdong and Lyu, Yajuan}, year={2023}, month={Jun.}, pages={13380-13388} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26570/26342", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26570", + "pdf_size": 792762, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17915312140998949526&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ict.ac.cn;baidu.com;ict.ac.cn;ict.ac.cn;baidu.com", + "email": "ict.ac.cn;baidu.com;ict.ac.cn;ict.ac.cn;baidu.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;0;0;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Baidu Inc.", + "aff_unique_dep": "Institute of Computing Technology;;", + "aff_unique_url": "http://www.ict.cas.cn;http://www.ucas.ac.cn;https://www.baidu.com", + "aff_unique_abbr": "CAS;UCAS;Baidu", + "aff_campus_unique_index": "0+0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26152", + "title": "Inferring Patient Zero on Temporal Networks via Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "The world is currently seeing frequent local outbreaks of epidemics, such as COVID-19 and Monkeypox. Preventing further propagation of the outbreak requires prompt implementation of control measures, and a critical step is to quickly infer patient zero. This backtracking task is challenging for two reasons. 
First, due to the sudden emergence of local epidemics, information recording the spreading process is limited. Second, the spreading process has strong randomness. To address these challenges, we tailor a gnn-based model to establish the inverse statistical association between the current and initial state implicitly. This model uses contact topology and the current state of the local population to determine the possibility that each individual could be patient zero. We benchmark our model on data from important epidemiological models on five real temporal networks, showing performance significantly superior to previous methods. We also demonstrate that our method is robust to missing information about contact structure or current state. Further, we find the individuals assigned higher inferred possibility by model are closer to patient zero in terms of core number and the activity sequence recording the times at which the individual had contact with other nodes.", + "primary_area": "machine learning iii", + "author": "Xiaolei Ru; Jack Murdoch Moore; Xin-Ya Zhang; Yeting Zeng; Gang Yan", + "authorids": "", + "aff": "School of Physical Science and Engineering, and National Key Laboratory of Autonomous Intelligent Unmanned Systems, Tongji University, Shanghai, China; School of Physical Science and Engineering, and National Key Laboratory of Autonomous Intelligent Unmanned Systems, Tongji University, Shanghai, China; School of Physical Science and Engineering, and National Key Laboratory of Autonomous Intelligent Unmanned Systems, Tongji University, Shanghai, China; Zhongshan Hospital, Fudan University, Shanghai, China; School of Physical Science and Engineering, and National Key Laboratory of Autonomous Intelligent Unmanned Systems, Tongji University, Shanghai, China", + "bibtex": "@article{Ru_Murdoch Moore_Zhang_Zeng_Yan_2023, title={Inferring Patient Zero on Temporal Networks via Graph Neural Networks}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/26152}, DOI={10.1609/aaai.v37i8.26152}, abstractNote={The world is currently seeing frequent local outbreaks of epidemics, such as COVID-19 and Monkeypox. Preventing further propagation of the outbreak requires prompt implementation of control measures, and a critical step is to quickly infer patient zero. This backtracking task is challenging for two reasons. First, due to the sudden emergence of local epidemics, information recording the spreading process is limited. Second, the spreading process has strong randomness. To address these challenges, we tailor a gnn-based model to establish the inverse statistical association between the current and initial state implicitly. This model uses contact topology and the current state of the local population to determine the possibility that each individual could be patient zero. We benchmark our model on data from important epidemiological models on five real temporal networks, showing performance significantly superior to previous methods. We also demonstrate that our method is robust to missing information about contact structure or current state. 
Further, we find the individuals assigned higher inferred possibility by model are closer to patient zero in terms of core number and the activity sequence recording the times at which the individual had contact with other nodes.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ru, Xiaolei and Murdoch Moore, Jack and Zhang, Xin-Ya and Zeng, Yeting and Yan, Gang}, year={2023}, month={Jun.}, pages={9632-9640} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26152/25924", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26152", + "pdf_size": 11952388, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7225914776566236279&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "tongji.edu.cn;tongji.edu.cn;tongji.edu.cn;m.fudan.edu.cn;tongji.edu.cn", + "email": "tongji.edu.cn;tongji.edu.cn;tongji.edu.cn;m.fudan.edu.cn;tongji.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Tongji University;Fudan University", + "aff_unique_dep": "School of Physical Science and Engineering;Zhongshan Hospital", + "aff_unique_url": "https://www.tongji.edu.cn;https://www.fudan.edu.cn", + "aff_unique_abbr": "Tongji;Fudan", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26612", + "title": "InfoCTM: A Mutual Information Maximization Perspective of Cross-Lingual Topic Modeling", + "track": "main", + "status": "Technical", + "abstract": "Cross-lingual topic models have been prevalent for cross-lingual text analysis by revealing aligned latent topics. However, most existing methods suffer from producing repetitive topics that hinder further analysis and performance decline caused by low-coverage dictionaries. 
In this paper, we propose the Cross-lingual Topic Modeling with Mutual Information (InfoCTM). Instead of the direct alignment in previous work, we propose a topic alignment with mutual information method. This works as a regularization to properly align topics and prevent degenerate topic representations of words, which mitigates the repetitive topic issue. To address the low-coverage dictionary issue, we further propose a cross-lingual vocabulary linking method that finds more linked cross-lingual words for topic alignment beyond the translations of a given dictionary. Extensive experiments on English, Chinese, and Japanese datasets demonstrate that our method outperforms state-of-the-art baselines, producing more coherent, diverse, and well-aligned topics and showing better transferability for cross-lingual classification tasks.", + "primary_area": "speech natural language processing", + "author": "Xiaobao Wu; Xinshuai Dong; Thong Nguyen; Chaoqun Liu; Liang-Ming Pan; Anh Tuan Luu", + "authorids": "", + "aff": "Nanyang Technological University, Singapore; Carnegie Mellon University, USA; National University of Singapore, Singapore; Nanyang Technological University, Singapore + DAMO Academy, Alibaba Group, Singapore; National University of Singapore, Singapore; Nanyang Technological University, Singapore", + "bibtex": "@article{Wu_Dong_Nguyen_Liu_Pan_Luu_2023, title={InfoCTM: A Mutual Information Maximization Perspective of Cross-Lingual Topic Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26612}, DOI={10.1609/aaai.v37i11.26612}, abstractNote={Cross-lingual topic models have been prevalent for cross-lingual text analysis by revealing aligned latent topics. However, most existing methods suffer from producing repetitive topics that hinder further analysis and performance decline caused by low-coverage dictionaries. In this paper, we propose the Cross-lingual Topic Modeling with Mutual Information (InfoCTM). 
Instead of the direct alignment in previous work, we propose a topic alignment with mutual information method. This works as a regularization to properly align topics and prevent degenerate topic representations of words, which mitigates the repetitive topic issue. To address the low-coverage dictionary issue, we further propose a cross-lingual vocabulary linking method that finds more linked cross-lingual words for topic alignment beyond the translations of a given dictionary. Extensive experiments on English, Chinese, and Japanese datasets demonstrate that our method outperforms state-of-the-art baselines, producing more coherent, diverse, and well-aligned topics and showing better transferability for cross-lingual classification tasks.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Xiaobao and Dong, Xinshuai and Nguyen, Thong and Liu, Chaoqun and Pan, Liang-Ming and Luu, Anh Tuan}, year={2023}, month={Jun.}, pages={13763-13771} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26612/26384", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26612", + "pdf_size": 519472, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13499205383989036994&as_sdt=4000005&sciodt=0,18&hl=en", + "gs_version_total": 4, + "aff_domain": "ntu.edu.sg;andrew.cmu.edu;u.nus.edu;ntu.edu.sg;u.nus.edu;ntu.edu.sg", + "email": "ntu.edu.sg;andrew.cmu.edu;u.nus.edu;ntu.edu.sg;u.nus.edu;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0+3;2;0", + "aff_unique_norm": "Nanyang Technological University;Carnegie Mellon University;National University of Singapore;Alibaba Group", + "aff_unique_dep": ";;;DAMO Academy", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.cmu.edu;https://www.nus.edu.sg;https://www.alibaba.com", + "aff_unique_abbr": "NTU;CMU;NUS;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;1;0;0+0;0;0", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "article-26831", + "title": "Information Transfer in Multitask Learning, Data Augmentation, and Beyond", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "A hallmark of human intelligence is that we continue to learn new information and then extrapolate the learned information onto new tasks and domains (see, e.g., Thrun and Pratt (1998)). While this is a fairly intuitive observation, formulating such ideas has proved to be a challenging research problem and continues to inspire new studies. Recently, there has been increasing interest in AI/ML about building models that generalize across tasks, even when they have some form of distribution shifts. How can we ground this research in a solid framework to develop principled methods for better practice? This talk will present my recent works addressing this research question. My talk will involve three parts: revisiting multitask learning from the lens of deep learning theory, designing principled methods for robust transfer, and algorithmic implications for data augmentation.", + "primary_area": "", + "author": "Hongyang R. Zhang", + "authorids": "", + "aff": "Northeastern University, Boston, Massachusetts", + "bibtex": "@article{Zhang_2024, title={Information Transfer in Multitask Learning, Data Augmentation, and Beyond}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26831}, DOI={10.1609/aaai.v37i13.26831}, abstractNote={A hallmark of human intelligence is that we continue to learn new information and then extrapolate the learned information onto new tasks and domains (see, e.g., Thrun and Pratt (1998)). While this is a fairly intuitive observation, formulating such ideas has proved to be a challenging research problem and continues to inspire new studies. 
Recently, there has been increasing interest in AI/ML about building models that generalize across tasks, even when they have some form of distribution shifts. How can we ground this research in a solid framework to develop principled methods for better practice? This talk will present my recent works addressing this research question. My talk will involve three parts: revisiting multitask learning from the lens of deep learning theory, designing principled methods for robust transfer, and algorithmic implications for data augmentation.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Hongyang R.}, year={2024}, month={Jul.}, pages={15464-15464} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26831/26603", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26831", + "pdf_size": 46220, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:k1HDXs12n3wJ:scholar.google.com/&scioq=Information+Transfer+in+Multitask+Learning,+Data+Augmentation,+and+Beyond&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff_domain": "northeastern.edu", + "email": "northeastern.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Northeastern University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.northeastern.edu", + "aff_unique_abbr": "NEU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Boston", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26100", + "title": "Information-Theoretic Causal Discovery and Intervention Detection over Multiple Environments", + "track": "main", + "status": "Technical", + "abstract": "Given multiple datasets over a fixed set of random variables, each collected from a different environment, we are interested in discovering the shared underlying causal network and the local interventions per environment, without assuming 
prior knowledge on which datasets are observational or interventional, and without assuming the shape of the causal dependencies. We formalize this problem using the Algorithmic Model of Causation, instantiate a consistent score via the Minimum Description Length principle, and show under which conditions the network and interventions are identifiable. To efficiently discover causal networks and intervention targets in practice, we introduce the ORION algorithm, which through extensive experiments we show outperforms the state of the art in causal inference over multiple environments.", + "primary_area": "machine learning iii", + "author": "Osman Mian; Michael Kamp; Jilles Vreeken", + "authorids": "", + "aff": "CISPA Helmholtz Center for Information Security; Institute for AI in medicine IKIM, Ruhr-University Bochum + Monash University; CISPA Helmholtz Center for Information Security", + "bibtex": "@article{Mian_Kamp_Vreeken_2023, title={Information-Theoretic Causal Discovery and Intervention Detection over Multiple Environments}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26100}, DOI={10.1609/aaai.v37i8.26100}, abstractNote={Given multiple datasets over a fixed set of random variables, each collected from a different environment, we are interested in discovering the shared underlying causal network and the local interventions per environment, without assuming prior knowledge on which datasets are observational or interventional, and without assuming the shape of the causal dependencies. We formalize this problem using the Algorithmic Model of Causation, instantiate a consistent score via the Minimum Description Length principle, and show under which conditions the network and interventions are identifiable. 
To efficiently discover causal networks and intervention targets in practice, we introduce the ORION algorithm, which through extensive experiments we show outperforms the state of the art in causal inference over multiple environments.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mian, Osman and Kamp, Michael and Vreeken, Jilles}, year={2023}, month={Jun.}, pages={9171-9179} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26100/25872", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26100", + "pdf_size": 186314, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15893534436823179616&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "cispa.de;uk-essen.de;cispa.de", + "email": "cispa.de;uk-essen.de;cispa.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "CISPA Helmholtz Center for Information Security;Ruhr-University Bochum;Monash University", + "aff_unique_dep": ";Institute for AI in medicine IKIM;", + "aff_unique_url": "https://www.cispa.de/;https://www.ruhr-uni-bochum.de;https://www.monash.edu", + "aff_unique_abbr": "CISPA;RUB;Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0", + "aff_country_unique": "Germany;Australia" + }, + { + "id": "article-25432", + "title": "Infusing Definiteness into Randomness: Rethinking Composition Styles for Deep Image Matting", + "track": "main", + "status": "Technical", + "abstract": "We study the composition style in deep image matting, a notion that characterizes a data generation flow on how to exploit limited foregrounds and random backgrounds to form a training dataset. Prior art executes this flow in a completely random manner by simply going through the foreground pool or by optionally combining two foregrounds before foreground-background composition. 
In this work, we first show that naive foreground combination can be problematic and therefore derive an alternative formulation to reasonably combine foregrounds. Our second contribution is an observation that matting performance can benefit from a certain occurrence frequency of combined foregrounds and their associated source foregrounds during training. Inspired by this, we introduce a novel composition style that binds the source and combined foregrounds in a definite triplet. In addition, we also find that different orders of foreground combination lead to different foreground patterns, which further inspires a quadruplet-based composition style. Results under controlled experiments on four matting baselines show that our composition styles outperform existing ones and invite consistent performance improvement on both composited and real-world datasets. Code is available at: https://github.com/coconuthust/composition_styles", + "primary_area": "computer vision iii", + "author": "Zixuan Ye; Yutong Dai; Chaoyi Hong; Zhiguo Cao; Hao Lu", + "authorids": "", + "aff": "School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, China; Australian Institute for Machine Learning, The University of Adelaide, Australia; School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, China; School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, China; School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, China", + "bibtex": "@article{Ye_Dai_Hong_Cao_Lu_2023, title={Infusing Definiteness into Randomness: Rethinking Composition Styles for Deep Image Matting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25432}, DOI={10.1609/aaai.v37i3.25432}, abstractNote={We study the composition style in deep image matting, a notion that characterizes a data generation flow on how to exploit limited foregrounds 
and random backgrounds to form a training dataset. Prior art executes this flow in a completely random manner by simply going through the foreground pool or by optionally combining two foregrounds before foreground-background composition. In this work, we first show that naive foreground combination can be problematic and therefore derive an alternative formulation to reasonably combine foregrounds. Our second contribution is an observation that matting performance can benefit from a certain occurrence frequency of combined foregrounds and their associated source foregrounds during training. Inspired by this, we introduce a novel composition style that binds the source and combined foregrounds in a definite triplet. In addition, we also find that different orders of foreground combination lead to different foreground patterns, which further inspires a quadruplet-based composition style. Results under controlled experiments on four matting baselines show that our composition styles outperform existing ones and invite consistent performance improvement on both composited and real-world datasets. 
Code is available at: https://github.com/coconuthust/composition_styles}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Zixuan and Dai, Yutong and Hong, Chaoyi and Cao, Zhiguo and Lu, Hao}, year={2023}, month={Jun.}, pages={3259-3266} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25432/25204", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25432", + "pdf_size": 3875608, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1237966853343916455&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "hust.edu.cn; ; ; ; ", + "email": "hust.edu.cn; ; ; ; ", + "github": "https://github.com/coconuthust/composition_styles", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Huazhong University of Science and Technology;The University of Adelaide", + "aff_unique_dep": "School of Artificial Intelligence and Automation;Australian Institute for Machine Learning", + "aff_unique_url": "http://www.hust.edu.cn;https://www.adelaide.edu.au", + "aff_unique_abbr": "HUST;Adelaide", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26512", + "title": "Instance Smoothed Contrastive Learning for Unsupervised Sentence Embedding", + "track": "main", + "status": "Technical", + "abstract": "Contrastive learning-based methods, such as unsup-SimCSE, have achieved state-of-the-art (SOTA) performances in learning unsupervised sentence embeddings. However, in previous studies, each embedding used for contrastive learning only\nderived from one sentence instance, and we call these embeddings instance-level embeddings. In other words, each embedding is regarded as a unique class of its own, which may hurt the generalization performance. 
In this study, we propose IS-CSE (instance smoothing contrastive sentence embedding) to smooth the boundaries of embeddings in the feature space. Specifically, we retrieve embeddings from a dynamic memory buffer according to the semantic similarity to get a positive embedding group. Then embeddings in the group are aggregated by a self-attention operation to produce a smoothed instance embedding for further analysis. We evaluate our method on standard semantic text similarity (STS) tasks and achieve an average of 78.30%, 79.47%, 77.73%, and 79.42% Spearman\u2019s correlation on the base of BERT-base, BERT-large, RoBERTa-base, and RoBERTa-large respectively, a 2.05%, 1.06%, 1.16% and 0.52% improvement compared to unsup-SimCSE.", + "primary_area": "speech natural language processing", + "author": "Hongliang He; Junlei Zhang; Zhenzhong Lan; Yue Zhang", + "authorids": "", + "aff": "Zhejiang University, China+School of Engineering, Westlake University, China; Zhejiang University, China+School of Engineering, Westlake University, China; School of Engineering, Westlake University, China+Institute of Advanced Technology, Westlake Institute for Advanced Study, China; School of Engineering, Westlake University, China+Institute of Advanced Technology, Westlake Institute for Advanced Study, China", + "bibtex": "@article{He_Zhang_Lan_Zhang_2023, title={Instance Smoothed Contrastive Learning for Unsupervised Sentence Embedding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26512}, DOI={10.1609/aaai.v37i11.26512}, abstractNote={Contrastive learning-based methods, such as unsup-SimCSE, have achieved state-of-the-art (SOTA) performances in learning unsupervised sentence embeddings. However, in previous studies, each embedding used for contrastive learning only\nderived from one sentence instance, and we call these embeddings instance-level embeddings. 
In other words, each embedding is regarded as a unique class of its own, which may hurt the generalization performance. In this study, we propose IS-CSE (instance smoothing contrastive sentence embedding) to smooth the boundaries of embeddings in the feature space. Specifically, we retrieve embeddings from a dynamic memory buffer according to the semantic similarity to get a positive embedding group. Then embeddings in the group are aggregated by a self-attention operation to produce a smoothed instance embedding for further analysis. We evaluate our method on standard semantic text similarity (STS) tasks and achieve an average of 78.30%, 79.47%, 77.73%, and 79.42% Spearman\u2019s correlation on the base of BERT-base, BERT-large, RoBERTa-base, and RoBERTa-large respectively, a 2.05%, 1.06%, 1.16% and 0.52% improvement compared to unsup-SimCSE.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Hongliang and Zhang, Junlei and Lan, Zhenzhong and Zhang, Yue}, year={2023}, month={Jun.}, pages={12863-12871} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26512/26284", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26512", + "pdf_size": 394695, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4119671568748018981&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "westlake.edu.cn;westlake.edu.cn;westlake.edu.cn;westlake.edu.cn", + "email": "westlake.edu.cn;westlake.edu.cn;westlake.edu.cn;westlake.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;1+2;1+2", + "aff_unique_norm": "Zhejiang University;Westlake University;Westlake Institute for Advanced Study", + "aff_unique_dep": ";School of Engineering;Institute of Advanced Technology", + "aff_unique_url": "http://www.zju.edu.cn;https://www.westlake.edu.cn;http://www.wias.org.cn/", + "aff_unique_abbr": "ZJU;;WIAS", + "aff_campus_unique_index": ";;;", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25201", + "title": "InstanceFormer: An Online Video Instance Segmentation Framework", + "track": "main", + "status": "Technical", + "abstract": "Recent transformer-based offline video instance segmentation (VIS) approaches achieve encouraging results and significantly outperform online approaches. However, their reliance on the whole video and the immense computational complexity caused by full Spatio-temporal attention limit them in real-life applications such as processing lengthy videos. In this paper, we propose a single-stage transformer-based efficient online VIS framework named InstanceFormer, which is especially suitable for long and challenging videos. We propose three novel components to model short-term and long-term dependency and temporal coherence. First, we propagate the representation, location, and semantic information of prior instances to model short-term changes. Second, we propose a novel memory cross-attention in the decoder, which allows the network to look into earlier instances within a certain temporal window. Finally, we employ a temporal contrastive loss to impose coherence in the representation of an instance across all frames. Memory attention and temporal coherence are particularly beneficial to long-range dependency modeling, including challenging scenarios like occlusion. The proposed InstanceFormer outperforms previous online benchmark methods by a large margin across multiple datasets. Most importantly, InstanceFormer surpasses offline approaches for challenging and long datasets such as YouTube-VIS-2021 and OVIS. 
Code is available at https://github.com/rajatkoner08/InstanceFormer.", + "primary_area": "computer vision i", + "author": "Rajat Koner; Tanveer Hannan; Suprosanna Shit; Sahand Sharifzadeh; Matthias Schubert; Thomas Seidl; Volker Tresp", + "authorids": "", + "aff": "Ludwig Maximilian University of Munich+MCML; Ludwig Maximilian University of Munich+MCML; Technical University of Munich; Ludwig Maximilian University of Munich+MCML; Ludwig Maximilian University of Munich+MCML; Ludwig Maximilian University of Munich+MCML; Ludwig Maximilian University of Munich+MCML", + "bibtex": "@article{Koner_Hannan_Shit_Sharifzadeh_Schubert_Seidl_Tresp_2023, title={InstanceFormer: An Online Video Instance Segmentation Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25201}, DOI={10.1609/aaai.v37i1.25201}, abstractNote={Recent transformer-based offline video instance segmentation (VIS) approaches achieve encouraging results and significantly outperform online approaches. However, their reliance on the whole video and the immense computational complexity caused by full Spatio-temporal attention limit them in real-life applications such as processing lengthy videos. In this paper, we propose a single-stage transformer-based efficient online VIS framework named InstanceFormer, which is especially suitable for long and challenging videos. We propose three novel components to model short-term and long-term dependency and temporal coherence. First, we propagate the representation, location, and semantic information of prior instances to model short-term changes. Second, we propose a novel memory cross-attention in the decoder, which allows the network to look into earlier instances within a certain temporal window. Finally, we employ a temporal contrastive loss to impose coherence in the representation of an instance across all frames. 
Memory attention and temporal coherence are particularly beneficial to long-range dependency modeling, including challenging scenarios like occlusion. The proposed InstanceFormer outperforms previous online benchmark methods by a large margin across multiple datasets. Most importantly, InstanceFormer surpasses offline approaches for challenging and long datasets such as YouTube-VIS-2021 and OVIS. Code is available at https://github.com/rajatkoner08/InstanceFormer.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Koner, Rajat and Hannan, Tanveer and Shit, Suprosanna and Sharifzadeh, Sahand and Schubert, Matthias and Seidl, Thomas and Tresp, Volker}, year={2023}, month={Jun.}, pages={1188-1195} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25201/24973", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25201", + "pdf_size": 18107215, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2601467822709472356&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "dbs.ifi.lmu.de;dbs.ifi.lmu.de; ; ; ; ; ", + "email": "dbs.ifi.lmu.de;dbs.ifi.lmu.de; ; ; ; ; ", + "github": "https://github.com/rajatkoner08/InstanceFormer", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;2;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Ludwig Maximilian University of Munich;MCML;Technical University of Munich", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.lmu.de;;https://www.tum.de", + "aff_unique_abbr": "LMU;;TUM", + "aff_campus_unique_index": ";;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "Germany;" + }, + { + "id": "article-25895", + "title": "Integer Subspace Differential Privacy", + "track": "main", + "status": "Technical", + "abstract": "We propose new differential privacy solutions for when external invariants and integer constraints are simultaneously enforced on the data 
product. These requirements arise in real world applications of private data curation, including the public release of the 2020 U.S. Decennial Census. They pose a great challenge to the production of provably private data products with adequate statistical usability. We propose integer subspace differential privacy to rigorously articulate the privacy guarantee when data products maintain both the invariants and integer characteristics, and demonstrate the composition and post-processing properties of our proposal. To address the challenge of sampling from a potentially highly restricted discrete space, we devise a pair of unbiased additive mechanisms, the generalized Laplace and the generalized Gaussian mechanisms, by solving the Diophantine equations as defined by the constraints. The proposed mechanisms have good accuracy, with errors exhibiting sub-exponential and sub-Gaussian tail probabilities respectively. To implement our proposal, we design an MCMC algorithm and supply empirical convergence assessment using estimated upper bounds on the total variation distance via L-lag coupling. 
We demonstrate the efficacy of our proposal with applications to a synthetic problem with intersecting invariants, a sensitive contingency table with known margins, and the 2010 Census county-level demonstration data with mandated fixed state population totals.", + "primary_area": "machine learning i", + "author": "Prathamesh Dharangutte; Jie Gao; Ruobin Gong; Fang-Yi Yu", + "authorids": "", + "aff": "Department of Computer Science, Rutgers University; Department of Computer Science, Rutgers University; Department of Statistics, Rutgers University; Department of Computer Science, George Mason University", + "bibtex": "@article{Dharangutte_Gao_Gong_Yu_2023, title={Integer Subspace Differential Privacy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25895}, DOI={10.1609/aaai.v37i6.25895}, abstractNote={We propose new differential privacy solutions for when external invariants and integer constraints are simultaneously enforced on the data product. These requirements arise in real world applications of private data curation, including the public release of the 2020 U.S. Decennial Census. They pose a great challenge to the production of provably private data products with adequate statistical usability. We propose integer subspace differential privacy to rigorously articulate the privacy guarantee when data products maintain both the invariants and integer characteristics, and demonstrate the composition and post-processing properties of our proposal. To address the challenge of sampling from a potentially highly restricted discrete space, we devise a pair of unbiased additive mechanisms, the generalized Laplace and the generalized Gaussian mechanisms, by solving the Diophantine equations as defined by the constraints. The proposed mechanisms have good accuracy, with errors exhibiting sub-exponential and sub-Gaussian tail probabilities respectively. 
To implement our proposal, we design an MCMC algorithm and supply empirical convergence assessment using estimated upper bounds on the total variation distance via L-lag coupling. We demonstrate the efficacy of our proposal with applications to a synthetic problem with intersecting invariants, a sensitive contingency table with known margins, and the 2010 Census county-level demonstration data with mandated fixed state population totals.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dharangutte, Prathamesh and Gao, Jie and Gong, Ruobin and Yu, Fang-Yi}, year={2023}, month={Jun.}, pages={7349-7357} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25895/25667", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25895", + "pdf_size": 204209, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6283077582720292953&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "rutgers.edu;rutgers.edu;rutgers.edu;gmu.edu", + "email": "rutgers.edu;rutgers.edu;rutgers.edu;gmu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Rutgers University;George Mason University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.rutgers.edu;https://www.gmu.edu", + "aff_unique_abbr": "Rutgers;GMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25637", + "title": "Integrating Reward Maximization and Population Estimation: Sequential Decision-Making for Internal Revenue Service Audit Selection", + "track": "main", + "status": "Technical", + "abstract": "We introduce a new setting, optimize-and-estimate structured bandits. 
Here, a policy must select a batch of arms, each characterized by its own context, that would allow it to both maximize reward and maintain an accurate (ideally unbiased) population estimate of the reward. This setting is inherent to many public and private sector applications and often requires handling delayed feedback, small data, and distribution shifts. We demonstrate its importance on real data from the United States Internal Revenue Service (IRS). The IRS performs yearly audits of the tax base. Two of its most important objectives are to identify suspected misreporting and to estimate the \"tax gap\" -- the global difference between the amount paid and true amount owed. Based on a unique collaboration with the IRS, we cast these two processes as a unified optimize-and-estimate structured bandit. We analyze optimize-and-estimate approaches to the IRS problem and propose a novel mechanism for unbiased population estimation that achieves rewards comparable to baseline approaches. This approach has the potential to improve audit efficacy, while maintaining policy-relevant estimates of the tax gap. This has important social consequences given that the current tax gap is estimated at nearly half a trillion dollars. We suggest that this problem setting is fertile ground for further research and we highlight its interesting challenges. The results of this and related research are currently being incorporated into the continual improvement of the IRS audit selection methods.",
    "primary_area": "domain s of application",
    "author": "Peter Henderson; Ben Chugg; Brandon Anderson; Kristen Altenburger; Alex Turk; John Guyton; Jacob Goldin; Daniel E. Ho",
    "authorids": "",
    "aff": "Stanford University; Carnegie Mellon University; Internal Revenue Service; Stanford University; Internal Revenue Service; Internal Revenue Service; University of Chicago; Stanford University",
    "bibtex": "@article{Henderson_Chugg_Anderson_Altenburger_Turk_Guyton_Goldin_Ho_2023, title={Integrating Reward Maximization and Population Estimation: Sequential Decision-Making for Internal Revenue Service Audit Selection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25637}, DOI={10.1609/aaai.v37i4.25637}, abstractNote={We introduce a new setting, optimize-and-estimate structured bandits. Here, a policy must select a batch of arms, each characterized by its own context, that would allow it to both maximize reward and maintain an accurate (ideally unbiased) population estimate of the reward. This setting is inherent to many public and private sector applications and often requires handling delayed feedback, small data, and distribution shifts. We demonstrate its importance on real data from the United States Internal Revenue Service (IRS). The IRS performs yearly audits of the tax base. Two of its most important objectives are to identify suspected misreporting and to estimate the \"tax gap\" -- the global difference between the amount paid and true amount owed. Based on a unique collaboration with the IRS, we cast these two processes as a unified optimize-and-estimate structured bandit. We analyze optimize-and-estimate approaches to the IRS problem and propose a novel mechanism for unbiased population estimation that achieves rewards comparable to baseline approaches. This approach has the potential to improve audit efficacy, while maintaining policy-relevant estimates of the tax gap. This has important social consequences given that the current tax gap is estimated at nearly half a trillion dollars. We suggest that this problem setting is fertile ground for further research and we highlight its interesting challenges. 
The results of this and related research are currently being incorporated into the continual improvement of the IRS audit selection methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Henderson, Peter and Chugg, Ben and Anderson, Brandon and Altenburger, Kristen and Turk, Alex and Guyton, John and Goldin, Jacob and Ho, Daniel E.}, year={2023}, month={Jun.}, pages={5087-5095} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25637/25409", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25637", + "pdf_size": 13728990, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13229597438375772617&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;0;2;2;3;0", + "aff_unique_norm": "Stanford University;Carnegie Mellon University;Internal Revenue Service;University of Chicago", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.stanford.edu;https://www.cmu.edu;https://www.irs.gov;https://www.uchicago.edu", + "aff_unique_abbr": "Stanford;CMU;IRS;UChicago", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26812", + "title": "Intelligent Planning for Large-Scale Multi-Robot Coordination", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Robots will play a crucial role in the future and need to work as a team in increasingly more complex applications. Advances in robotics have laid the hardware foundations for building large-scale multi-robot systems. But how to coordinate robots intelligently is a difficult problem. 
We believe that graph-search-based planning can systematically exploit the combinatorial structure of multi-robot coordination problems and efficiently generate solutions with rigorous guarantees on correctness, completeness, and solution quality. We started with one problem that is central to many multi-robot applications. Multi-Agent Path Finding (MAPF) is an NP-hard problem of planning collision-free paths for a team of agents while minimizing their travel times. We addressed the MAPF problem from both (1) a theoretical perspective by developing efficient algorithms to solve large MAPF instances with completeness and optimality guarantees via a variety of AI and optimization technologies, such as constraint reasoning, heuristic search, stochastic local search, and machine learning, and (2) an applicational perspective by developing algorithmic techniques for integrating MAPF with task planning and execution for various multi-robot systems, such as mobile robot coordination, traffic management, drone swarm control, multi-arm assembly, and character control in video games. This paper is part of the AAAI-23 New Faculty Highlights.", + "primary_area": "", + "author": "Jiaoyang Li", + "authorids": "", + "aff": "Carnegie Mellon University", + "bibtex": "@article{Li_2024, title={Intelligent Planning for Large-Scale Multi-Robot Coordination}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26812}, DOI={10.1609/aaai.v37i13.26812}, abstractNote={Robots will play a crucial role in the future and need to work as a team in increasingly more complex applications. Advances in robotics have laid the hardware foundations for building large-scale multi-robot systems. But how to coordinate robots intelligently is a difficult problem. 
We believe that graph-search-based planning can systematically exploit the combinatorial structure of multi-robot coordination problems and efficiently generate solutions with rigorous guarantees on correctness, completeness, and solution quality. We started with one problem that is central to many multi-robot applications. Multi-Agent Path Finding (MAPF) is an NP-hard problem of planning collision-free paths for a team of agents while minimizing their travel times. We addressed the MAPF problem from both (1) a theoretical perspective by developing efficient algorithms to solve large MAPF instances with completeness and optimality guarantees via a variety of AI and optimization technologies, such as constraint reasoning, heuristic search, stochastic local search, and machine learning, and (2) an applicational perspective by developing algorithmic techniques for integrating MAPF with task planning and execution for various multi-robot systems, such as mobile robot coordination, traffic management, drone swarm control, multi-arm assembly, and character control in video games. 
This paper is part of the AAAI-23 New Faculty Highlights.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jiaoyang}, year={2024}, month={Jul.}, pages={15445-15445} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26812/26584", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26812", + "pdf_size": 3166141, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7846391530649491648&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "cmu.edu", + "email": "cmu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25077", + "title": "Intensity-Aware Loss for Dynamic Facial Expression Recognition in the Wild", + "track": "main", + "status": "Technical", + "abstract": "Compared with the image-based static facial expression recognition (SFER) task, the dynamic facial expression recognition (DFER) task based on video sequences is closer to the natural expression recognition scene. However, DFER is often more challenging. One of the main reasons is that video sequences often contain frames with different expression intensities, especially for the facial expressions in the real-world scenarios, while the images in SFER frequently present uniform and high expression intensities. Nevertheless, if the expressions with different intensities are treated equally, the features learned by the networks will have large intra-class and small inter-class differences, which are harmful to DFER. To tackle this problem, we propose the global convolution-attention block (GCA) to rescale the channels of the feature maps. 
In addition, we introduce the intensity-aware loss (IAL) in the training process to help the network distinguish the samples with relatively low expression intensities. Experiments on two in-the-wild dynamic facial expression datasets (i.e., DFEW and FERV39k) indicate that our method outperforms the state-of-the-art DFER approaches. The source code will be available at https://github.com/muse1998/IAL-for-Facial-Expression-Recognition.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Hanting Li; Hongjing Niu; Zhaoqing Zhu; Feng Zhao", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Li_Niu_Zhu_Zhao_2023, title={Intensity-Aware Loss for Dynamic Facial Expression Recognition in the Wild}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25077}, DOI={10.1609/aaai.v37i1.25077}, abstractNote={Compared with the image-based static facial expression recognition (SFER) task, the dynamic facial expression recognition (DFER) task based on video sequences is closer to the natural expression recognition scene. However, DFER is often more challenging. One of the main reasons is that video sequences often contain frames with different expression intensities, especially for the facial expressions in the real-world scenarios, while the images in SFER frequently present uniform and high expression intensities. Nevertheless, if the expressions with different intensities are treated equally, the features learned by the networks will have large intra-class and small inter-class differences, which are harmful to DFER. To tackle this problem, we propose the global convolution-attention block (GCA) to rescale the channels of the feature maps. 
In addition, we introduce the intensity-aware loss (IAL) in the training process to help the network distinguish the samples with relatively low expression intensities. Experiments on two in-the-wild dynamic facial expression datasets (i.e., DFEW and FERV39k) indicate that our method outperforms the state-of-the-art DFER approaches. The source code will be available at https://github.com/muse1998/IAL-for-Facial-Expression-Recognition.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Hanting and Niu, Hongjing and Zhu, Zhaoqing and Zhao, Feng}, year={2023}, month={Jun.}, pages={67-75} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25077/24849", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25077", + "pdf_size": 776334, + "gs_citation": 63, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1116671211124084251&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/muse1998/IAL-for-Facial-Expression-Recognition", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25410", + "title": "Inter-image Contrastive Consistency for Multi-Person Pose Estimation", + "track": "main", + "status": "Technical", + "abstract": "Multi-person pose estimation (MPPE) has achieved impressive progress in recent years. 
However, due to the large variance of appearances among images or occlusions, the model can hardly learn consistent patterns enough, which leads to severe location jitter and missing issues. In this study, we propose a novel framework, termed Inter-image Contrastive consistency (ICON), to strengthen the keypoint consistency among images for MPPE. Concretely, we consider two-fold consistency constraints, which include single keypoint contrastive consistency (SKCC) and pair relation contrastive consistency (PRCC). The SKCC learns to strengthen the consistency of individual keypoints across images in the same category to improve the category-specific robustness. Only with SKCC, the model can effectively reduce location errors caused by large appearance variations, but remains challenging with extreme postures (e.g., occlusions) due to lack of relational guidance. Therefore, PRCC is proposed to strengthen the consistency of pair-wise joint relation between images to preserve the instructive relation. Cooperating with SKCC, PRCC further improves structure aware robustness in handling extreme postures. Extensive experiments on kinds of architectures across three datasets (i.e., MS-COCO, MPII, CrowdPose) show the proposed ICON achieves substantial improvements over baselines. 
Furthermore, ICON under the semi-supervised setup can obtain comparable results with the fully-supervised methods using only 30% labeled data.", + "primary_area": "computer vision iii", + "author": "Xixia Xu; Yingguo Gao; Xingjia Pan; Ke Yan; Xiaoyu Chen; Qi Zou", + "authorids": "", + "aff": "Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Shanghai, China; Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China", + "bibtex": "@article{Xu_Gao_Pan_Yan_Chen_Zou_2023, title={Inter-image Contrastive Consistency for Multi-Person Pose Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25410}, DOI={10.1609/aaai.v37i3.25410}, abstractNote={Multi-person pose estimation (MPPE) has achieved impressive progress in recent years. However, due to the large variance of appearances among images or occlusions, the model can hardly learn consistent patterns enough, which leads to severe location jitter and missing issues. In this study, we propose a novel framework, termed Inter-image Contrastive consistency (ICON), to strengthen the keypoint consistency among images for MPPE. Concretely, we consider two-fold consistency constraints, which include single keypoint contrastive consistency (SKCC) and pair relation contrastive consistency (PRCC). The SKCC learns to strengthen the consistency of individual keypoints across images in the same category to improve the category-specific robustness. Only with SKCC, the model can effectively reduce location errors caused by large appearance variations, but remains challenging with extreme postures (e.g., occlusions) due to lack of relational guidance. 
Therefore, PRCC is proposed to strengthen the consistency of pair-wise joint relation between images to preserve the instructive relation. Cooperating with SKCC, PRCC further improves structure aware robustness in handling extreme postures. Extensive experiments on kinds of architectures across three datasets (i.e., MS-COCO, MPII, CrowdPose) show the proposed ICON achieves substantial improvements over baselines. Furthermore, ICON under the semi-supervised setup can obtain comparable results with the fully-supervised methods using only 30% labeled data.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Xixia and Gao, Yingguo and Pan, Xingjia and Yan, Ke and Chen, Xiaoyu and Zou, Qi}, year={2023}, month={Jun.}, pages={3063-3071} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25410/25182", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25410", + "pdf_size": 3241820, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16050684390761391893&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "bjtu.edu.cn;tencent.com;gmail.com;tencent.com;bjtu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;tencent.com;gmail.com;tencent.com;bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;0", + "aff_unique_norm": "Beijing Jiaotong University;Tencent", + "aff_unique_dep": "Beijing Key Laboratory of Traffic Data Analysis and Mining;Youtu Lab", + "aff_unique_url": "http://www.bjtu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "BJTU;Tencent", + "aff_campus_unique_index": "0;1;1;1;0;0", + "aff_campus_unique": "Beijing;Shanghai", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25736", + "title": "Interactive Concept Bottleneck Models", + "track": "main", + "status": "Technical", + "abstract": "Concept bottleneck models (CBMs) are interpretable 
neural networks that first predict labels for human-interpretable concepts relevant to the prediction task, and then predict the final label based on the concept label predictions. We extend CBMs to interactive prediction settings where the model can query a human collaborator for the label to some concepts. We develop an interaction policy that, at prediction time, chooses which concepts to request a label for so as to maximally improve the final prediction. We demonstrate that a simple policy combining concept prediction uncertainty and influence of the concept on the final prediction achieves strong performance and outperforms static approaches as well as active feature acquisition methods proposed in the literature. We show that the interactive CBM can achieve accuracy gains of 5-10% with only 5 interactions over competitive baselines on the Caltech-UCSD Birds, CheXpert and OAI datasets.", + "primary_area": "humans and ai", + "author": "Kushal Chauhan; Rishabh Tiwari; Jan Freyberg; Pradeep Shenoy; Krishnamurthy Dvijotham", + "authorids": "", + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research", + "bibtex": "@article{Chauhan_Tiwari_Freyberg_Shenoy_Dvijotham_2023, title={Interactive Concept Bottleneck Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25736}, DOI={10.1609/aaai.v37i5.25736}, abstractNote={Concept bottleneck models (CBMs) are interpretable neural networks that first predict labels for human-interpretable concepts relevant to the prediction task, and then predict the final label based on the concept label predictions. We extend CBMs to interactive prediction settings where the model can query a human collaborator for the label to some concepts. We develop an interaction policy that, at prediction time, chooses which concepts to request a label for so as to maximally improve the final prediction. 
We demonstrate that a simple policy combining concept prediction uncertainty and influence of the concept on the final prediction achieves strong performance and outperforms static approaches as well as active feature acquisition methods proposed in the literature. We show that the interactive CBM can achieve accuracy gains of 5-10% with only 5 interactions over competitive baselines on the Caltech-UCSD Birds, CheXpert and OAI datasets.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chauhan, Kushal and Tiwari, Rishabh and Freyberg, Jan and Shenoy, Pradeep and Dvijotham, Krishnamurthy}, year={2023}, month={Jun.}, pages={5948-5955} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25736/25508", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25736", + "pdf_size": 5182553, + "gs_citation": 65, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17227231848298082803&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26818", + "title": "Internal Robust Representations for Domain Generalization", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Model generalization under distributional changes remains a significant challenge for machine learning. 
We present consolidating the internal representation of the training data in a model as a strategy of improving model generalization.", + "primary_area": "", + "author": "Mohammad Rostami", + "authorids": "", + "aff": "Information Sciences Institute, University of Southern California", + "bibtex": "@article{Rostami_2024, title={Internal Robust Representations for Domain Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26818}, DOI={10.1609/aaai.v37i13.26818}, abstractNote={Model generalization under distributional changes remains a significant challenge for machine learning. We present consolidating the internal representation of the training data in a model as a strategy of improving model generalization.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rostami, Mohammad}, year={2024}, month={Jul.}, pages={15451-15451} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26818/26590", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26818", + "pdf_size": 57573, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8070570100579424799&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 2, + "aff_domain": "usc.edu", + "email": "usc.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "Information Sciences Institute", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25941", + "title": "Interpolating Graph Pair to Regularize Graph Classification", + "track": "main", + "status": "Technical", + "abstract": "We present a simple and yet effective interpolation-based regularization technique, aiming to improve the generalization of Graph 
Neural Networks (GNNs) on supervised graph classification. We leverage Mixup, an effective regularizer for vision, where random sample pairs and their labels are interpolated to create synthetic images for training. Unlike images with grid-like coordinates, graphs have arbitrary structure and topology, which can be very sensitive to any modification that alters the graph's semantic meanings. This posts two unanswered questions for Mixup-like regularization schemes: Can we directly mix up a pair of graph inputs? If so, how well does such mixing strategy regularize the learning of GNNs? To answer these two questions, we propose ifMixup, which first adds dummy nodes to make two graphs have the same input size and then simultaneously performs linear interpolation between the aligned node feature vectors and the aligned edge representations of the two graphs. We empirically show that such simple mixing schema can effectively regularize the classification learning, resulting in superior predictive accuracy to popular graph augmentation and GNN methods.", + "primary_area": "machine learning i", + "author": "Hongyu Guo; Yongyi Mao", + "authorids": "", + "aff": "National Research Council Canada+University of Ottawa; University of Ottawa", + "bibtex": "@article{Guo_Mao_2023, title={Interpolating Graph Pair to Regularize Graph Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25941}, DOI={10.1609/aaai.v37i6.25941}, abstractNote={We present a simple and yet effective interpolation-based regularization technique, aiming to improve the generalization of Graph Neural Networks (GNNs) on supervised graph classification. We leverage Mixup, an effective regularizer for vision, where random sample pairs and their labels are interpolated to create synthetic images for training. 
Unlike images with grid-like coordinates, graphs have arbitrary structure and topology, which can be very sensitive to any modification that alters the graph\u2019s semantic meanings. This posts two unanswered questions for Mixup-like regularization schemes: Can we directly mix up a pair of graph inputs? If so, how well does such mixing strategy regularize the learning of GNNs? To answer these two questions, we propose ifMixup, which first adds dummy nodes to make two graphs have the same input size and then simultaneously performs linear interpolation between the aligned node feature vectors and the aligned edge representations of the two graphs. We empirically show that such simple mixing schema can effectively regularize the classification learning, resulting in superior predictive accuracy to popular graph augmentation and GNN methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Hongyu and Mao, Yongyi}, year={2023}, month={Jun.}, pages={7766-7774} }",
    "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25941/25713",
    "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25941",
    "pdf_size": 584280,
    "gs_citation": 9,
    "gs_cited_by_link": "https://scholar.google.com/scholar?cites=208521224042523717&as_sdt=2005&sciodt=0,5&hl=en",
    "gs_version_total": 2,
    "aff_domain": "uottawa.ca;uottawa.ca",
    "email": "uottawa.ca;uottawa.ca",
    "github": "",
    "project": "",
    "author_num": 2,
    "aff_unique_index": "0+1;1",
    "aff_unique_norm": "National Research Council Canada;University of Ottawa",
    "aff_unique_dep": ";",
    "aff_unique_url": "https://www.nrc-cnrc.gc.ca;https://www.uottawa.ca",
    "aff_unique_abbr": "NRC-CNRC;U Ottawa",
    "aff_campus_unique_index": "",
    "aff_campus_unique": "",
    "aff_country_unique_index": "0+0;0",
    "aff_country_unique": "Canada"
  },
  {
    "id": "article-26679",
    "title": "Interpretable Chirality-Aware Graph Neural Network for Quantitative Structure Activity Relationship Modeling in Drug Discovery",
    "track": "aaai special track",
    "status": "Technical",
    "abstract": "In computer-aided drug discovery, quantitative structure activity relation models are trained to predict biological activity from chemical structure. Despite the recent success of applying graph neural network to this task, important chemical information such as molecular chirality is ignored. To fill this crucial gap, we propose Molecular-Kernel Graph NeuralNetwork (MolKGNN) for molecular representation learning, which features SE(3)-/conformation invariance, chirality-awareness, and interpretability. For our MolKGNN, we first design a molecular graph convolution to capture the chemical pattern by comparing the atom's similarity with the learnable molecular kernels. Furthermore, we propagate the similarity score to capture the higher-order chemical pattern. To assess the method, we conduct a comprehensive evaluation with nine well-curated datasets spanning numerous important drug targets that feature realistic high class imbalance and it demonstrates the superiority of MolKGNN over other graph neural networks in computer-aided drug discovery. Meanwhile, the learned kernels identify patterns that agree with domain knowledge, confirming the pragmatic interpretability of this approach. 
Our code and supplementary material are publicly available at https://github.com/meilerlab/MolKGNN.", + "primary_area": "ai for social impact", + "author": "Yunchao (Lance) Liu; Yu Wang; Oanh Vu; Rocco Moretti; Bobby Bodenheimer; Jens Meiler; Tyler Derr", + "authorids": "", + "aff": "Vanderbilt University; Vanderbilt University; Vanderbilt University; Vanderbilt University; Vanderbilt University; Vanderbilt University+Leipzig University; Vanderbilt University", + "bibtex": "@article{Liu_Wang_Vu_Moretti_Bodenheimer_Meiler_Derr_2023, title={Interpretable Chirality-Aware Graph Neural Network for Quantitative Structure Activity Relationship Modeling in Drug Discovery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26679}, DOI={10.1609/aaai.v37i12.26679}, abstractNote={In computer-aided drug discovery, quantitative structure activity relation models are trained to predict biological activity from chemical structure. Despite the recent success of applying graph neural network to this task, important chemical information such as molecular chirality is ignored. To fill this crucial gap, we propose Molecular-Kernel Graph NeuralNetwork (MolKGNN) for molecular representation learning, which features SE(3)-/conformation invariance, chirality-awareness, and interpretability. For our MolKGNN, we first design a molecular graph convolution to capture the chemical pattern by comparing the atom\u2019s similarity with the learnable molecular kernels. Furthermore, we propagate the similarity score to capture the higher-order chemical pattern. To assess the method, we conduct a comprehensive evaluation with nine well-curated datasets spanning numerous important drug targets that feature realistic high class imbalance and it demonstrates the superiority of MolKGNN over other graph neural networks in computer-aided drug discovery. 
Meanwhile, the learned kernels identify patterns that agree with domain knowledge, confirming the pragmatic interpretability of this approach. Our code and supplementary material are publicly available at https://github.com/meilerlab/MolKGNN.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yunchao (Lance) and Wang, Yu and Vu, Oanh and Moretti, Rocco and Bodenheimer, Bobby and Meiler, Jens and Derr, Tyler}, year={2023}, month={Jun.}, pages={14356-14364} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26679/26451", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26679", + "pdf_size": 895586, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7539426996919374672&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 11, + "aff_domain": "vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu", + "email": "vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu;vanderbilt.edu", + "github": "https://github.com/meilerlab/MolKGNN", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0+1;0", + "aff_unique_norm": "Vanderbilt University;Leipzig University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.vanderbilt.edu;https://www.uni-leipzig.de", + "aff_unique_abbr": "Vanderbilt;Uni Leipzig", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+1;0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "article-25905", + "title": "Interpreting Unfairness in Graph Neural Networks via Training Node Attribution", + "track": "main", + "status": "Technical", + "abstract": "Graph Neural Networks (GNNs) have emerged as the leading paradigm for solving graph analytical problems in various real-world applications. 
Nevertheless, GNNs could potentially render biased predictions towards certain demographic subgroups. Understanding how the bias in predictions arises is critical, as it guides the design of GNN debiasing mechanisms. However, most existing works overwhelmingly focus on GNN debiasing, but fall short on explaining how such bias is induced. In this paper, we study a novel problem of interpreting GNN unfairness through attributing it to the influence of training nodes. Specifically, we propose a novel strategy named Probabilistic Distribution Disparity (PDD) to measure the bias exhibited in GNNs, and develop an algorithm to efficiently estimate the influence of each training node on such bias. We verify the validity of PDD and the effectiveness of influence estimation through experiments on real-world datasets. Finally, we also demonstrate how the proposed framework could be used for debiasing GNNs. Open-source code can be found at https://github.com/yushundong/BIND.", + "primary_area": "machine learning i", + "author": "Yushun Dong; Song Wang; Jing Ma; Ninghao Liu; Jundong Li", + "authorids": "", + "aff": "University of Virginia; University of Virginia; University of Virginia; University of Georgia; University of Virginia", + "bibtex": "@article{Dong_Wang_Ma_Liu_Li_2023, title={Interpreting Unfairness in Graph Neural Networks via Training Node Attribution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25905}, DOI={10.1609/aaai.v37i6.25905}, abstractNote={Graph Neural Networks (GNNs) have emerged as the leading paradigm for solving graph analytical problems in various real-world applications. Nevertheless, GNNs could potentially render biased predictions towards certain demographic subgroups. Understanding how the bias in predictions arises is critical, as it guides the design of GNN debiasing mechanisms. However, most existing works overwhelmingly focus on GNN debiasing, but fall short on explaining how such bias is induced. 
In this paper, we study a novel problem of interpreting GNN unfairness through attributing it to the influence of training nodes. Specifically, we propose a novel strategy named Probabilistic Distribution Disparity (PDD) to measure the bias exhibited in GNNs, and develop an algorithm to efficiently estimate the influence of each training node on such bias. We verify the validity of PDD and the effectiveness of influence estimation through experiments on real-world datasets. Finally, we also demonstrate how the proposed framework could be used for debiasing GNNs. Open-source code can be found at https://github.com/yushundong/BIND.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Yushun and Wang, Song and Ma, Jing and Liu, Ninghao and Li, Jundong}, year={2023}, month={Jun.}, pages={7441-7449} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25905/25677", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25905", + "pdf_size": 422116, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7505109409032681160&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "virginia.edu;virginia.edu;virginia.edu;uga.edu;virginia.edu", + "email": "virginia.edu;virginia.edu;virginia.edu;uga.edu;virginia.edu", + "github": "https://github.com/yushundong/BIND", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "University of Virginia;University of Georgia", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.virginia.edu;https://www.uga.edu", + "aff_unique_abbr": "UVA;UGA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26368", + "title": "Intersection Coordination with Priority-Based Search for Autonomous Vehicles", + "track": "main", + "status": "Technical", + "abstract": "The development of 
connected and autonomous vehicles opens an opportunity to manage intersections without signals. One promising approach is to use a central autonomous intersection manager to optimize the movement of the vehicles in the intersection. Existing work uses Mixed Integer Linear Programming (MILP) to find optimal solutions for this problem but is time-consuming and cannot be applied in real-time. On the other hand, the coordination of the vehicles is essentially a Multi-Agent Path Finding (MAPF) problem, for which dozens of efficient algorithms have been proposed in recent years. Inspired by these MAPF algorithms, we propose a three-level algorithm called PSL to solve the intersection coordination problem. Theoretically, PSL is complete and polynomial-time in the number of vehicles. Empirically, PSL runs significantly faster with only a slight compromise in the solution quality than the optimal MILP method. It also generates significantly better solutions with a slightly larger runtime than the traditional First-Come-First-Served strategy.", + "primary_area": "multiagent systems", + "author": "Jiaoyang Li; The Anh Hoang; Eugene Lin; Hai L. Vu; Sven Koenig", + "authorids": "", + "aff": "Robotics Institute, Carnegie Mellon University, USA; Department of Civil Engineering, Monash University, Australia; Computer Science Department, University of Southern California, USA; Department of Civil Engineering, Monash University, Australia; Computer Science Department, University of Southern California, USA", + "bibtex": "@article{Li_Hoang_Lin_Vu_Koenig_2023, title={Intersection Coordination with Priority-Based Search for Autonomous Vehicles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26368}, DOI={10.1609/aaai.v37i10.26368}, abstractNote={The development of connected and autonomous vehicles opens an opportunity to manage intersections without signals. 
One promising approach is to use a central autonomous intersection manager to optimize the movement of the vehicles in the intersection. Existing work uses Mixed Integer Linear Programming (MILP) to find optimal solutions for this problem but is time-consuming and cannot be applied in real-time. On the other hand, the coordination of the vehicles is essentially a Multi-Agent Path Finding (MAPF) problem, for which dozens of efficient algorithms have been proposed in recent years. Inspired by these MAPF algorithms, we propose a three-level algorithm called PSL to solve the intersection coordination problem. Theoretically, PSL is complete and polynomial-time in the number of vehicles. Empirically, PSL runs significantly faster with only a slight compromise in the solution quality than the optimal MILP method. It also generates significantly better solutions with a slightly larger runtime than the traditional First-Come-First-Served strategy.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jiaoyang and Hoang, The Anh and Lin, Eugene and Vu, Hai L. 
and Koenig, Sven}, year={2023}, month={Jun.}, pages={11578-11585} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26368/26140", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26368", + "pdf_size": 2420898, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1470921396285046931&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "cmu.edu;monash.edu;gmail.com;monash.edu;usc.edu", + "email": "cmu.edu;monash.edu;gmail.com;monash.edu;usc.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;2", + "aff_unique_norm": "Carnegie Mellon University;Monash University;University of Southern California", + "aff_unique_dep": "Robotics Institute;Department of Civil Engineering;Computer Science Department", + "aff_unique_url": "https://www.cmu.edu;https://www.monash.edu;https://www.usc.edu", + "aff_unique_abbr": "CMU;Monash;USC", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;1;0;1;0", + "aff_country_unique": "United States;Australia" + }, + { + "id": "article-26322", + "title": "Interventional SHAP Values and Interaction Values for Piecewise Linear Regression Trees", + "track": "main", + "status": "Technical", + "abstract": "In recent years, game-theoretic Shapley values have gained increasing attention with respect to local model explanation by feature attributions. While the approach using Shapley values is model-independent, their (exact) computation is usually intractable, so efficient model-specific algorithms have been devised including approaches for decision trees or their ensembles in general. Our work goes further in this direction by extending the interventional TreeSHAP algorithm to piecewise linear regression trees, which gained more attention in the past few years. 
To this end, we introduce a decomposition of the contribution function based on decision paths, which allows a more comprehensible formulation of SHAP algorithms for tree-based models. Our algorithm can also be readily applied to computing SHAP interaction values of these models. In particular, as the main contribution of this paper, we provide a more efficient approach of interventional SHAP for tree-based models by precomputing statistics of the background data based on the tree structure.", + "primary_area": "machine learning iv", + "author": "Artjom Zern; Klaus Broelemann; Gjergji Kasneci", + "authorids": "", + "aff": "SCHUFA Holding AG, Germany; SCHUFA Holding AG, Germany; University of Tuebingen, Germany", + "bibtex": "@article{Zern_Broelemann_Kasneci_2023, title={Interventional SHAP Values and Interaction Values for Piecewise Linear Regression Trees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26322}, DOI={10.1609/aaai.v37i9.26322}, abstractNote={In recent years, game-theoretic Shapley values have gained increasing attention with respect to local model explanation by feature attributions. While the approach using Shapley values is model-independent, their (exact) computation is usually intractable, so efficient model-specific algorithms have been devised including approaches for decision trees or their ensembles in general. Our work goes further in this direction by extending the interventional TreeSHAP algorithm to piecewise linear regression trees, which gained more attention in the past few years. To this end, we introduce a decomposition of the contribution function based on decision paths, which allows a more comprehensible formulation of SHAP algorithms for tree-based models. Our algorithm can also be readily applied to computing SHAP interaction values of these models. 
In particular, as the main contribution of this paper, we provide a more efficient approach of interventional SHAP for tree-based models by precomputing statistics of the background data based on the tree structure.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zern, Artjom and Broelemann, Klaus and Kasneci, Gjergji}, year={2023}, month={Jun.}, pages={11164-11173} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26322/26094", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26322", + "pdf_size": 257942, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4129765945305148908&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "schufa.de;schufa.de;uni-tuebingen.de", + "email": "schufa.de;schufa.de;uni-tuebingen.de", + "github": "", + "project": "https://github.com/microsoft/LightGBM/releases/tag/v3.2.0", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "SCHUFA Holding AG;University of Tuebingen", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.schufa.de;https://www.uni-tuebingen.de/", + "aff_unique_abbr": ";Uni Tuebingen", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25281", + "title": "Intriguing Findings of Frequency Selection for Image Deblurring", + "track": "main", + "status": "Technical", + "abstract": "Blur was naturally analyzed in the frequency domain, by estimating the latent sharp image and the blur kernel given a blurry image. Recent progress on image deblurring always designs end-to-end architectures and aims at learning the difference between blurry and sharp image pairs from pixel-level, which inevitably overlooks the importance of blur kernels. 
This paper reveals an intriguing phenomenon that simply applying ReLU operation on the frequency domain of a blur image followed by inverse Fourier transform, i.e., frequency selection, provides faithful information about the blur pattern (e.g., the blur direction and blur level, implicitly shows the kernel pattern). Based on this observation, we attempt to leverage kernel-level information for image deblurring networks by inserting Fourier transform, ReLU operation, and inverse Fourier transform to the standard ResBlock. 1 \u00d7 1 convolution is further added to let the network modulate flexible thresholds for frequency selection. We term our newly built block as Res FFT-ReLU Block, which takes advantages of both kernel-level and pixel-level features via learning frequency-spatial dual-domain representations. Extensive experiments are conducted to acquire a thorough analysis on the insights of the method. Moreover, after plugging the proposed block into NAFNet, we can achieve 33.85 dB in PSNR on GoPro dataset. Our method noticeably improves backbone architectures without introducing many parameters, while maintaining low computational complexity. 
Code is available at https://github.com/DeepMed-Lab/DeepRFT-AAAI2023.", + "primary_area": "computer vision ii", + "author": "Xintian Mao; Yiming Liu; Fengze Liu; Qingli Li; Wei Shen; Yan Wang", + "authorids": "", + "aff": "Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University; Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University; Department of Computer Science, the Johns Hopkins University; Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University", + "bibtex": "@article{Mao_Liu_Liu_Li_Shen_Wang_2023, title={Intriguing Findings of Frequency Selection for Image Deblurring}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25281}, DOI={10.1609/aaai.v37i2.25281}, abstractNote={Blur was naturally analyzed in the frequency domain, by estimating the latent sharp image and the blur kernel given a blurry image. Recent progress on image deblurring always designs end-to-end architectures and aims at learning the difference between blurry and sharp image pairs from pixel-level, which inevitably overlooks the importance of blur kernels. This paper reveals an intriguing phenomenon that simply applying ReLU operation on the frequency domain of a blur image followed by inverse Fourier transform, i.e., frequency selection, provides faithful information about the blur pattern (e.g., the blur direction and blur level, implicitly shows the kernel pattern). Based on this observation, we attempt to leverage kernel-level information for image deblurring networks by inserting Fourier transform, ReLU operation, and inverse Fourier transform to the standard ResBlock. 
1 \u00d7 1 convolution is further added to let the network modulate flexible thresholds for frequency selection. We term our newly built block as Res FFT-ReLU Block, which takes advantages of both kernel-level and pixel-level features via learning frequency-spatial dual-domain representations. Extensive experiments are conducted to acquire a thorough analysis on the insights of the method. Moreover, after plugging the proposed block into NAFNet, we can achieve 33.85 dB in PSNR on GoPro dataset. Our method noticeably improves backbone architectures without introducing many parameters, while maintaining low computational complexity. Code is available at https://github.com/DeepMed-Lab/DeepRFT-AAAI2023.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Xintian and Liu, Yiming and Liu, Fengze and Li, Qingli and Shen, Wei and Wang, Yan}, year={2023}, month={Jun.}, pages={1905-1913} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25281/25053", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25281", + "pdf_size": 867998, + "gs_citation": 182, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4145203574784160017&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;jhu.edu;cs.ecnu.edu.cn;sjtu.edu.cn;cee.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;jhu.edu;cs.ecnu.edu.cn;sjtu.edu.cn;cee.ecnu.edu.cn", + "github": "https://github.com/DeepMed-Lab/DeepRFT-AAAI2023", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;0", + "aff_unique_norm": "East China Normal University;Johns Hopkins University;Shanghai Jiao Tong University", + "aff_unique_dep": "Shanghai Key Laboratory of Multidimensional Information Processing;Department of Computer Science;AI Institute", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.jhu.edu;https://www.sjtu.edu.cn", + "aff_unique_abbr": "ECNU;JHU;SJTU", + 
"aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26861", + "title": "Intuitive Access to Smartphone Settings Using Relevance Model Trained by Contrastive Learning", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The more new features that are being added to smartphones, the harder it becomes for users to find them. This is because the feature names are usually short and there are just too many of them for the users to remember the exact words. The users are more comfortable asking contextual queries that describe the features they are looking for, but the standard term frequency-based search cannot process them. This paper presents a novel retrieval system for mobile features that accepts intuitive and contextual search queries. We trained a relevance model via contrastive learning from a pre-trained language model to perceive the contextual relevance between a query embedding and indexed mobile features. Also, to make it efficiently run on-device using minimal resources, we applied knowledge distillation to compress the model without degrading much performance. To verify the feasibility of our method, we collected test queries and conducted comparative experiments with the currently deployed search baselines. 
The results show that our system outperforms the others on contextual sentence queries and even on usual keyword-based queries.", + "primary_area": "emerging applications of ai", + "author": "Joonyoung Kim; Kangwook Lee; Haebin Shin; Hurnjoo Lee; Sechun Kang; Byunguk Choi; Dong Shin; Joohyung Lee", + "authorids": "", + "aff": "Samsung Research; Samsung Research; Samsung Research; Samsung Research; Samsung Research; Samsung Research; Samsung Research; Arizona State University", + "bibtex": "@article{Kim_Lee_Shin_Lee_Kang_Choi_Shin_Lee_2024, title={Intuitive Access to Smartphone Settings Using Relevance Model Trained by Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26861}, DOI={10.1609/aaai.v37i13.26861}, abstractNote={The more new features that are being added to smartphones, the harder it becomes for users to find them. This is because the feature names are usually short and there are just too many of them for the users to remember the exact words. The users are more comfortable asking contextual queries that describe the features they are looking for, but the standard term frequency-based search cannot process them. This paper presents a novel retrieval system for mobile features that accepts intuitive and contextual search queries. We trained a relevance model via contrastive learning from a pre-trained language model to perceive the contextual relevance between a query embedding and indexed mobile features. Also, to make it efficiently run on-device using minimal resources, we applied knowledge distillation to compress the model without degrading much performance. To verify the feasibility of our method, we collected test queries and conducted comparative experiments with the currently deployed search baselines. 
The results show that our system outperforms the others on contextual sentence queries and even on usual keyword-based queries.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Joonyoung and Lee, Kangwook and Shin, Haebin and Lee, Hurnjoo and Kang, Sechun and Choi, Byunguk and Shin, Dong and Lee, Joohyung}, year={2024}, month={Jul.}, pages={15689-15695} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26861/26633", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26861", + "pdf_size": 1085331, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:QZ28S2X3PqMJ:scholar.google.com/&scioq=Intuitive+Access+to+Smartphone+Settings+Using+Relevance+Model+Trained+by+Contrastive+Learning&hl=en&as_sdt=0,5", + "gs_version_total": 9, + "aff_domain": "samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;asu.edu", + "email": "samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;asu.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;1", + "aff_unique_norm": "Samsung Research;Arizona State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://research.samsung.com;https://www.asu.edu", + "aff_unique_abbr": "Samsung;ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;1", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-25851", + "title": "Invariant Representations with Stochastically Quantized Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Representation learning algorithms offer the opportunity to learn invariant representations of the input data with regard to nuisance factors.\nMany authors have leveraged such strategies to learn fair representations, i.e., vectors where information about sensitive attributes is removed. 
These methods are attractive as they may be interpreted as minimizing the mutual information between a neural layer's activations and a sensitive attribute.\nHowever, the theoretical grounding of such methods relies either on the computation of infinitely accurate adversaries or on minimizing a variational upper bound of a mutual information estimate.\nIn this paper, we propose a methodology for direct computation of the mutual information between neurons in a layer and a sensitive attribute. We employ stochastically-activated binary neural networks, which lets us treat neurons as random variables.\nOur method is therefore able to minimize an upper bound on the mutual information between the neural representations and a sensitive attribute.\nWe show that this method compares favorably with the state of the art in fair representation learning and that the learned representations display a higher level of invariance compared to full-precision neural networks.", + "primary_area": "machine learning i", + "author": "Mattia Cerrato; Marius K\u00f6ppel; Roberto Esposito; Stefan Kramer", + "authorids": "", + "aff": "Institute of Computer Science, Johannes Gutenberg-Universit\u00e4t Mainz, Germany; Institute for Nuclear Physics, Johannes Gutenberg-Universit\u00e4t Mainz, Germany; Computer Science Department, Universit\u00e0 degli Studi di Torino, Italy; Institute of Computer Science, Johannes Gutenberg-Universit\u00e4t Mainz, Germany", + "bibtex": "@article{Cerrato_K\u00f6ppel_Esposito_Kramer_2023, title={Invariant Representations with Stochastically Quantized Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25851}, DOI={10.1609/aaai.v37i6.25851}, abstractNote={Representation learning algorithms offer the opportunity to learn invariant representations of the input data with regard to nuisance factors.\nMany authors have leveraged such strategies to learn fair representations, i.e., vectors where information about sensitive attributes is 
removed. These methods are attractive as they may be interpreted as minimizing the mutual information between a neural layer\u2019s activations and a sensitive attribute.\nHowever, the theoretical grounding of such methods relies either on the computation of infinitely accurate adversaries or on minimizing a variational upper bound of a mutual information estimate.\nIn this paper, we propose a methodology for direct computation of the mutual information between neurons in a layer and a sensitive attribute. We employ stochastically-activated binary neural networks, which lets us treat neurons as random variables.\nOur method is therefore able to minimize an upper bound on the mutual information between the neural representations and a sensitive attribute.\nWe show that this method compares favorably with the state of the art in fair representation learning and that the learned representations display a higher level of invariance compared to full-precision neural networks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cerrato, Mattia and K\u00f6ppel, Marius and Esposito, Roberto and Kramer, Stefan}, year={2023}, month={Jun.}, pages={6962-6970} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25851/25623", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25851", + "pdf_size": 190430, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10777680385398778676&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "uni-mainz.de;uni-mainz.de;unito.it;informatik.uni-mainz.de", + "email": "uni-mainz.de;uni-mainz.de;unito.it;informatik.uni-mainz.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Johannes Gutenberg-Universit\u00e4t Mainz;Universit\u00e0 degli Studi di Torino", + "aff_unique_dep": "Institute of Computer Science;Computer Science Department", + "aff_unique_url": 
"https://www.uni-mainz.de/;https://www.unito.it", + "aff_unique_abbr": ";Unito", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "Germany;Italy" + }, + { + "id": "article-25997", + "title": "Inverse-Reference Priors for Fisher Regularization of Bayesian Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Recent studies have shown that the generalization ability of deep neural networks (DNNs) is closely related to the Fisher information matrix (FIM) calculated during the early training phase. Several methods have been proposed to regularize the FIM for increased generalization of DNNs. However, they cannot be used directly for Bayesian neural networks (BNNs) because the variable parameters of BNNs make it difficult to calculate the FIM. To address this problem, we achieve regularization of the FIM of BNNs by specifying a new suitable prior distribution called the inverse-reference (IR) prior. To regularize the FIM, the IR prior is derived as the inverse of the reference prior that imposes minimal prior knowledge on the parameters and maximizes the trace of the FIM. 
We demonstrate that the IR prior can enhance the generalization ability of BNNs for large-scale data over previously used priors while providing adequate uncertainty quantifications using various benchmark image datasets and BNN structures.", + "primary_area": "machine learning ii", + "author": "Keunseo Kim; Eun-Yeol Ma; Jeongman Choi; Heeyoung Kim", + "authorids": "", + "aff": "Samsung Advanced Institute of Technology, Suwon, Republic of Korea; Department of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea; Department of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea; Department of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea", + "bibtex": "@article{Kim_Ma_Choi_Kim_2023, title={Inverse-Reference Priors for Fisher Regularization of Bayesian Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25997}, DOI={10.1609/aaai.v37i7.25997}, abstractNote={Recent studies have shown that the generalization ability of deep neural networks (DNNs) is closely related to the Fisher information matrix (FIM) calculated during the early training phase. Several methods have been proposed to regularize the FIM for increased generalization of DNNs. However, they cannot be used directly for Bayesian neural networks (BNNs) because the variable parameters of BNNs make it difficult to calculate the FIM. To address this problem, we achieve regularization of the FIM of BNNs by specifying a new suitable prior distribution called the inverse-reference (IR) prior. To regularize the FIM, the IR prior is derived as the inverse of the reference prior that imposes minimal prior knowledge on the parameters and maximizes the trace of the FIM. 
We demonstrate that the IR prior can enhance the generalization ability of BNNs for large-scale data over previously used priors while providing adequate uncertainty quantifications using various benchmark image datasets and BNN structures.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Keunseo and Ma, Eun-Yeol and Choi, Jeongman and Kim, Heeyoung}, year={2023}, month={Jun.}, pages={8264-8272} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25997/25769", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25997", + "pdf_size": 2724336, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15331827333546427762&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "samsung.com;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "samsung.com;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Samsung Advanced Institute of Technology;KAIST", + "aff_unique_dep": ";Department of Industrial and Systems Engineering", + "aff_unique_url": "https://www.sait.samsung.com;https://www.kaist.ac.kr", + "aff_unique_abbr": "SAIT;KAIST", + "aff_campus_unique_index": "0;1;1;1", + "aff_campus_unique": "Suwon;Daejeon", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-26972", + "title": "Invertible Conditional GAN Revisited: Photo-to-Manga Face Translation with Modern Architectures (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Recent style translation methods have extended their transferability from texture to geometry. However, performing translation while preserving image content when there is a significant style difference is still an open problem. 
To overcome this problem, we propose Invertible Conditional Fast GAN (IcFGAN) based on GAN inversion and cFGAN. It allows for unpaired photo-to-manga face translation. Experimental results show that our method could translate styles under significant style gaps, while the state-of-the-art methods could hardly preserve image content.", + "primary_area": "", + "author": "Taro Hatakeyama; Ryusuke Saito; Komei Hiruta; Atsushi Hashimoto; Satoshi Kurihara", + "authorids": "", + "aff": "Keio University; Keio University; Keio University; Keio University+OMRON SINIC X Corp.; Keio University", + "bibtex": "@article{Hatakeyama_Saito_Hiruta_Hashimoto_Kurihara_2024, title={Invertible Conditional GAN Revisited: Photo-to-Manga Face Translation with Modern Architectures (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26972}, DOI={10.1609/aaai.v37i13.26972}, abstractNote={Recent style translation methods have extended their transferability from texture to geometry. However, performing translation while preserving image content when there is a significant style difference is still an open problem. To overcome this problem, we propose Invertible Conditional Fast GAN (IcFGAN) based on GAN inversion and cFGAN. It allows for unpaired photo-to-manga face translation. 
Experimental results show that our method could translate styles under significant style gaps, while the state-of-the-art methods could hardly preserve image content.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hatakeyama, Taro and Saito, Ryusuke and Hiruta, Komei and Hashimoto, Atsushi and Kurihara, Satoshi}, year={2024}, month={Jul.}, pages={16224-16225} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26972/26744", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26972", + "pdf_size": 3724813, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13972510780590740032&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "keio.jp;keio.jp;keio.jp;keio.jp;keio.jp", + "email": "keio.jp;keio.jp;keio.jp;keio.jp;keio.jp", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0+1;0", + "aff_unique_norm": "Keio University;OMRON Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.keio.ac.jp;https://www.omron.com/", + "aff_unique_abbr": "Keio;OMRON", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26121", + "title": "Ising-Traffic: Using Ising Machine Learning to Predict Traffic Congestion under Uncertainty", + "track": "main", + "status": "Technical", + "abstract": "This paper addresses the challenges in accurate and real-time traffic congestion prediction under uncertainty by proposing Ising-Traffic, a dual-model Ising-based traffic prediction framework that delivers higher accuracy and lower latency than SOTA solutions. 
While traditional solutions face the dilemma from the trade-off between algorithm complexity and computational efficiency, our Ising-based method breaks away from the trade-off leveraging the Ising model's strong expressivity and the Ising machine's strong computation power. In particular, Ising-Traffic formulates traffic prediction under uncertainty into two Ising models: Reconstruct-Ising and Predict-Ising. Reconstruct-Ising is mapped onto modern Ising machines and handles uncertainty in traffic accurately with negligible latency and energy consumption, while Predict-Ising is mapped onto traditional processors and predicts future congestion precisely with only at most 1.8% computational demands of existing solutions. Our evaluation shows Ising-Traffic delivers on average 98X speedups and 5% accuracy improvement over SOTA.", + "primary_area": "machine learning iii", + "author": "Zhenyu Pan; Anshujit Sharma; Jerry Yao-Chieh Hu; Zhuo Liu; Ang Li; Han Liu; Michael Huang; Tony Geng", + "authorids": "", + "aff": "University of Rochester; University of Rochester; Northwestern University; University of Rochester; Pacific Northwest National Laboratory; Northwestern University; University of Rochester; University of Rochester", + "bibtex": "@article{Pan_Sharma_Hu_Liu_Li_Liu_Huang_Geng_2023, title={Ising-Traffic: Using Ising Machine Learning to Predict Traffic Congestion under Uncertainty}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26121}, DOI={10.1609/aaai.v37i8.26121}, abstractNote={This paper addresses the challenges in accurate and real-time traffic congestion prediction under uncertainty by proposing Ising-Traffic, a dual-model Ising-based traffic prediction framework that delivers higher accuracy and lower latency than SOTA solutions. 
While traditional solutions face the dilemma from the trade-off between algorithm complexity and computational efficiency, our Ising-based method breaks away from the trade-off leveraging the Ising model\u2019s strong expressivity and the Ising machine\u2019s strong computation power. In particular, Ising-Traffic formulates traffic prediction under uncertainty into two Ising models: Reconstruct-Ising and Predict-Ising. Reconstruct-Ising is mapped onto modern Ising machines and handles uncertainty in traffic accurately with negligible latency and energy consumption, while Predict-Ising is mapped onto traditional processors and predicts future congestion precisely with only at most 1.8% computational demands of existing solutions. Our evaluation shows Ising-Traffic delivers on average 98X speedups and 5% accuracy improvement over SOTA.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Zhenyu and Sharma, Anshujit and Hu, Jerry Yao-Chieh and Liu, Zhuo and Li, Ang and Liu, Han and Huang, Michael and Geng, Tony}, year={2023}, month={Jun.}, pages={9354-9363} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26121/25893", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26121", + "pdf_size": 750133, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=537380253190789847&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "rochester.edu;rochester.edu;northwestern.edu;rochester.edu;pnnl.gov;northwestern.edu;rochester.edu;rochester.edu", + "email": "rochester.edu;rochester.edu;northwestern.edu;rochester.edu;pnnl.gov;northwestern.edu;rochester.edu;rochester.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;0;2;1;0;0", + "aff_unique_norm": "University of Rochester;Northwestern University;Pacific Northwest National Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": 
"https://www.rochester.edu;https://www.northwestern.edu;https://www.pnnl.gov", + "aff_unique_abbr": "U of R;NU;PNNL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26216", + "title": "Isolation and Impartial Aggregation: A Paradigm of Incremental Learning without Interference", + "track": "main", + "status": "Technical", + "abstract": "This paper focuses on the prevalent stage interference and stage performance imbalance of incremental learning. To avoid obvious stage learning bottlenecks, we propose a new incremental learning framework, which leverages a series of stage-isolated classifiers to perform the learning task at each stage, without interference from others. To be concrete, to aggregate multiple stage classifiers as a uniform one impartially, we first introduce a temperature-controlled energy metric for indicating the confidence score levels of the stage classifiers. We then propose an anchor-based energy self-normalization strategy to ensure the stage classifiers work at the same energy level. Finally, we design a voting-based inference augmentation strategy for robust inference. The proposed method is rehearsal-free and can work for almost all incremental learning scenarios. We evaluate the proposed method on four large datasets. Extensive results demonstrate the superiority of the proposed method in setting up new state-of-the-art overall performance. 
Code is available at https://github.com/iamwangyabin/ESN.", + "primary_area": "machine learning iii", + "author": "Yabin Wang; Zhiheng Ma; Zhiwu Huang; Yaowei Wang; Zhou Su; Xiaopeng Hong", + "authorids": "", + "aff": "School of Cyber Science and Engineering, Xi\u2019an Jiaotong University + Singapore Management University; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; Singapore Management University + University of Southampton; Peng Cheng Laboratory; School of Cyber Science and Engineering, Xi\u2019an Jiaotong University; Harbin Institute of Technology + Peng Cheng Laboratory", + "bibtex": "@article{Wang_Ma_Huang_Wang_Su_Hong_2023, title={Isolation and Impartial Aggregation: A Paradigm of Incremental Learning without Interference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26216}, DOI={10.1609/aaai.v37i8.26216}, abstractNote={This paper focuses on the prevalent stage interference and stage performance imbalance of incremental learning. To avoid obvious stage learning bottlenecks, we propose a new incremental learning framework, which leverages a series of stage-isolated classifiers to perform the learning task at each stage, without interference from others. To be concrete, to aggregate multiple stage classifiers as a uniform one impartially, we first introduce a temperature-controlled energy metric for indicating the confidence score levels of the stage classifiers. We then propose an anchor-based energy self-normalization strategy to ensure the stage classifiers work at the same energy level. Finally, we design a voting-based inference augmentation strategy for robust inference. The proposed method is rehearsal-free and can work for almost all incremental learning scenarios. We evaluate the proposed method on four large datasets. Extensive results demonstrate the superiority of the proposed method in setting up new state-of-the-art overall performance. 
Code is available at https://github.com/iamwangyabin/ESN.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yabin and Ma, Zhiheng and Huang, Zhiwu and Wang, Yaowei and Su, Zhou and Hong, Xiaopeng}, year={2023}, month={Jun.}, pages={10209-10217} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26216/25988", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26216", + "pdf_size": 492674, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7838941781450076401&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.xjtu.edu.cn;siat.ac.cn;soton.ac.uk;pcl.ac.cn;ieee.org;ieee.org", + "email": "stu.xjtu.edu.cn;siat.ac.cn;soton.ac.uk;pcl.ac.cn;ieee.org;ieee.org", + "github": "https://github.com/iamwangyabin/ESN", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;1+3;4;0;5+4", + "aff_unique_norm": "Xi'an Jiaotong University;Singapore Management University;Shenzhen Institute of Advanced Technology;University of Southampton;Peng Cheng Laboratory;Harbin Institute of Technology", + "aff_unique_dep": "School of Cyber Science and Engineering;;;;;", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.smu.edu.sg;http://www.siat.cas.cn;https://www.southampton.ac.uk;http://www.pcl.ac.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "XJTU;SMU;SIAT;Southampton;PCL;HIT", + "aff_campus_unique_index": "0;2;;0;3", + "aff_campus_unique": "Xi'an;;Shenzhen;Harbin", + "aff_country_unique_index": "0+1;0;1+2;0;0;0+0", + "aff_country_unique": "China;Singapore;United Kingdom" + }, + { + "id": "article-26124", + "title": "Isometric Manifold Learning Using Hierarchical Flow", + "track": "main", + "status": "Technical", + "abstract": "We propose the Hierarchical Flow (HF) model constrained by isometric regularizations for manifold learning that combines manifold learning goals such as dimensionality reduction, inference, sampling, projection and density 
estimation into one unified framework. Our proposed HF model is regularized to not only produce embeddings preserving the geometric structure of the manifold, but also project samples onto the manifold in a manner conforming to the rigorous definition of projection. Theoretical guarantees are provided for our HF model to satisfy the two desired properties. In order to detect the real dimensionality of the manifold, we also propose a two-stage dimensionality reduction algorithm, which is a time-efficient algorithm thanks to the hierarchical architecture design of our HF model. Experimental results justify our theoretical analysis, demonstrate the superiority of our dimensionality reduction algorithm in cost of training time, and verify the effect of the aforementioned properties in improving performances on downstream tasks such as anomaly detection.", + "primary_area": "machine learning iii", + "author": "Ziqi Pan; Jianfu Zhang; Li Niu; Liqing Zhang", + "authorids": "", + "aff": "MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China", + "bibtex": "@article{Pan_Zhang_Niu_Zhang_2023, title={Isometric Manifold Learning Using Hierarchical Flow}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26124}, DOI={10.1609/aaai.v37i8.26124}, abstractNote={We propose the Hierarchical Flow (HF) model constrained by isometric regularizations for manifold learning that combines manifold learning goals such as dimensionality reduction, inference, sampling, projection and density 
estimation into one unified framework. Our proposed HF model is regularized to not only produce embeddings preserving the geometric structure of the manifold, but also project samples onto the manifold in a manner conforming to the rigorous definition of projection. Theoretical guarantees are provided for our HF model to satisfy the two desired properties. In order to detect the real dimensionality of the manifold, we also propose a two-stage dimensionality reduction algorithm, which is a time-efficient algorithm thanks to the hierarchical architecture design of our HF model. Experimental results justify our theoretical analysis, demonstrate the superiority of our dimensionality reduction algorithm in cost of training time, and verify the effect of the aforementioned properties in improving performances on downstream tasks such as anomaly detection.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pan, Ziqi and Zhang, Jianfu and Niu, Li and Zhang, Liqing}, year={2023}, month={Jun.}, pages={9381-9388} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26124/25896", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26124", + "pdf_size": 2169048, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:LelbEci_PfEJ:scholar.google.com/&scioq=Isometric+Manifold+Learning+Using+Hierarchical+Flow&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-25570", + "title": "IterDE: An Iterative Knowledge Distillation Framework for Knowledge Graph Embeddings", + "track": "main", + "status": "Technical", + "abstract": "Knowledge distillation for knowledge graph embedding (KGE) aims to reduce the KGE model size to address the challenges of storage limitations and knowledge reasoning efficiency. However, current work still suffers from the performance drops when compressing a high-dimensional original KGE model to a low-dimensional distillation KGE model. Moreover, most work focuses on the reduction of inference time but ignores the time-consuming training process of distilling KGE models. In this paper, we propose IterDE, a novel knowledge distillation framework for KGEs. First, IterDE introduces an iterative distillation way and enables a KGE model to alternately be a student model and a teacher model during the iterative distillation process. Consequently, knowledge can be transferred in a smooth manner between high-dimensional teacher models and low-dimensional student models, while preserving good KGE performances. Furthermore, in order to optimize the training process, we consider that different optimization objects between hard label loss and soft label loss can affect the efficiency of training, and then we propose a soft-label weighting dynamic adjustment mechanism that can balance the inconsistency of optimization direction between hard and soft label loss by gradually increasing the weighting of soft label loss. Our experimental results demonstrate that IterDE achieves a new state-of-the-art distillation performance for KGEs compared to strong baselines on the link prediction task. Significantly, IterDE can reduce the training time by 50% on average. 
Finally, more exploratory experiments show that the soft-label weighting dynamic adjustment mechanism and more fine-grained iterations can improve distillation performance.", + "primary_area": "data mining and knowledge management", + "author": "Jiajun Liu; Peng Wang; Ziyu Shang; Chenxiao Wu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University", + "bibtex": "@article{Liu_Wang_Shang_Wu_2023, title={IterDE: An Iterative Knowledge Distillation Framework for Knowledge Graph Embeddings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25570}, DOI={10.1609/aaai.v37i4.25570}, abstractNote={Knowledge distillation for knowledge graph embedding (KGE) aims to reduce the KGE model size to address the challenges of storage limitations and knowledge reasoning efficiency. However, current work still suffers from the performance drops when compressing a high-dimensional original KGE model to a low-dimensional distillation KGE model. Moreover, most work focuses on the reduction of inference time but ignores the time-consuming training process of distilling KGE models. In this paper, we propose IterDE, a novel knowledge distillation framework for KGEs. First, IterDE introduces an iterative distillation way and enables a KGE model to alternately be a student model and a teacher model during the iterative distillation process. Consequently, knowledge can be transferred in a smooth manner between high-dimensional teacher models and low-dimensional student models, while preserving good KGE performances. 
Furthermore, in order to optimize the training process, we consider that different optimization objects between hard label loss and soft label loss can affect the efficiency of training, and then we propose a soft-label weighting dynamic adjustment mechanism that can balance the inconsistency of optimization direction between hard and soft label loss by gradually increasing the weighting of soft label loss. Our experimental results demonstrate that IterDE achieves a new state-of-the-art distillation performance for KGEs compared to strong baselines on the link prediction task. Significantly, IterDE can reduce the training time by 50% on average. Finally, more exploratory experiments show that the soft-label weighting dynamic adjustment mechanism and more fine-grained iterations can improve distillation performance.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Jiajun and Wang, Peng and Shang, Ziyu and Wu, Chenxiao}, year={2023}, month={Jun.}, pages={4488-4496} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25570/25342", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25570", + "pdf_size": 252754, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6853957499431888490&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Southeast University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.seu.edu.cn/", + "aff_unique_abbr": "SEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26744", + "title": "Iteratively Enhanced Semidefinite Relaxations for Efficient Neural 
Network Verification", + "track": "aaai special track", + "status": "Technical", + "abstract": "We propose an enhanced semidefinite program (SDP) relaxation to enable the tight and efficient verification of neural networks (NNs). The tightness improvement is achieved by introducing a nonlinear constraint to existing SDP relaxations previously proposed for NN verification. The efficiency of the proposal stems from the iterative nature of the proposed algorithm in that it solves the resulting non-convex SDP by recursively solving auxiliary convex layer-based SDP problems. We show formally that the solution generated by our algorithm is tighter than state-of-the-art SDP-based solutions for the problem. We also show that the solution sequence converges to the optimal solution of the non-convex enhanced SDP relaxation. The experimental results on standard benchmarks in the area show that our algorithm achieves the state-of-the-art performance whilst maintaining an acceptable computational cost.", + "primary_area": "safe and robust ai", + "author": "Jianglin Lan; Yang Zheng; Alessio Lomuscio", + "authorids": "", + "aff": "James Watt School of Engineering, University of Glasgow, UK; Department of Electrical and Computer Engineering, University of California San Diego, USA; Department of Computing, Imperial College London, UK", + "bibtex": "@article{Lan_Zheng_Lomuscio_2023, title={Iteratively Enhanced Semidefinite Relaxations for Efficient Neural Network Verification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26744}, DOI={10.1609/aaai.v37i12.26744}, abstractNote={We propose an enhanced semidefinite program (SDP) relaxation to enable the tight and efficient verification of neural networks (NNs). The tightness improvement is achieved by introducing a nonlinear constraint to existing SDP relaxations previously proposed for NN verification. 
The efficiency of the proposal stems from the iterative nature of the proposed algorithm in that it solves the resulting non-convex SDP by recursively solving auxiliary convex layer-based SDP problems. We show formally that the solution generated by our algorithm is tighter than state-of-the-art SDP-based solutions for the problem. We also show that the solution sequence converges to the optimal solution of the non-convex enhanced SDP relaxation. The experimental results on standard benchmarks in the area show that our algorithm achieves the state-of-the-art performance whilst maintaining an acceptable computational cost.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lan, Jianglin and Zheng, Yang and Lomuscio, Alessio}, year={2023}, month={Jun.}, pages={14937-14945} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26744/26516", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26744", + "pdf_size": 288053, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13059351850778114221&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "glasgow.ac.uk;eng.ucsd.edu;imperial.ac.uk", + "email": "glasgow.ac.uk;eng.ucsd.edu;imperial.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "University of Glasgow;University of California San Diego;Imperial College London", + "aff_unique_dep": "James Watt School of Engineering;Department of Electrical and Computer Engineering;Department of Computing", + "aff_unique_url": "https://www.gla.ac.uk;https://www.ucsd.edu;https://www.imperial.ac.uk", + "aff_unique_abbr": "UoG;UCSD;Imperial", + "aff_campus_unique_index": "0;1;2", + "aff_campus_unique": "Glasgow;San Diego;London", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "article-26019", + "title": "I\u2019m Me, We\u2019re Us, and I\u2019m Us: 
Tri-directional Contrastive Learning on Hypergraphs", + "track": "main", + "status": "Technical", + "abstract": "Although machine learning on hypergraphs has attracted considerable attention, most of the works have focused on (semi-)supervised learning, which may cause heavy labeling costs and poor generalization. Recently, contrastive learning has emerged as a successful unsupervised representation learning method. Despite the prosperous development of contrastive learning in other domains, contrastive learning on hypergraphs remains little explored. In this paper, we propose TriCL (Tri-directional Contrastive Learning), a general framework for contrastive learning on hypergraphs. Its main idea is tri-directional contrast, and specifically, it aims to maximize in two augmented views the agreement (a) between the same node, (b) between the same group of nodes, and (c) between each group and its members. Together with simple but surprisingly effective data augmentation and negative sampling schemes, these three forms of contrast enable TriCL to capture both node- and group-level structural information in node embeddings. Our extensive experiments using 14 baseline approaches, 10 datasets, and two tasks demonstrate the effectiveness of TriCL, and most noticeably, TriCL almost consistently outperforms not just unsupervised competitors but also (semi-)supervised competitors mostly by significant margins for node classification. 
The code and datasets are available at https://github.com/wooner49/TriCL.", + "primary_area": "machine learning ii", + "author": "Dongjin Lee; Kijung Shin", + "authorids": "", + "aff": "School of Electrical Engineering, KAIST, South Korea + Kim Jaechul Graduate School of AI, KAIST, South Korea; School of Electrical Engineering, KAIST, South Korea + Kim Jaechul Graduate School of AI, KAIST, South Korea", + "bibtex": "@article{Lee_Shin_2023, title={I\u2019m Me, We\u2019re Us, and I\u2019m Us: Tri-directional Contrastive Learning on Hypergraphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26019}, DOI={10.1609/aaai.v37i7.26019}, abstractNote={Although machine learning on hypergraphs has attracted considerable attention, most of the works have focused on (semi-)supervised learning, which may cause heavy labeling costs and poor generalization. Recently, contrastive learning has emerged as a successful unsupervised representation learning method. Despite the prosperous development of contrastive learning in other domains, contrastive learning on hypergraphs remains little explored. In this paper, we propose TriCL (Tri-directional Contrastive Learning), a general framework for contrastive learning on hypergraphs. Its main idea is tri-directional contrast, and specifically, it aims to maximize in two augmented views the agreement (a) between the same node, (b) between the same group of nodes, and (c) between each group and its members. Together with simple but surprisingly effective data augmentation and negative sampling schemes, these three forms of contrast enable TriCL to capture both node- and group-level structural information in node embeddings. 
Our extensive experiments using 14 baseline approaches, 10 datasets, and two tasks demonstrate the effectiveness of TriCL, and most noticeably, TriCL almost consistently outperforms not just unsupervised competitors but also (semi-)supervised competitors mostly by significant margins for node classification. The code and datasets are available at https://github.com/wooner49/TriCL.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Dongjin and Shin, Kijung}, year={2023}, month={Jun.}, pages={8456-8464} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26019/25791", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26019", + "pdf_size": 593227, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5661547910317412497&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr", + "github": "https://github.com/wooner49/TriCL", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "School of Electrical Engineering", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25314", + "title": "JR2Net: Joint Monocular 3D Face Reconstruction and Reenactment", + "track": "main", + "status": "Technical", + "abstract": "Face reenactment and reconstruction benefit various applications in self-media, VR, etc. Recent face reenactment methods use 2D facial landmarks to implicitly retarget facial expressions and poses from driving videos to source images,\nwhile they suffer from pose and expression preservation issues for cross-identity scenarios, i.e., when the source and the driving subjects are different. 
Current self-supervised face reconstruction methods also demonstrate impressive results.\nHowever, these methods do not handle large expressions well, since their training data lacks samples of large expressions, and 2D facial attributes are inaccurate on such samples. To mitigate the above problems, we propose to explore the inner connection between the two tasks, i.e., using face reconstruction to provide \nsufficient 3D information for reenactment, and synthesizing videos paired with captured face model parameters through face reenactment to enhance the\nexpression module of face reconstruction. In particular, we propose a novel cascade framework named JR2Net for Joint Face Reconstruction and Reenactment, which begins with the training of a coarse reconstruction network, followed by a 3D-aware face reenactment network based on the coarse reconstruction results. In the end, we train an expression tracking network based on our synthesized videos composed by image-face model parameter pairs. Such an expression tracking network can further enhance the coarse face reconstruction. Extensive experiments show that our JR2Net outperforms the state-of-the-art methods on several face reconstruction and reenactment benchmarks.", + "primary_area": "computer vision ii", + "author": "Jiaxiang Shang; Yu Zeng; Xin Qiao; Xin Wang; Runze Zhang; Guangyuan Sun; Vishal Patel; Hongbo Fu", + "authorids": "", + "aff": "Hong Kong University of Science and Technology; Johns Hopkins University; Tencent; Tencent; Tencent; Tencent; Johns Hopkins University; City University of Hong Kong", + "bibtex": "@article{Shang_Zeng_Qiao_Wang_Zhang_Sun_Patel_Fu_2023, title={JR2Net: Joint Monocular 3D Face Reconstruction and Reenactment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25314}, DOI={10.1609/aaai.v37i2.25314}, abstractNote={Face reenactment and reconstruction benefit various applications in self-media, VR, etc. 
Recent face reenactment methods use 2D facial landmarks to implicitly retarget facial expressions and poses from driving videos to source images,\nwhile they suffer from pose and expression preservation issues for cross-identity scenarios, i.e., when the source and the driving subjects are different. Current self-supervised face reconstruction methods also demonstrate impressive results.\nHowever, these methods do not handle large expressions well, since their training data lacks samples of large expressions, and 2D facial attributes are inaccurate on such samples. To mitigate the above problems, we propose to explore the inner connection between the two tasks, i.e., using face reconstruction to provide sufficient 3D information for reenactment, and synthesizing videos paired with captured face model parameters through face reenactment to enhance the\nexpression module of face reconstruction. In particular, we propose a novel cascade framework named JR2Net for Joint Face Reconstruction and Reenactment, which begins with the training of a coarse reconstruction network, followed by a 3D-aware face reenactment network based on the coarse reconstruction results. In the end, we train an expression tracking network based on our synthesized videos composed by image-face model parameter pairs. Such an expression tracking network can further enhance the coarse face reconstruction. 
Extensive experiments show that our JR2Net outperforms the state-of-the-art methods on several face reconstruction and reenactment benchmarks.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shang, Jiaxiang and Zeng, Yu and Qiao, Xin and Wang, Xin and Zhang, Runze and Sun, Guangyuan and Patel, Vishal and Fu, Hongbo}, year={2023}, month={Jun.}, pages={2200-2208} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25314/25086", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25314", + "pdf_size": 1271041, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8464801226099025886&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "cse.ust.hk;qq.com;tencent.com;tencent.com;tencent.com;tencent.com;jhu.edu;cityu.edu.hk", + "email": "cse.ust.hk;qq.com;tencent.com;tencent.com;tencent.com;tencent.com;jhu.edu;cityu.edu.hk", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;2;2;2;1;3", + "aff_unique_norm": "Hong Kong University of Science and Technology;Johns Hopkins University;Tencent Holdings Limited;City University of Hong Kong", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ust.hk;https://www.jhu.edu;https://www.tencent.com;https://www.cityu.edu.hk", + "aff_unique_abbr": "HKUST;JHU;Tencent;CityU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26309", + "title": "Joint Multimodal Entity-Relation Extraction Based on Edge-Enhanced Graph Alignment Network and Word-Pair Relation Tagging", + "track": "main", + "status": "Technical", + "abstract": "Multimodal named entity recognition (MNER) and multimodal relation extraction (MRE) are two fundamental subtasks in the multimodal knowledge graph construction task. 
However, the existing methods usually handle two tasks independently, which ignores the bidirectional interaction between them. This paper is the first to propose jointly performing MNER and MRE as a joint multimodal entity-relation extraction (JMERE) task .\nBesides, the current MNER and MRE models only consider aligning the visual objects with textual entities in visual and textual graphs but ignore the entity-entity relationships and object-object relationships. To address the above challenges, we propose an edge-enhanced graph alignment network and a word-pair relation tagging (EEGA) for the JMERE task. Specifically, we first design a word-pair relation tagging to exploit the bidirectional interaction between MNER and MRE and avoid error propagation. Then, we propose an edge-enhanced graph alignment network to enhance the JMERE task by aligning nodes and edges in the cross-graph. Compared with previous methods, the proposed method can leverage the edge information to auxiliary alignment between objects and entities and find the correlations between entity-entity relationships and object-object relationships. Experiments are conducted to show the effectiveness of our model.", + "primary_area": "machine learning iv", + "author": "Li Yuan; Yi Cai; Jin Wang; Qing Li", + "authorids": "", + "aff": "School of Software Engineering, South China University of Technology, Guangzhou, China + Key Laboratory of Big Data and Intelligent Robot (SCUT), MOE of China + The Peng Cheng Laboratory, Shenzhen, China; School of Software Engineering, South China University of Technology, Guangzhou, China + Key Laboratory of Big Data and Intelligent Robot (SCUT), MOE of China + The Peng Cheng Laboratory, Shenzhen, China; School of Information Science and Engineering, Yunnan University, Yunnan, P.R. 
China; Department of Computing, The Hong Kong Polytechnic University, Hong Kong, China", + "bibtex": "@article{Yuan_Cai_Wang_Li_2023, title={Joint Multimodal Entity-Relation Extraction Based on Edge-Enhanced Graph Alignment Network and Word-Pair Relation Tagging}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26309}, DOI={10.1609/aaai.v37i9.26309}, abstractNote={Multimodal named entity recognition (MNER) and multimodal relation extraction (MRE) are two fundamental subtasks in the multimodal knowledge graph construction task. However, the existing methods usually handle two tasks independently, which ignores the bidirectional interaction between them. This paper is the first to propose jointly performing MNER and MRE as a joint multimodal entity-relation extraction (JMERE) task .\nBesides, the current MNER and MRE models only consider aligning the visual objects with textual entities in visual and textual graphs but ignore the entity-entity relationships and object-object relationships. To address the above challenges, we propose an edge-enhanced graph alignment network and a word-pair relation tagging (EEGA) for the JMERE task. Specifically, we first design a word-pair relation tagging to exploit the bidirectional interaction between MNER and MRE and avoid error propagation. Then, we propose an edge-enhanced graph alignment network to enhance the JMERE task by aligning nodes and edges in the cross-graph. Compared with previous methods, the proposed method can leverage the edge information to auxiliary alignment between objects and entities and find the correlations between entity-entity relationships and object-object relationships. 
Experiments are conducted to show the effectiveness of our model.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Li and Cai, Yi and Wang, Jin and Li, Qing}, year={2023}, month={Jun.}, pages={11051-11059} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26309/26081", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26309", + "pdf_size": 871641, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14304911177869380836&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.scut.edu.cn;scut.edu.cn;ynu.edu.cn;polyu.edu.hk", + "email": "mail.scut.edu.cn;scut.edu.cn;ynu.edu.cn;polyu.edu.hk", + "github": "https://github.com/YuanLi95/EEGA-for-JMERE", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+1;0+0+1;2;3", + "aff_unique_norm": "South China University of Technology;Peng Cheng Laboratory;Yunnan University;The Hong Kong Polytechnic University", + "aff_unique_dep": "School of Software Engineering;;School of Information Science and Engineering;Department of Computing", + "aff_unique_url": "https://www.scut.edu.cn;;http://www.ynu.edu.cn;https://www.polyu.edu.hk", + "aff_unique_abbr": "SCUT;;YNU;PolyU", + "aff_campus_unique_index": "0+2;0+2;3;4", + "aff_campus_unique": "Guangzhou;;Shenzhen;Yunnan;Hong Kong", + "aff_country_unique_index": "0+0+0;0+0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26687", + "title": "Joint Self-Supervised Image-Volume Representation Learning with Intra-inter Contrastive Clustering", + "track": "aaai special track", + "status": "Technical", + "abstract": "Collecting large-scale medical datasets with fully annotated samples for training of deep networks is prohibitively expensive, especially for 3D volume data. 
Recent breakthroughs in self-supervised learning (SSL) offer the ability to overcome the lack of labeled training samples by learning feature representations from unlabeled data. However, most current SSL techniques in the medical field have been designed for either 2D images or 3D volumes. In practice, this restricts the capability to fully leverage unlabeled data from numerous sources, which may include both 2D and 3D data. Additionally, the use of these pre-trained networks is constrained to downstream tasks with compatible data dimensions.\nIn this paper, we propose a novel framework for unsupervised joint learning on 2D and 3D data modalities. Given a set of 2D images or 2D slices extracted from 3D volumes, we construct an SSL task based on a 2D contrastive clustering problem for distinct classes. The 3D volumes are exploited by computing vectored embedding at each slice and then assembling a holistic feature through deformable self-attention mechanisms in Transformer, allowing incorporating long-range dependencies between slices inside 3D volumes. These holistic features are further utilized to define a novel 3D clustering agreement-based SSL task and masking embedding prediction inspired by pre-trained language models. Experiments on downstream tasks, such as 3D brain segmentation, lung nodule detection, 3D heart structures segmentation, and abnormal chest X-ray detection, demonstrate the effectiveness of our joint 2D and 3D SSL approach. We improve plain 2D Deep-ClusterV2 and SwAV by a significant margin and also surpass various modern 2D and 3D SSL approaches.", + "primary_area": "ai for social impact", + "author": "Duy M. H. Nguyen; Hoang Nguyen; Truong T. N. Mai; Tri Cao; Binh T. 
Nguyen; Nhat Ho; Paul Swoboda; Shadi Albarqouni; Pengtao Xie; Daniel Sonntag", + "authorids": "", + "aff": "Department of Computer Science, University of Stuttgart, Germany+German Research Center for Artificial Intelligence, Germany; AISIA Lab, University of Science - VNU HCM, Vietnam; Department of Multimedia Engineering, Dongguk University, South Korea; AISIA Lab, University of Science - VNU HCM, Vietnam; AISIA Lab, University of Science - VNU HCM, Vietnam; Department of Statistics and Data Sciences, University of Texas at Austin, United States; Max Planck Institute for Informatics, Germany; Helmholtz AI, Helmholtz Munich, Germany+Clinic for Diagnostic and Interventional Radiology, University of Bonn, Germany; Department of Electrical and Computer Engineering, University of California San Diego, United States; Department of Computer Science, Oldenburg University, Germany+German Research Center for Artificial Intelligence, Germany", + "bibtex": "@article{Nguyen_Nguyen_Mai_Cao_Nguyen_Ho_Swoboda_Albarqouni_Xie_Sonntag_2023, title={Joint Self-Supervised Image-Volume Representation Learning with Intra-inter Contrastive Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26687}, DOI={10.1609/aaai.v37i12.26687}, abstractNote={Collecting large-scale medical datasets with fully annotated samples for training of deep networks is prohibitively expensive, especially for 3D volume data. Recent breakthroughs in self-supervised learning (SSL) offer the ability to overcome the lack of labeled training samples by learning feature representations from unlabeled data. However, most current SSL techniques in the medical field have been designed for either 2D images or 3D volumes. In practice, this restricts the capability to fully leverage unlabeled data from numerous sources, which may include both 2D and 3D data. 
Additionally, the use of these pre-trained networks is constrained to downstream tasks with compatible data dimensions.\nIn this paper, we propose a novel framework for unsupervised joint learning on 2D and 3D data modalities. Given a set of 2D images or 2D slices extracted from 3D volumes, we construct an SSL task based on a 2D contrastive clustering problem for distinct classes. The 3D volumes are exploited by computing vectored embedding at each slice and then assembling a holistic feature through deformable self-attention mechanisms in Transformer, allowing incorporating long-range dependencies between slices inside 3D volumes. These holistic features are further utilized to define a novel 3D clustering agreement-based SSL task and masking embedding prediction inspired by pre-trained language models. Experiments on downstream tasks, such as 3D brain segmentation, lung nodule detection, 3D heart structures segmentation, and abnormal chest X-ray detection, demonstrate the effectiveness of our joint 2D and 3D SSL approach. We improve plain 2D Deep-ClusterV2 and SwAV by a significant margin and also surpass various modern 2D and 3D SSL approaches.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Duy M. H. and Nguyen, Hoang and Mai, Truong T. N. and Cao, Tri and Nguyen, Binh T. 
and Ho, Nhat and Swoboda, Paul and Albarqouni, Shadi and Xie, Pengtao and Sonntag, Daniel}, year={2023}, month={Jun.}, pages={14426-14435} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26687/26459", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26687", + "pdf_size": 1664183, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2992479361614785751&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "dfki.de; ; ; ; ; ; ; ; ; ", + "email": "dfki.de; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1;2;3;2;2;4;5;6+7;8;9+1", + "aff_unique_norm": "University of Stuttgart;German Research Center for Artificial Intelligence;University of Science - VNU HCM;Dongguk University;University of Texas at Austin;Max Planck Institute for Informatics;Helmholtz Munich;University of Bonn;University of California San Diego;Oldenburg University", + "aff_unique_dep": "Department of Computer Science;;AISIA Lab;Department of Multimedia Engineering;Department of Statistics and Data Sciences;;Helmholtz AI;Clinic for Diagnostic and Interventional Radiology;Department of Electrical and Computer Engineering;Department of Computer Science", + "aff_unique_url": "https://www.uni-stuttgart.de;https://www.dfki.de/;;https://www.dongguk.edu;https://www.utexas.edu;https://mpi-inf.mpg.de;https://www.helmholtz-munich.de;https://www.uni-bonn.de;https://ucsd.edu;https://www.uni-oldenburg.de", + "aff_unique_abbr": "Uni Stuttgart;DFKI;;Dongguk;UT Austin;MPII;Helmholtz Munich;;UCSD;", + "aff_campus_unique_index": ";1;;2;", + "aff_campus_unique": ";Austin;San Diego", + "aff_country_unique_index": "0+0;1;2;1;1;3;0;0+0;3;0+0", + "aff_country_unique": "Germany;Vietnam;South Korea;United States" + }, + { + "id": "article-25599", + "title": "Jointly Imputing Multi-View Data with Optimal Transport", + "track": "main", + "status": "Technical", + "abstract": "The multi-view data with 
incomplete information hinder the effective data analysis. Existing multi-view imputation methods that learn the mapping between complete view and completely missing view are not able to deal with the common multi-view data with missing feature information. In this paper, we propose a generative imputation model named Git with optimal transport theory to jointly impute the missing features/values, conditional on all observed values from the multi-view data. Git consists of two modules, i.e., a multi-view joint generator (MJG) and a masking energy discriminator (MED). The generator MJG incorporates a joint autoencoder with the multiple imputation rule to learn the data distribution from all observed multi-view data. The discriminator MED leverages a new masking energy divergence function to make Git differentiable for imputation enhancement. Extensive experiments on several real-world multi-view data sets demonstrate that, Git yields over 35% accuracy gain, compared to the state-of-the-art approaches.", + "primary_area": "data mining and knowledge management", + "author": "Yangyang Wu; Xiaoye Miao; Xinyu Huang; Jianwei Yin", + "authorids": "", + "aff": "Center for Data Science, Zhejiang University, Hangzhou, China+College of Computer Science, Zhejiang University, Hangzhou, China; Center for Data Science, Zhejiang University, Hangzhou, China; Data Science Institute, Columbia University, New York, USA; Center for Data Science, Zhejiang University, Hangzhou, China+College of Computer Science, Zhejiang University, Hangzhou, China", + "bibtex": "@article{Wu_Miao_Huang_Yin_2023, title={Jointly Imputing Multi-View Data with Optimal Transport}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25599}, DOI={10.1609/aaai.v37i4.25599}, abstractNote={The multi-view data with incomplete information hinder the effective data analysis. 
Existing multi-view imputation methods that learn the mapping between complete view and completely missing view are not able to deal with the common multi-view data with missing feature information. In this paper, we propose a generative imputation model named Git with optimal transport theory to jointly impute the missing features/values, conditional on all observed values from the multi-view data. Git consists of two modules, i.e., a multi-view joint generator (MJG) and a masking energy discriminator (MED). The generator MJG incorporates a joint autoencoder with the multiple imputation rule to learn the data distribution from all observed multi-view data. The discriminator MED leverages a new masking energy divergence function to make Git differentiable for imputation enhancement. Extensive experiments on several real-world multi-view data sets demonstrate that, Git yields over 35% accuracy gain, compared to the state-of-the-art approaches.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yangyang and Miao, Xiaoye and Huang, Xinyu and Yin, Jianwei}, year={2023}, month={Jun.}, pages={4747-4755} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25599/25371", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25599", + "pdf_size": 637657, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6255040827058511244&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;zju.edu.cn;columbia.edu;cs.zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;columbia.edu;cs.zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0;1;0+0", + "aff_unique_norm": "Zhejiang University;Columbia University", + "aff_unique_dep": "Center for Data Science;Data Science Institute", + "aff_unique_url": "http://www.zju.edu.cn;https://www.columbia.edu", + "aff_unique_abbr": "ZJU;Columbia", + "aff_campus_unique_index": "0+0;0;1;0+0", + 
"aff_campus_unique": "Hangzhou;New York", + "aff_country_unique_index": "0+0;0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25399", + "title": "Just Noticeable Visual Redundancy Forecasting: A Deep Multimodal-Driven Approach", + "track": "main", + "status": "Technical", + "abstract": "Just noticeable difference (JND) refers to the maximum visual change that human eyes cannot perceive, and it has a wide range of applications in multimedia systems. However, most existing JND approaches only focus on a single modality, and rarely consider the complementary effects of multimodal information. In this article, we investigate the JND modeling from an end-to-end homologous multimodal perspective, namely hmJND-Net. Specifically, we explore three important visually sensitive modalities, including saliency, depth, and segmentation. To better utilize homologous multimodal information, we establish an effective fusion method via summation enhancement and subtractive offset, and align homologous multimodal features based on a self-attention driven encoder-decoder paradigm. 
Extensive experimental results on eight different benchmark datasets validate the superiority of our hmJND-Net over eight representative methods.", + "primary_area": "computer vision iii", + "author": "Wuyuan Xie; Shukang Wang; Sukun Tian; Lirong Huang; Ye Liu; Miaohui Wang", + "authorids": "", + "aff": "Guangdong Key Laboratory of Intelligent Information Processing, Shenzhen University; Guangdong Key Laboratory of Intelligent Information Processing, Shenzhen University; Peking University; Guangdong Key Laboratory of Intelligent Information Processing, Shenzhen University; Nanjing University of Posts and Telecommunications; Guangdong Key Laboratory of Intelligent Information Processing, Shenzhen University", + "bibtex": "@article{Xie_Wang_Tian_Huang_Liu_Wang_2023, title={Just Noticeable Visual Redundancy Forecasting: A Deep Multimodal-Driven Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25399}, DOI={10.1609/aaai.v37i3.25399}, abstractNote={Just noticeable difference (JND) refers to the maximum visual change that human eyes cannot perceive, and it has a wide range of applications in multimedia systems. However, most existing JND approaches only focus on a single modality, and rarely consider the complementary effects of multimodal information. In this article, we investigate the JND modeling from an end-to-end homologous multimodal perspective, namely hmJND-Net. Specifically, we explore three important visually sensitive modalities, including saliency, depth, and segmentation. To better utilize homologous multimodal information, we establish an effective fusion method via summation enhancement and subtractive offset, and align homologous multimodal features based on a self-attention driven encoder-decoder paradigm. 
Extensive experimental results on eight different benchmark datasets validate the superiority of our hmJND-Net over eight representative methods.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Wuyuan and Wang, Shukang and Tian, Sukun and Huang, Lirong and Liu, Ye and Wang, Miaohui}, year={2023}, month={Jun.}, pages={2965-2973} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25399/25171", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25399", + "pdf_size": 13578175, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5268403046532235372&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;hotmail.com; ;njupt.edu.cn; ;gmail.com", + "email": "gmail.com;hotmail.com; ;njupt.edu.cn; ;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;0", + "aff_unique_norm": "Shenzhen University;Peking University;Nanjing University of Posts and Telecommunications", + "aff_unique_dep": "Guangdong Key Laboratory of Intelligent Information Processing;;", + "aff_unique_url": "https://www.szu.edu.cn;http://www.pku.edu.cn;http://www.njupt.edu.cn", + "aff_unique_abbr": "SZU;Peking U;NJUPT", + "aff_campus_unique_index": "0;0;0;2;0", + "aff_campus_unique": "Shenzhen;;Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26565", + "title": "KICE: A Knowledge Consolidation and Expansion Framework for Relation Extraction", + "track": "main", + "status": "Technical", + "abstract": "Machine Learning is often challenged by insufficient labeled data. Previous methods employing implicit commonsense knowledge of pre-trained language models (PLMs) or pattern-based symbolic knowledge have achieved great success in mitigating manual annotation efforts. 
In this paper, we focus on the collaboration among different knowledge sources and present KICE, a Knowledge-evolving framework by Iterative Consolidation and Expansion with the guidance of PLMs and rule-based patterns. Specifically, starting with limited labeled data as seeds, KICE first builds a Rule Generator by prompt-tuning to stimulate the rich knowledge distributed in PLMs, generate seed rules, and initialize the rules set. Afterwards, based on the rule-labeled data, the task model is trained in a self-training pipeline where the knowledge in rules set is consolidated with self-learned high-confidence rules. Finally, for the low-confidence rules, KICE solicits human-enlightened understanding and expands the knowledge coverage for better task model training. Our framework is verified on relation extraction (RE) task, and the experiments on TACRED show that the model performance (F1) grows from 33.24% to 79.84% with the enrichment of knowledge, outperforming all the baselines including other knowledgeable methods.", + "primary_area": "speech natural language processing", + "author": "Yilin Lu; Xiaoqiang Wang; Haofeng Yang; Siliang Tang", + "authorids": "", + "aff": "School of Computer Science, Zhejiang University; School of Computer Science, Zhejiang University; School of Computer Science, Zhejiang University; School of Computer Science, Zhejiang University", + "bibtex": "@article{Lu_Wang_Yang_Tang_2023, title={KICE: A Knowledge Consolidation and Expansion Framework for Relation Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26565}, DOI={10.1609/aaai.v37i11.26565}, abstractNote={Machine Learning is often challenged by insufficient labeled data. Previous methods employing implicit commonsense knowledge of pre-trained language models (PLMs) or pattern-based symbolic knowledge have achieved great success in mitigating manual annotation efforts. 
In this paper, we focus on the collaboration among different knowledge sources and present KICE, a Knowledge-evolving framework by Iterative Consolidation and Expansion with the guidance of PLMs and rule-based patterns. Specifically, starting with limited labeled data as seeds, KICE first builds a Rule Generator by prompt-tuning to stimulate the rich knowledge distributed in PLMs, generate seed rules, and initialize the rules set. Afterwards, based on the rule-labeled data, the task model is trained in a self-training pipeline where the knowledge in rules set is consolidated with self-learned high-confidence rules. Finally, for the low-confidence rules, KICE solicits human-enlightened understanding and expands the knowledge coverage for better task model training. Our framework is verified on relation extraction (RE) task, and the experiments on TACRED show that the model performance (F1) grows from 33.24% to 79.84% with the enrichment of knowledge, outperforming all the baselines including other knowledgeable methods.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Yilin and Wang, Xiaoqiang and Yang, Haofeng and Tang, Siliang}, year={2023}, month={Jun.}, pages={13336-13343} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26565/26337", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26565", + "pdf_size": 536150, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6004598884778388707&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26646", + "title": "KPT: Keyword-Guided Pre-training for Grounded Dialog Generation", + "track": "main", + "status": "Technical", + "abstract": "Incorporating external knowledge into the response generation process is essential to building more helpful and reliable dialog agents. However, collecting knowledge-grounded conversations is often costly, calling for a better pre-trained model for grounded dialog generation that generalizes well w.r.t. different types of knowledge. In this work, we propose KPT (Keyword-guided Pre-Training), a novel self-supervised pre-training method for grounded dialog generation without relying on extra knowledge annotation. Specifically, we use a pre-trained language model to extract the most uncertain tokens in the dialog as keywords. With these keywords, we construct two kinds of knowledge and pre-train a knowledge-grounded response generation model, aiming at handling two different scenarios: (1) the knowledge should be faithfully grounded; (2) it can be selectively used. For the former, the grounding knowledge consists of keywords extracted from the response. For the latter, the grounding knowledge is additionally augmented with keywords extracted from other utterances in the same dialog. Since the knowledge is extracted from the dialog itself, KPT can be easily performed on a large volume and variety of dialogue data. We considered three data sources (open-domain, task-oriented, conversational QA) with a total of 2.5M dialogues. We conduct extensive experiments on various few-shot knowledge-grounded generation tasks, including grounding on dialog acts, knowledge graphs, persona descriptions, and Wikipedia passages. 
Our comprehensive experiments and analyses demonstrate that KPT consistently outperforms state-of-the-art methods on these tasks with diverse grounding knowledge.", + "primary_area": "speech natural language processing", + "author": "Qi Zhu; Fei Mi; Zheng Zhang; Yasheng Wang; Yitong Li; Xin Jiang; Qun Liu; Xiaoyan Zhu; Minlie Huang", + "authorids": "", + "aff": "CoAI Group, DCST, IAI, BNRIST, Tsinghua University; Huawei Noah\u2019s Ark Lab; CoAI Group, DCST, IAI, BNRIST, Tsinghua University; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; CoAI Group, DCST, IAI, BNRIST, Tsinghua University; CoAI Group, DCST, IAI, BNRIST, Tsinghua University", + "bibtex": "@article{Zhu_Mi_Zhang_Wang_Li_Jiang_Liu_Zhu_Huang_2023, title={KPT: Keyword-Guided Pre-training for Grounded Dialog Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26646}, DOI={10.1609/aaai.v37i11.26646}, abstractNote={Incorporating external knowledge into the response generation process is essential to building more helpful and reliable dialog agents. However, collecting knowledge-grounded conversations is often costly, calling for a better pre-trained model for grounded dialog generation that generalizes well w.r.t. different types of knowledge. In this work, we propose KPT (Keyword-guided Pre-Training), a novel self-supervised pre-training method for grounded dialog generation without relying on extra knowledge annotation. Specifically, we use a pre-trained language model to extract the most uncertain tokens in the dialog as keywords. With these keywords, we construct two kinds of knowledge and pre-train a knowledge-grounded response generation model, aiming at handling two different scenarios: (1) the knowledge should be faithfully grounded; (2) it can be selectively used. For the former, the grounding knowledge consists of keywords extracted from the response. 
For the latter, the grounding knowledge is additionally augmented with keywords extracted from other utterances in the same dialog. Since the knowledge is extracted from the dialog itself, KPT can be easily performed on a large volume and variety of dialogue data. We considered three data sources (open-domain, task-oriented, conversational QA) with a total of 2.5M dialogues. We conduct extensive experiments on various few-shot knowledge-grounded generation tasks, including grounding on dialog acts, knowledge graphs, persona descriptions, and Wikipedia passages. Our comprehensive experiments and analyses demonstrate that KPT consistently outperforms state-of-the-art methods on these tasks with diverse grounding knowledge.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Qi and Mi, Fei and Zhang, Zheng and Wang, Yasheng and Li, Yitong and Jiang, Xin and Liu, Qun and Zhu, Xiaoyan and Huang, Minlie}, year={2023}, month={Jun.}, pages={14065-14073} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26646/26418", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26646", + "pdf_size": 296225, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3479273745922883172&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;huawei.com;tsinghua.edu.cn;huawei.com;huawei.com;huawei.com;huawei.com;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;huawei.com;tsinghua.edu.cn;huawei.com;huawei.com;huawei.com;huawei.com;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;0;1;1;1;1;0;0", + "aff_unique_norm": "Tsinghua University;Huawei", + "aff_unique_dep": "CoAI Group, DCST, IAI, BNRIST;Noah\u2019s Ark Lab", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "THU;Huawei", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25101", + "title": "KT-Net: Knowledge Transfer for Unpaired 3D Shape Completion", + "track": "main", + "status": "Technical", + "abstract": "Unpaired 3D object completion aims to predict a complete 3D shape from an incomplete input without knowing the correspondence between the complete and incomplete shapes. In this paper, we propose the novel KTNet to solve this task from the new perspective of knowledge transfer. KTNet elaborates a teacher-assistant-student network to establish multiple knowledge transfer processes. Specifically, the teacher network takes complete shape as input and learns the knowledge of complete shape. The student network takes the incomplete one as input and restores the corresponding complete shape. And the assistant modules not only help to transfer the knowledge of complete shape from the teacher to the student, but also judge the learning effect of the student network. As a result, KTNet makes use of a more comprehensive understanding to establish the geometric correspondence between complete and incomplete shapes in a perspective of knowledge transfer, which enables more detailed geometric inference for generating high-quality complete shapes. We conduct comprehensive experiments on several datasets, and the results show that our method outperforms previous methods of unpaired point cloud completion by a large margin. 
Code is available at https://github.com/a4152684/KT-Net.", + "primary_area": "computer vision i", + "author": "Zhen Cao; Wenxiao Zhang; Xin Wen; Zhen Dong; Yu-Shen Liu; Xiongwu Xiao; Bisheng Yang", + "authorids": "", + "aff": "Wuhan University; Singapore University of Technology and Design; JD Logistics; Wuhan University; Tsinghua University; Wuhan University; Wuhan University", + "bibtex": "@article{Cao_Zhang_Wen_Dong_Liu_Xiao_Yang_2023, title={KT-Net: Knowledge Transfer for Unpaired 3D Shape Completion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25101}, DOI={10.1609/aaai.v37i1.25101}, abstractNote={Unpaired 3D object completion aims to predict a complete 3D shape from an incomplete input without knowing the correspondence between the complete and incomplete shapes. In this paper, we propose the novel KTNet to solve this task from the new perspective of knowledge transfer. KTNet elaborates a teacher-assistant-student network to establish multiple knowledge transfer processes. Specifically, the teacher network takes complete shape as input and learns the knowledge of complete shape. The student network takes the incomplete one as input and restores the corresponding complete shape. And the assistant modules not only help to transfer the knowledge of complete shape from the teacher to the student, but also judge the learning effect of the student network. As a result, KTNet makes use of a more comprehensive understanding to establish the geometric correspondence between complete and incomplete shapes in a perspective of knowledge transfer, which enables more detailed geometric inference for generating high-quality complete shapes. We conduct comprehensive experiments on several datasets, and the results show that our method outperforms previous methods of unpaired point cloud completion by a large margin. 
Code is available at https://github.com/a4152684/KT-Net.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Zhen and Zhang, Wenxiao and Wen, Xin and Dong, Zhen and Liu, Yu-Shen and Xiao, Xiongwu and Yang, Bisheng}, year={2023}, month={Jun.}, pages={286-294} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25101/24873", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25101", + "pdf_size": 3624089, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3250896770619957327&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 6, + "aff_domain": "whu.edu.cn;gmail.com;jd.com;whu.edu.cn;tsinghua.edu.cn;whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;gmail.com;jd.com;whu.edu.cn;tsinghua.edu.cn;whu.edu.cn;whu.edu.cn", + "github": "https://github.com/a4152684/KT-Net", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;0;3;0;0", + "aff_unique_norm": "Wuhan University;Singapore University of Technology and Design;JD Logistics;Tsinghua University", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.whu.edu.cn/;https://www.sutd.edu.sg;https://www.jd.com;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "WHU;SUTD;JD;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-27075", + "title": "Kajibuntan: A House Chore Division App", + "track": "demonstrations", + "status": "Technical", + "abstract": "Couples often encounter the challenge of sharing house chores. This raises the fundamental question of how to divide chores. In this paper, we present a new application for a fair division of household chores. Our platform, called Kajibuntan, allows couples to specify the set of chores to be shared, their preferences over them, and the current allocation. 
Our tool visualizes the current allocation and makes proposals according to their preferences based on the theory of fair division. The goal of our tool is to provide a systematic and transparent system to divide household chores and help creating harmony in the home.", + "primary_area": "", + "author": "Ayumi Igarashi; Tomohiko Yokoyama", + "authorids": "", + "aff": "The University of Tokyo; The University of Tokyo", + "bibtex": "@article{Igarashi_Yokoyama_2024, title={Kajibuntan: A House Chore Division App}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27075}, DOI={10.1609/aaai.v37i13.27075}, abstractNote={Couples often encounter the challenge of sharing house chores. This raises the fundamental question of how to divide chores. In this paper, we present a new application for a fair division of household chores. Our platform, called Kajibuntan, allows couples to specify the set of chores to be shared, their preferences over them, and the current allocation. Our tool visualizes the current allocation and makes proposals according to their preferences based on the theory of fair division. 
The goal of our tool is to provide a systematic and transparent system to divide household chores and help creating harmony in the home.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Igarashi, Ayumi and Yokoyama, Tomohiko}, year={2024}, month={Jul.}, pages={16449-16451} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27075/26847", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27075", + "pdf_size": 1968538, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5017653016251418078&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mist.i.u-tokyo.ac.jp;mist.i.u-tokyo.ac.jp", + "email": "mist.i.u-tokyo.ac.jp;mist.i.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Tokyo", + "aff_unique_dep": "", + "aff_unique_url": "https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "UTokyo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26200", + "title": "Kalman Bayesian Neural Networks for Closed-Form Online Learning", + "track": "main", + "status": "Technical", + "abstract": "Compared to point estimates calculated by standard neural networks, Bayesian neural networks (BNN) provide probability distributions over the output predictions and model parameters, i.e., the weights. Training the weight distribution of a BNN, however, is more involved due to the intractability of the underlying Bayesian inference problem and thus, requires efficient approximations. In this paper, we propose a novel approach for BNN learning via closed-form Bayesian inference. 
For this purpose, the calculation of the predictive distribution of the output and the update of the weight distribution are treated as Bayesian filtering and smoothing problems, where the weights are modeled as Gaussian random variables. This allows closed-form expressions for training the network's parameters in a sequential/online fashion without gradient descent. We demonstrate our method on several UCI datasets and compare it to the state of the art.", + "primary_area": "machine learning iii", + "author": "Philipp Wagner; Xinyang Wu; Marco F. Huber", + "authorids": "", + "aff": "Department Cyber Cognitive Intelligence (CCI), Fraunhofer Institute for Manufacturing Engineering and Automation IPA, Stuttgart, Germany; Department Cyber Cognitive Intelligence (CCI), Fraunhofer Institute for Manufacturing Engineering and Automation IPA, Stuttgart, Germany; Department Cyber Cognitive Intelligence (CCI), Fraunhofer Institute for Manufacturing Engineering and Automation IPA, Stuttgart, Germany + Institute of Industrial Manufacturing and Management IFF, University of Stuttgart, Germany", + "bibtex": "@article{Wagner_Wu_Huber_2023, title={Kalman Bayesian Neural Networks for Closed-Form Online Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26200}, DOI={10.1609/aaai.v37i8.26200}, abstractNote={Compared to point estimates calculated by standard neural networks, Bayesian neural networks (BNN) provide probability distributions over the output predictions and model parameters, i.e., the weights. Training the weight distribution of a BNN, however, is more involved due to the intractability of the underlying Bayesian inference problem and thus, requires efficient approximations. In this paper, we propose a novel approach for BNN learning via closed-form Bayesian inference. 
For this purpose, the calculation of the predictive distribution of the output and the update of the weight distribution are treated as Bayesian filtering and smoothing problems, where the weights are modeled as Gaussian random variables. This allows closed-form expressions for training the network\u2019s parameters in a sequential/online fashion without gradient descent. We demonstrate our method on several UCI datasets and compare it to the state of the art.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wagner, Philipp and Wu, Xinyang and Huber, Marco F.}, year={2023}, month={Jun.}, pages={10069-10077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26200/25972", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26200", + "pdf_size": 1013437, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3422455477203086867&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "ipa.fraunhofer.de;ipa.fraunhofer.de;ieee.org", + "email": "ipa.fraunhofer.de;ipa.fraunhofer.de;ieee.org", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Fraunhofer Institute for Manufacturing Engineering and Automation IPA;University of Stuttgart", + "aff_unique_dep": "Department Cyber Cognitive Intelligence (CCI);Institute of Industrial Manufacturing and Management IFF", + "aff_unique_url": "https://www.ipa.fraunhofer.de;https://www.uni-stuttgart.de", + "aff_unique_abbr": "Fraunhofer IPA;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stuttgart;", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25667", + "title": "KerPrint: Local-Global Knowledge Graph Enhanced Diagnosis Prediction for Retrospective and Prospective Interpretations", + "track": "main", + "status": "Technical", + "abstract": "While recent developments of deep learning models have led 
to record-breaking achievements in many areas, the lack of sufficient interpretation remains a problem for many specific applications, such as the diagnosis prediction task in healthcare. The previous knowledge graph(KG) enhanced approaches mainly focus on learning clinically meaningful representations, the importance of medical concepts, and even the knowledge paths from inputs to labels. However, it is infeasible to interpret the diagnosis prediction, which needs to consider different medical concepts, various medical relationships, and the time-effectiveness of knowledge triples in different patient contexts. More importantly, the retrospective and prospective interpretations of disease processes are valuable to clinicians for the patients' confounding diseases. We propose KerPrint, a novel KG enhanced approach for retrospective and prospective interpretations to tackle these problems. Specifically, we propose a time-aware KG attention method to solve the problem of knowledge decay over time for trustworthy retrospective interpretation. We also propose a novel element-wise attention method to select candidate global knowledge using comprehensive representations from the local KG for prospective interpretation. We validate the effectiveness of our KerPrint through an extensive experimental study on a real-world dataset and a public dataset. 
The results show that our proposed approach not only achieves significant improvement over knowledge-enhanced methods but also gives the interpretability of diagnosis prediction in both retrospective and prospective views.", + "primary_area": "domain s of application", + "author": "Kai Yang; Yongxin Xu; Peinie Zou; Hongxin Ding; Junfeng Zhao; Yasha Wang; Bing Xie", + "authorids": "", + "aff": "Zhongguancun Laboratory, Beijing 100094, China; Key Laboratory of High Confidence Software Technologies, Ministry of Education, Beijing 100871, China+School of Computer Science, Peking University, Beijing 100871, China; Key Laboratory of High Confidence Software Technologies, Ministry of Education, Beijing 100871, China+School of Computer Science, Peking University, Beijing 100871, China; Key Laboratory of High Confidence Software Technologies, Ministry of Education, Beijing 100871, China+School of Computer Science, Peking University, Beijing 100871, China; Key Laboratory of High Confidence Software Technologies, Ministry of Education, Beijing 100871, China+School of Computer Science, Peking University, Beijing 100871, China+Peking University Information Technology Institute (Tianjin Binhai), Tianjin 300450, China; National Engineering Research Center For Software Engineering, Peking University, Beijing 100871, China+Key Laboratory of High Confidence Software Technologies, Ministry of Education, Beijing 100871, China+Peking University Information Technology Institute (Tianjin Binhai), Tianjin 300450, China; Key Laboratory of High Confidence Software Technologies, Ministry of Education, Beijing 100871, China+School of Computer Science, Peking University, Beijing 100871, China+Peking University Information Technology Institute (Tianjin Binhai), Tianjin 300450, China", + "bibtex": "@article{Yang_Xu_Zou_Ding_Zhao_Wang_Xie_2023, title={KerPrint: Local-Global Knowledge Graph Enhanced Diagnosis Prediction for Retrospective and Prospective Interpretations}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25667}, DOI={10.1609/aaai.v37i4.25667}, abstractNote={While recent developments of deep learning models have led to record-breaking achievements in many areas, the lack of sufficient interpretation remains a problem for many specific applications, such as the diagnosis prediction task in healthcare. The previous knowledge graph(KG) enhanced approaches mainly focus on learning clinically meaningful representations, the importance of medical concepts, and even the knowledge paths from inputs to labels. However, it is infeasible to interpret the diagnosis prediction, which needs to consider different medical concepts, various medical relationships, and the time-effectiveness of knowledge triples in different patient contexts. More importantly, the retrospective and prospective interpretations of disease processes are valuable to clinicians for the patients\u2019 confounding diseases. We propose KerPrint, a novel KG enhanced approach for retrospective and prospective interpretations to tackle these problems. Specifically, we propose a time-aware KG attention method to solve the problem of knowledge decay over time for trustworthy retrospective interpretation. We also propose a novel element-wise attention method to select candidate global knowledge using comprehensive representations from the local KG for prospective interpretation. We validate the effectiveness of our KerPrint through an extensive experimental study on a real-world dataset and a public dataset. 
The results show that our proposed approach not only achieves significant improvement over knowledge-enhanced methods but also gives the interpretability of diagnosis prediction in both retrospective and prospective views.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Kai and Xu, Yongxin and Zou, Peinie and Ding, Hongxin and Zhao, Junfeng and Wang, Yasha and Xie, Bing}, year={2023}, month={Jun.}, pages={5357-5365} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25667/25439", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25667", + "pdf_size": 2634091, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18005432697697531745&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.zgclab.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "mail.zgclab.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1+2;1+2;1+2;1+2+2;2+1+2;1+2+2", + "aff_unique_norm": "Zhongguancun Laboratory;Key Laboratory of High Confidence Software Technologies;Peking University", + "aff_unique_dep": ";Ministry of Education;School of Computer Science", + "aff_unique_url": ";;http://www.pku.edu.cn", + "aff_unique_abbr": ";;PKU", + "aff_campus_unique_index": "1;1;1;1+2;1+2;1+2", + "aff_campus_unique": ";Beijing;Tianjin Binhai", + "aff_country_unique_index": "0;0+0;0+0;0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25995", + "title": "Key Feature Replacement of In-Distribution Samples for Out-of-Distribution Detection", + "track": "main", + "status": "Technical", + "abstract": "Out-of-distribution (OOD) detection can be used in deep learning-based applications to reject outlier samples from being unreliably classified by deep neural networks. 
Learning to classify between OOD and in-distribution samples is difficult because data comprising the former is extremely diverse. It has been observed that an auxiliary OOD dataset is most effective in training a ``rejection'' network when its samples are semantically similar to in-distribution images. We first deduce that OOD images are perceived by a deep neural network to be semantically similar to in-distribution samples when they share a common background, as deep networks are observed to incorrectly classify such images with high confidence. We then propose a simple yet effective Key In-distribution feature Replacement BY inpainting (KIRBY) procedure that constructs a surrogate OOD dataset by replacing class-discriminative features of in-distribution samples with marginal background features. The procedure can be implemented using off-the-shelf vision algorithms, where each step within the algorithm is shown to make the surrogate data increasingly similar to in-distribution data. Design choices in each step are studied extensively, and an exhaustive comparison with state-of-the-art algorithms demonstrates KIRBY's competitiveness on various benchmarks.", + "primary_area": "machine learning ii", + "author": "Jaeyoung Kim; Seo Taek Kong; Dongbin Na; Kyu-Hwan Jung", + "authorids": "", + "aff": "VUNO, Inc.; University of Illinois, Urbana-Champaign; VUNO, Inc.; Samsung Advanced Institute for Health Sciences and Technology, Sungkyunkwan University", + "bibtex": "@article{Kim_Kong_Na_Jung_2023, title={Key Feature Replacement of In-Distribution Samples for Out-of-Distribution Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25995}, DOI={10.1609/aaai.v37i7.25995}, abstractNote={Out-of-distribution (OOD) detection can be used in deep learning-based applications to reject outlier samples from being unreliably classified by deep neural networks. 
Learning to classify between OOD and in-distribution samples is difficult because data comprising the former is extremely diverse. It has been observed that an auxiliary OOD dataset is most effective in training a ``rejection\u2019\u2019 network when its samples are semantically similar to in-distribution images. We first deduce that OOD images are perceived by a deep neural network to be semantically similar to in-distribution samples when they share a common background, as deep networks are observed to incorrectly classify such images with high confidence. We then propose a simple yet effective Key In-distribution feature Replacement BY inpainting (KIRBY) procedure that constructs a surrogate OOD dataset by replacing class-discriminative features of in-distribution samples with marginal background features. The procedure can be implemented using off-the-shelf vision algorithms, where each step within the algorithm is shown to make the surrogate data increasingly similar to in-distribution data. 
Design choices in each step are studied extensively, and an exhaustive comparison with state-of-the-art algorithms demonstrates KIRBY\u2019s competitiveness on various benchmarks.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Jaeyoung and Kong, Seo Taek and Na, Dongbin and Jung, Kyu-Hwan}, year={2023}, month={Jun.}, pages={8246-8254} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25995/25767", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25995", + "pdf_size": 882108, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4232429992694683291&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "vuno.co;illinois.edu;vuno.co;skku.edu", + "email": "vuno.co;illinois.edu;vuno.co;skku.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "VUNO, Inc.;University of Illinois;Sungkyunkwan University", + "aff_unique_dep": ";;Samsung Advanced Institute for Health Sciences and Technology", + "aff_unique_url": "https://www.vuno.co.kr;https://illinois.edu;https://www.sungkyunkwan.ac.kr", + "aff_unique_abbr": "VUNO;UIUC;SKKU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-26948", + "title": "Know Your Enemy: Identifying Adversarial Behaviours in Deep Reinforcement Learning Agents (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "It has been shown that an agent can be trained with an adversarial policy which achieves high degrees of success against a state-of-the-art DRL victim despite taking unintuitive actions. This prompts the question: is this adversarial behaviour detectable through the observations of the victim alone? 
We find that widely used classification methods such as random forests are only able to achieve a maximum of \u224871% test set accuracy when classifying an agent for a single timestep. However, when the classifier inputs are treated as time-series data, test set classification accuracy is increased significantly to \u224898%. This is true for both classification of episodes as a whole, and for \u201clive\u201d classification at each timestep in an episode. These classifications can then be used to \u201creact\u201d to incoming attacks and increase the overall win rate against Adversarial opponents by approximately 17%. Classification of the victim\u2019s own internal activations in response to the adversary is shown to achieve similarly impressive accuracy while also offering advantages like increased transferability to other domains.", + "primary_area": "", + "author": "Se\u00e1n Caulfield Curley; Karl Mason; Patrick Mannion", + "authorids": "", + "aff": "School of Computer Science, University of Galway, Ireland; School of Computer Science, University of Galway, Ireland; School of Computer Science, University of Galway, Ireland", + "bibtex": "@article{Caulfield Curley_Mason_Mannion_2024, title={Know Your Enemy: Identifying Adversarial Behaviours in Deep Reinforcement Learning Agents (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26948}, DOI={10.1609/aaai.v37i13.26948}, abstractNote={It has been shown that an agent can be trained with an adversarial policy which achieves high degrees of success against a state-of-the-art DRL victim despite taking unintuitive actions. This prompts the question: is this adversarial behaviour detectable through the observations of the victim alone? We find that widely used classification methods such as random forests are only able to achieve a maximum of \u224871% test set accuracy when classifying an agent for a single timestep. 
However, when the classifier inputs are treated as time-series data, test set classification accuracy is increased significantly to \u224898%. This is true for both classification of episodes as a whole, and for \u201clive\u201d classification at each timestep in an episode. These classifications can then be used to \u201creact\u201d to incoming attacks and increase the overall win rate against Adversarial opponents by approximately 17%. Classification of the victim\u2019s own internal activations in response to the adversary is shown to achieve similarly impressive accuracy while also offering advantages like increased transferability to other domains.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Caulfield Curley, Se\u00e1n and Mason, Karl and Mannion, Patrick}, year={2024}, month={Jul.}, pages={16176-16177} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26948/26720", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26948", + "pdf_size": 120941, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:czYnsIEtQAgJ:scholar.google.com/&scioq=Know+Your+Enemy:+Identifying+Adversarial+Behaviours+in+Deep+Reinforcement+Learning+Agents+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "nuigalway.ie;universityofgalway.ie;universityofgalway.ie", + "email": "nuigalway.ie;universityofgalway.ie;universityofgalway.ie", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Galway", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.universityofgalway.ie", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Ireland" + }, + { + "id": "article-27084", + "title": "KnowGL: Knowledge Generation and Linking from Text", + "track": "demonstrations", + "status": 
"Technical", + "abstract": "We propose KnowGL, a tool that allows converting text into structured relational data represented as a set of ABox assertions compliant with the TBox of a given Knowledge Graph (KG), such as Wikidata. \nWe address this problem as a sequence generation task by leveraging pre-trained sequence-to-sequence language models, e.g. BART.\nGiven a sentence, we fine-tune such models to detect pairs of entity mentions and jointly generate a set of facts consisting of the full set of semantic annotations for a KG, such as entity labels, entity types, and their relationships.\nTo showcase the capabilities of our tool, we build a web application consisting of a set of UI widgets that help users to navigate through the semantic data extracted from a given input text. We make the KnowGL model available at https://huggingface.co/ibm/knowgl-large.", + "primary_area": "", + "author": "Gaetano Rossiello; Md. Faisal Mahbub Chowdhury; Nandana Mihindukulasooriya; Owen Cornec; Alfio Massimiliano Gliozzo", + "authorids": "", + "aff": "IBM Research AI; IBM Research AI; IBM Research AI; IBM Research AI; IBM Research AI", + "bibtex": "@article{Rossiello_Chowdhury_Mihindukulasooriya_Cornec_Gliozzo_2024, title={KnowGL: Knowledge Generation and Linking from Text}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27084}, DOI={10.1609/aaai.v37i13.27084}, abstractNote={We propose KnowGL, a tool that allows converting text into structured relational data represented as a set of ABox assertions compliant with the TBox of a given Knowledge Graph (KG), such as Wikidata. We address this problem as a sequence generation task by leveraging pre-trained sequence-to-sequence language models, e.g. 
BART.\nGiven a sentence, we fine-tune such models to detect pairs of entity mentions and jointly generate a set of facts consisting of the full set of semantic annotations for a KG, such as entity labels, entity types, and their relationships.\nTo showcase the capabilities of our tool, we build a web application consisting of a set of UI widgets that help users to navigate through the semantic data extracted from a given input text. We make the KnowGL model available at https://huggingface.co/ibm/knowgl-large.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rossiello, Gaetano and Chowdhury, Md. Faisal Mahbub and Mihindukulasooriya, Nandana and Cornec, Owen and Gliozzo, Alfio Massimiliano}, year={2024}, month={Jul.}, pages={16476-16478} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27084/26856", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27084", + "pdf_size": 126853, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8462078948335400327&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "; ; ; ; ", + "email": "; ; ; ; ", + "github": "", + "project": "https://huggingface.co/ibm/knowgl-large", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "AI", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26190", + "title": "Knowledge Amalgamation for Multi-Label Classification via Label Dependency Transfer", + "track": "main", + "status": "Technical", + "abstract": "Multi-label classification (MLC), which assigns multiple labels to each instance, is crucial to domains from computer vision to text mining. 
Conventional methods for MLC require huge amounts of labeled data to capture complex dependencies between labels. However, such labeled datasets are expensive, or even impossible, to acquire. Worse yet, these pre-trained MLC models can only be used for the particular label set covered in the training data. Despite this severe limitation, few methods exist for expanding the set of labels predicted by pre-trained models. Instead, we acquire vast amounts of new labeled data and retrain a new model from scratch. Here, we propose combining the knowledge from multiple pre-trained models (teachers) to train a new student model that covers the union of the labels predicted by this set of teachers. This student supports a broader label set than any one of its teachers without using labeled data. We call this new problem knowledge amalgamation for multi-label classification. Our new method, Adaptive KNowledge Transfer (ANT), trains a student by learning from each teacher\u2019s partial knowledge of label dependencies to infer the global dependencies between all labels across the teachers. 
We show that ANT succeeds in unifying label dependencies among teachers, outperforming five state-of-the-art methods on eight real-world datasets.", + "primary_area": "machine learning iii", + "author": "Jidapa Thadajarassiri; Thomas Hartvigsen; Walter Gerych; Xiangnan Kong; Elke Rundensteiner", + "authorids": "", + "aff": "Worcester Polytechnic Institute; Massachusetts Institute of Technology; Worcester Polytechnic Institute; Worcester Polytechnic Institute; Worcester Polytechnic Institute", + "bibtex": "@article{Thadajarassiri_Hartvigsen_Gerych_Kong_Rundensteiner_2023, title={Knowledge Amalgamation for Multi-Label Classification via Label Dependency Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26190}, DOI={10.1609/aaai.v37i8.26190}, abstractNote={Multi-label classification (MLC), which assigns multiple labels to each instance, is crucial to domains from computer vision to text mining. Conventional methods for MLC require huge amounts of labeled data to capture complex dependencies between labels. However, such labeled datasets are expensive, or even impossible, to acquire. Worse yet, these pre-trained MLC models can only be used for the particular label set covered in the training data. Despite this severe limitation, few methods exist for expanding the set of labels predicted by pre-trained models. Instead, we acquire vast amounts of new labeled data and retrain a new model from scratch. Here, we propose combining the knowledge from multiple pre-trained models (teachers) to train a new student model that covers the union of the labels predicted by this set of teachers. This student supports a broader label set than any one of its teachers without using labeled data. We call this new problem knowledge amalgamation for multi-label classification. 
Our new method, Adaptive KNowledge Transfer (ANT), trains a student by learning from each teacher\u2019s partial knowledge of label dependencies to infer the global dependencies between all labels across the teachers. We show that ANT succeeds in unifying label dependencies among teachers, outperforming five state-of-the-art methods on eight real-world datasets.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Thadajarassiri, Jidapa and Hartvigsen, Thomas and Gerych, Walter and Kong, Xiangnan and Rundensteiner, Elke}, year={2023}, month={Jun.}, pages={9980-9988} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26190/25962", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26190", + "pdf_size": 737464, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4461587070327299780&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "wpi.edu;mit.edu;wpi.edu;wpi.edu;wpi.edu", + "email": "wpi.edu;mit.edu;wpi.edu;wpi.edu;wpi.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Worcester Polytechnic Institute;Massachusetts Institute of Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.wpi.edu;https://web.mit.edu", + "aff_unique_abbr": "WPI;MIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25600", + "title": "Knowledge Graph Embedding by Normalizing Flows", + "track": "main", + "status": "Technical", + "abstract": "A key to knowledge graph embedding (KGE) is to choose a proper representation space, e.g., point-wise Euclidean space and complex vector space. In this paper, we propose a unified perspective of embedding and introduce uncertainty into KGE from the view of group theory. 
Our model can incorporate existing models (i.e., generality), ensure the computation is tractable (i.e., efficiency) and enjoy the expressive power of complex random variables (i.e., expressiveness). The core idea is that we embed entities/relations as elements of a symmetric group, i.e., permutations of a set. Permutations of different sets can reflect different properties of embedding. And the group operation of symmetric groups is easy to compute. In specific, we show that the embedding of many existing models, point vectors, can be seen as elements of a symmetric group. To reflect uncertainty, we first embed entities/relations as permutations of a set of random variables. A permutation can transform a simple random variable into a complex random variable for greater expressiveness, called a normalizing flow. We then define scoring functions by measuring the similarity of two normalizing flows, namely NFE. We construct several instantiating models and prove that they are able to learn logical rules. Experimental results demonstrate the effectiveness of introducing uncertainty and our model. The code is available at https://github.com/changyi7231/NFE.", + "primary_area": "data mining and knowledge management", + "author": "Changyi Xiao; Xiangnan He; Yixin Cao", + "authorids": "", + "aff": "School of Data Science, University of Science and Technology of China; School of Data Science, University of Science and Technology of China; School of Computing and Information System, Singapore Management University", + "bibtex": "@article{Xiao_He_Cao_2023, title={Knowledge Graph Embedding by Normalizing Flows}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25600}, DOI={10.1609/aaai.v37i4.25600}, abstractNote={A key to knowledge graph embedding (KGE) is to choose a proper representation space, e.g., point-wise Euclidean space and complex vector space. 
In this paper, we propose a unified perspective of embedding and introduce uncertainty into KGE from the view of group theory. Our model can incorporate existing models (i.e., generality), ensure the computation is tractable (i.e., efficiency) and enjoy the expressive power of complex random variables (i.e., expressiveness). The core idea is that we embed entities/relations as elements of a symmetric group, i.e., permutations of a set. Permutations of different sets can reflect different properties of embedding. And the group operation of symmetric groups is easy to compute. In specific, we show that the embedding of many existing models, point vectors, can be seen as elements of a symmetric group. To reflect uncertainty, we first embed entities/relations as permutations of a set of random variables. A permutation can transform a simple random variable into a complex random variable for greater expressiveness, called a normalizing flow. We then define scoring functions by measuring the similarity of two normalizing flows, namely NFE. We construct several instantiating models and prove that they are able to learn logical rules. Experimental results demonstrate the effectiveness of introducing uncertainty and our model. 
The code is available at https://github.com/changyi7231/NFE.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiao, Changyi and He, Xiangnan and Cao, Yixin}, year={2023}, month={Jun.}, pages={4756-4764} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25600/25372", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25600", + "pdf_size": 152721, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6223669641398481343&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.ustc.edu.cn;gmail.com;gmail.com", + "email": "mail.ustc.edu.cn;gmail.com;gmail.com", + "github": "https://github.com/changyi7231/NFE", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of Science and Technology of China;Singapore Management University", + "aff_unique_dep": "School of Data Science;School of Computing and Information System", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.smu.edu.sg", + "aff_unique_abbr": "USTC;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26641", + "title": "Knowledge-Bridged Causal Interaction Network for Causal Emotion Entailment", + "track": "main", + "status": "Technical", + "abstract": "Causal Emotion Entailment aims to identify causal utterances that are responsible for the target utterance with a non-neutral emotion in conversations. Previous works are limited in thorough understanding of the conversational context and accurate reasoning of the emotion cause. To this end, we propose Knowledge-Bridged Causal Interaction Network (KBCIN) with commonsense knowledge (CSK) leveraged as three bridges. 
Specifically, we construct a conversational graph for each conversation and leverage the event-centered CSK as the semantics-level bridge (S-bridge) to capture the deep inter-utterance dependencies in the conversational context via the CSK-Enhanced Graph Attention module. Moreover, social-interaction CSK serves as emotion-level bridge (E-bridge) and action-level bridge (A-bridge) to connect candidate utterances with the target one, which provides explicit causal clues for the Emotional Interaction module and Actional Interaction module to reason the target emotion. Experimental results show that our model achieves better performance over most baseline models. Our source code is publicly available at https://github.com/circle-hit/KBCIN.", + "primary_area": "speech natural language processing", + "author": "Weixiang Zhao; Yanyan Zhao; Zhuojun Li; Bing Qin", + "authorids": "", + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Insititute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Insititute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Insititute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Insititute of Technology, China", + "bibtex": "@article{Zhao_Zhao_Li_Qin_2023, title={Knowledge-Bridged Causal Interaction Network for Causal Emotion Entailment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26641}, DOI={10.1609/aaai.v37i11.26641}, abstractNote={Causal Emotion Entailment aims to identify causal utterances that are responsible for the target utterance with a non-neutral emotion in conversations. Previous works are limited in thorough understanding of the conversational context and accurate reasoning of the emotion cause. To this end, we propose Knowledge-Bridged Causal Interaction Network (KBCIN) with commonsense knowledge (CSK) leveraged as three bridges. 
Specifically, we construct a conversational graph for each conversation and leverage the event-centered CSK as the semantics-level bridge (S-bridge) to capture the deep inter-utterance dependencies in the conversational context via the CSK-Enhanced Graph Attention module. Moreover, social-interaction CSK serves as emotion-level bridge (E-bridge) and action-level bridge (A-bridge) to connect candidate utterances with the target one, which provides explicit causal clues for the Emotional Interaction module and Actional Interaction module to reason the target emotion. Experimental results show that our model achieves better performance over most baseline models. Our source code is publicly available at https://github.com/circle-hit/KBCIN.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Weixiang and Zhao, Yanyan and Li, Zhuojun and Qin, Bing}, year={2023}, month={Jun.}, pages={14020-14028} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26641/26413", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26641", + "pdf_size": 782877, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3344396124641374659&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "github": "https://github.com/circle-hit/KBCIN", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval", + "aff_unique_url": "http://www.hit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25983", + "title": "Knowledge-Constrained Answer Generation for 
Open-Ended Video Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Open-ended Video question answering (open-ended VideoQA) aims to understand video content and question semantics to generate the correct answers. Most of the best performing models define the problem as a discriminative task of multi-label classification. In real-world scenarios, however, it is difficult to define a candidate set that includes all possible answers. In this paper, we propose a Knowledge-constrained Generative VideoQA Algorithm (KcGA) with an encoder-decoder pipeline, which enables out-of-domain answer generation through an adaptive external knowledge module and a multi-stream information control mechanism. We use ClipBERT to extract the video-question features, extract framewise object-level external knowledge from a commonsense knowledge base and compute the contextual-aware episode memory units via an attention based GRU to form the external knowledge features, and exploit multi-stream information control mechanism to fuse video-question and external knowledge features such that the semantic complementation and alignment are well achieved. 
We evaluate our model on two open-ended benchmark datasets to demonstrate that we can effectively and robustly generate high-quality answers without restrictions of training data.", + "primary_area": "machine learning ii", + "author": "Yao Jin; Guocheng Niu; Xinyan Xiao; Jian Zhang; Xi Peng; Jun Yu", + "authorids": "", + "aff": "Hangzhou Dianzi University; Baidu Inc.; Baidu Inc.; Zhejiang International Studies University; College of Computer Science, Sichuan Univerisity; Hangzhou Dianzi University", + "bibtex": "@article{Jin_Niu_Xiao_Zhang_Peng_Yu_2023, title={Knowledge-Constrained Answer Generation for Open-Ended Video Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25983}, DOI={10.1609/aaai.v37i7.25983}, abstractNote={Open-ended Video question answering (open-ended VideoQA) aims to understand video content and question semantics to generate the correct answers. Most of the best performing models define the problem as a discriminative task of multi-label classification. In real-world scenarios, however, it is difficult to define a candidate set that includes all possible answers. In this paper, we propose a Knowledge-constrained Generative VideoQA Algorithm (KcGA) with an encoder-decoder pipeline, which enables out-of-domain answer generation through an adaptive external knowledge module and a multi-stream information control mechanism. We use ClipBERT to extract the video-question features, extract framewise object-level external knowledge from a commonsense knowledge base and compute the contextual-aware episode memory units via an attention based GRU to form the external knowledge features, and exploit multi-stream information control mechanism to fuse video-question and external knowledge features such that the semantic complementation and alignment are well achieved. 
We evaluate our model on two open-ended benchmark datasets to demonstrate that we can effectively and robustly generate high-quality answers without restrictions of training data.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Yao and Niu, Guocheng and Xiao, Xinyan and Zhang, Jian and Peng, Xi and Yu, Jun}, year={2023}, month={Jun.}, pages={8141-8149} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25983/25755", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25983", + "pdf_size": 898269, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10549143922868876208&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;baidu.com;baidu.com;outlook.com;gmail.com;hdu.edu.cn", + "email": "gmail.com;baidu.com;baidu.com;outlook.com;gmail.com;hdu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;2;3;0", + "aff_unique_norm": "Hangzhou Dianzi University;Baidu Inc.;Zhejiang International Studies University;Sichuan University", + "aff_unique_dep": ";;;College of Computer Science", + "aff_unique_url": "http://www.hdu.edu.cn/;https://www.baidu.com;http://www.zisu.edu.cn;http://www.scu.edu.cn", + "aff_unique_abbr": "HGHDU;Baidu;;SCU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26926", + "title": "Knowledge-Embedded Narrative Construction from Open Source Intelligence", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Storytelling is an innate part of language-based communication. Today, current events are reported via Open Source Intelligence (OSINT) sources like news websites, blogs, and discussion forums. 
Scattered and fragmented sources such as these can be better understood when organized as chains of event plot points, or narratives, that have the ability to communicate end-end stories. Though search engines can retrieve aggregated event information, they lack the ability to sequence relevant events together to form narratives about different topics. I propose an AI system inspired by Gustav Freytag\u2019s narrative theory called the Plot Element Pyramid and use knowledge graphs to represent, chain, and reason over narratives from disparately sourced event details to better comprehend convoluted, noisy information about critical events during intelligence analysis.", + "primary_area": "", + "author": "Priyanka Ranade", + "authorids": "", + "aff": "University of Maryland, Baltimore County, Department of Computer Science & Electrical Engineering", + "bibtex": "@article{Ranade_2024, title={Knowledge-Embedded Narrative Construction from Open Source Intelligence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26926}, DOI={10.1609/aaai.v37i13.26926}, abstractNote={Storytelling is an innate part of language-based communication. Today, current events are reported via Open Source Intelligence (OSINT) sources like news websites, blogs, and discussion forums. Scattered and fragmented sources such as these can be better understood when organized as chains of event plot points, or narratives, that have the ability to communicate end-end stories. Though search engines can retrieve aggregated event information, they lack the ability to sequence relevant events together to form narratives about different topics. 
I propose an AI system inspired by Gustav Freytag\u2019s narrative theory called the Plot Element Pyramid and use knowledge graphs to represent, chain, and reason over narratives from disparately sourced event details to better comprehend convoluted, noisy information about critical events during intelligence analysis.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ranade, Priyanka}, year={2024}, month={Jul.}, pages={16131-16132} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26926/26698", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26926", + "pdf_size": 62572, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2318884777150805520&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff_domain": "umbc.edu", + "email": "umbc.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Maryland, Baltimore County", + "aff_unique_dep": "Department of Computer Science & Electrical Engineering", + "aff_unique_url": "https://www.umbc.edu", + "aff_unique_abbr": "UMBC", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Baltimore County", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26554", + "title": "LADA-Trans-NER: Adaptive Efficient Transformer for Chinese Named Entity Recognition Using Lexicon-Attention and Data-Augmentation", + "track": "main", + "status": "Technical", + "abstract": "Recently, word enhancement has become very popular for Chinese Named Entity Recognition (NER), reducing segmentation errors and increasing the semantic and boundary information of Chinese words. However, these methods tend to ignore the semantic relationship before and after the sentence after integrating lexical information. Therefore, the regularity of word length information has not been fully explored in various word-character fusion methods. 
In this work, we propose a Lexicon-Attention and Data-Augmentation (LADA) method for Chinese NER. We discuss the challenges of using existing methods in incorporating word information for NER and show how our proposed methods could be leveraged to overcome those challenges. LADA is based on a Transformer Encoder that utilizes lexicon to construct a directed graph and fuses word information through updating the optimal edge of the graph. Specially, we introduce the advanced data augmentation method to obtain the optimal representation for the NER task. Experimental results show that the augmentation done using LADA can considerably boost the performance of our NER system and achieve significantly better results than previous state-of-the-art methods and variant models in the literature on four publicly available NER datasets, namely Resume, MSRA, Weibo, and OntoNotes v4. We also observe better generalization and application to a real-world setting from LADA on multi-source complex entities.", + "primary_area": "speech natural language processing", + "author": "Jiguo Liu; Chao Liu; Nan Li; Shihao Gao; Mingqi Liu; Dali Zhu", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, 
China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Liu_Liu_Li_Gao_Liu_Zhu_2023, title={LADA-Trans-NER: Adaptive Efficient Transformer for Chinese Named Entity Recognition Using Lexicon-Attention and Data-Augmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26554}, DOI={10.1609/aaai.v37i11.26554}, abstractNote={Recently, word enhancement has become very popular for Chinese Named Entity Recognition (NER), reducing segmentation errors and increasing the semantic and boundary information of Chinese words. However, these methods tend to ignore the semantic relationship before and after the sentence after integrating lexical information. Therefore, the regularity of word length information has not been fully explored in various word-character fusion methods. In this work, we propose a Lexicon-Attention and Data-Augmentation (LADA) method for Chinese NER. We discuss the challenges of using existing methods in incorporating word information for NER and show how our proposed methods could be leveraged to overcome those challenges. LADA is based on a Transformer Encoder that utilizes lexicon to construct a directed graph and fuses word information through updating the optimal edge of the graph. Specially, we introduce the advanced data augmentation method to obtain the optimal representation for the NER task. Experimental results show that the augmentation done using LADA can considerably boost the performance of our NER system and achieve significantly better results than previous state-of-the-art methods and variant models in the literature on four publicly available NER datasets, namely Resume, MSRA, Weibo, and OntoNotes v4. 
We also observe better generalization and application to a real-world setting from LADA on multi-source complex entities.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Jiguo and Liu, Chao and Li, Nan and Gao, Shihao and Liu, Mingqi and Zhu, Dali}, year={2023}, month={Jun.}, pages={13236-13245} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26554/26326", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26554", + "pdf_size": 970126, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10675921455749170722&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25530", + "title": "LANCER: A Lifetime-Aware News Recommender System", + "track": "main", + "status": "Technical", + "abstract": "From the observation that users reading news tend to not click outdated news, we propose the notion of 'lifetime' of news, with two hypotheses: (i) news has a shorter lifetime, compared to other types of items such as movies or e-commerce products; (ii) news only competes with other news whose lifetimes have not ended, and which has an overlapping lifetime (i.e., limited competitions). 
By further developing the characteristics of the lifetime of news, then we present a novel approach for news recommendation, namely, Lifetime-Aware News reCommEndeR System (LANCER) that carefully exploits the lifetime of news during training and recommendation. Using real-world news datasets (e.g., Adressa and MIND), we successfully demonstrate that state-of-the-art news recommendation models can get significantly benefited by integrating the notion of lifetime and LANCER, by up to about 40% increases in recommendation accuracy.", + "primary_area": "data mining and knowledge management", + "author": "Hong-Kyun Bae; Jeewon Ahn; Dongwon Lee; Sang-Wook Kim", + "authorids": "", + "aff": "Department of Computer Science, Hanyang University, South Korea; Department of Computer Science, Hanyang University, South Korea; College of Information Sciences and Technology, The Pennsylvania State University, USA; Department of Computer Science, Hanyang University, South Korea", + "bibtex": "@article{Bae_Ahn_Lee_Kim_2023, title={LANCER: A Lifetime-Aware News Recommender System}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25530}, DOI={10.1609/aaai.v37i4.25530}, abstractNote={From the observation that users reading news tend to not click outdated news, we propose the notion of \u2019lifetime\u2019 of news, with two hypotheses: (i) news has a shorter lifetime, compared to other types of items such as movies or e-commerce products; (ii) news only competes with other news whose lifetimes have not ended, and which has an overlapping lifetime (i.e., limited competitions). By further developing the characteristics of the lifetime of news, then we present a novel approach for news recommendation, namely, Lifetime-Aware News reCommEndeR System (LANCER) that carefully exploits the lifetime of news during training and recommendation. 
Using real-world news datasets (e.g., Adressa and MIND), we successfully demonstrate that state-of-the-art news recommendation models can get significantly benefited by integrating the notion of lifetime and LANCER, by up to about 40% increases in recommendation accuracy.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bae, Hong-Kyun and Ahn, Jeewon and Lee, Dongwon and Kim, Sang-Wook}, year={2023}, month={Jun.}, pages={4141-4148} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25530/25302", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25530", + "pdf_size": 376457, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7283577414193026307&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "hanyang.ac.kr;hanyang.ac.kr;psu.edu;hanyang.ac.kr", + "email": "hanyang.ac.kr;hanyang.ac.kr;psu.edu;hanyang.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Hanyang University;The Pennsylvania State University", + "aff_unique_dep": "Department of Computer Science;College of Information Sciences and Technology", + "aff_unique_url": "http://www.hanyang.ac.kr;https://www.psu.edu", + "aff_unique_abbr": "HYU;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-26965", + "title": "LEAN-DMKDE: Quantum Latent Density Estimation for Anomaly Detection (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "This paper presents an anomaly detection model that combines the strong statistical foundation of density-estimation-based anomaly detection methods with the representation-learning ability of deep-learning models. 
The method combines an autoencoder, that learns a low-dimensional representation of the data, with a density-estimation model based on density matrices in an end-to-end architecture that can be trained using gradient-based optimization techniques. A systematic experimental evaluation was performed on different benchmark datasets. The experimental results show that the method is able to outperform other state-of-the-art methods.", + "primary_area": "", + "author": "Joseph A. Gallego-Mejia; Oscar A. Bustos-Brinez; Fabio A. Gonz\u00e1lez", + "authorids": "", + "aff": "Universidad Nacional de Colombia; Universidad Nacional de Colombia; Universidad Nacional de Colombia", + "bibtex": "@article{Gallego-Mejia_Bustos-Brinez_Gonz\u00e1lez_2024, title={LEAN-DMKDE: Quantum Latent Density Estimation for Anomaly Detection (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26965}, DOI={10.1609/aaai.v37i13.26965}, abstractNote={This paper presents an anomaly detection model that combines the strong statistical foundation of density-estimation-based anomaly detection methods with the representation-learning ability of deep-learning models. The method combines an autoencoder, that learns a low-dimensional representation of the data, with a density-estimation model based on density matrices in an end-to-end architecture that can be trained using gradient-based optimization techniques. A systematic experimental evaluation was performed on different benchmark datasets. The experimental results show that the method is able to outperform other state-of-the-art methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gallego-Mejia, Joseph A. and Bustos-Brinez, Oscar A. 
and Gonz\u00e1lez, Fabio A.}, year={2024}, month={Jul.}, pages={16210-16211} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26965/26737", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26965", + "pdf_size": 148891, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:AcE9R3cFeMoJ:scholar.google.com/&scioq=LEAN-DMKDE:+Quantum+Latent+Density+Estimation+for+Anomaly+Detection+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "unal.edu.co;unal.edu.co;unal.edu.co", + "email": "unal.edu.co;unal.edu.co;unal.edu.co", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Universidad Nacional de Colombia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unal.edu.co", + "aff_unique_abbr": "UNAL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Colombia" + }, + { + "id": "article-26086", + "title": "LIMIP: Lifelong Learning to Solve Mixed Integer Programs", + "track": "main", + "status": "Technical", + "abstract": "Mixed Integer programs (MIPs) are typically solved by the Branch-and-Bound algorithm. Recently, Learning to imitate fast approximations of the expert strong branching heuristic has gained attention due to its success in reducing the running time for solving MIPs. However, existing learning-to-branch methods assume that the entire training data is available in a single session of training. This assumption is often not true, and if the training data is supplied in continual fashion over time, existing techniques suffer from catastrophic forgetting. In this work, we study the hitherto unexplored paradigm of Lifelong Learning to Branch on Mixed Integer Programs. 
To mitigate catastrophic forgetting, we propose LIMIP, which is powered by the idea of modeling an MIP instance in the form of a bipartite graph, which we map to an embedding space using a bipartite Graph Attention Network. This rich embedding space avoids catastrophic forgetting through the application of knowledge distillation and elastic weight consolidation, wherein we learn the parameters key towards retaining efficacy and are therefore protected from significant drift. We evaluate LIMIP on a series of NP-hard problems and establish that in comparison to existing baselines, LIMIP is up to 50% better when confronted with lifelong learning", + "primary_area": "machine learning ii", + "author": "Sahil Manchanda; Sayan Ranu", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Indian Institute of Technology, Delhi; Department of Computer Science and Engineering, Indian Institute of Technology, Delhi", + "bibtex": "@article{Manchanda_Ranu_2023, title={LIMIP: Lifelong Learning to Solve Mixed Integer Programs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26086}, DOI={10.1609/aaai.v37i7.26086}, abstractNote={Mixed Integer programs (MIPs) are typically solved by the Branch-and-Bound algorithm. Recently, Learning to imitate fast approximations of the expert strong branching heuristic has gained attention due to its success in reducing the running time for solving MIPs. However, existing learning-to-branch methods assume that the entire training data is available in a single session of training. This assumption is often not true, and if the training data is supplied in continual fashion over time, existing techniques suffer from catastrophic forgetting. In this work, we study the hitherto unexplored paradigm of Lifelong Learning to Branch on Mixed Integer Programs. 
To mitigate catastrophic forgetting, we propose LIMIP, which is powered by the idea of modeling an MIP instance in the form of a bipartite graph, which we map to an embedding space using a bipartite Graph Attention Network. This rich embedding space avoids catastrophic forgetting through the application of knowledge distillation and elastic weight consolidation, wherein we learn the parameters key towards retaining efficacy and are therefore protected from significant drift. We evaluate LIMIP on a series of NP-hard problems and establish that in comparison to existing baselines, LIMIP is up to 50% better when confronted with lifelong learning}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Manchanda, Sahil and Ranu, Sayan}, year={2023}, month={Jun.}, pages={9047-9054} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26086/25858", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26086", + "pdf_size": 528305, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12017050119331903305&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "cse.iitd.ac.in;cse.iitd.ac.in", + "email": "cse.iitd.ac.in;cse.iitd.ac.in", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Indian Institute of Technology, Delhi", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.iitd.ac.in", + "aff_unique_abbr": "IIT Delhi", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Delhi", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26529", + "title": "LIQUID: A Framework for List Question Answering Dataset Generation", + "track": "main", + "status": "Technical", + "abstract": "Question answering (QA) models often rely on large-scale training datasets, which necessitates the development of a data generation framework to 
reduce the cost of manual annotations. Although several recent studies have aimed to generate synthetic questions with single-span answers, no study has been conducted on the creation of list questions with multiple, non-contiguous spans as answers. To address this gap, we propose LIQUID, an automated framework for generating list QA datasets from unlabeled corpora. We first convert a passage from Wikipedia or PubMed into a summary and extract named entities from the summarized text as candidate answers. This allows us to select answers that are semantically correlated in context and is, therefore, suitable for constructing list questions. We then create questions using an off-the-shelf question generator with the extracted entities and original passage. Finally, iterative filtering and answer expansion are performed to ensure the accuracy and completeness of the answers. Using our synthetic data, we significantly improve the performance of the previous best list QA models by exact-match F1 scores of 5.0 on MultiSpanQA, 1.9 on Quoref, and 2.8 averaged across three BioASQ benchmarks.", + "primary_area": "speech natural language processing", + "author": "Seongyun Lee; Hyunjae Kim; Jaewoo Kang", + "authorids": "", + "aff": "Korea University; Korea University; Korea University+AIGEN Sciences", + "bibtex": "@article{Lee_Kim_Kang_2023, title={LIQUID: A Framework for List Question Answering Dataset Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26529}, DOI={10.1609/aaai.v37i11.26529}, abstractNote={Question answering (QA) models often rely on large-scale training datasets, which necessitates the development of a data generation framework to reduce the cost of manual annotations. Although several recent studies have aimed to generate synthetic questions with single-span answers, no study has been conducted on the creation of list questions with multiple, non-contiguous spans as answers. 
To address this gap, we propose LIQUID, an automated framework for generating list QA datasets from unlabeled corpora. We first convert a passage from Wikipedia or PubMed into a summary and extract named entities from the summarized text as candidate answers. This allows us to select answers that are semantically correlated in context and is, therefore, suitable for constructing list questions. We then create questions using an off-the-shelf question generator with the extracted entities and original passage. Finally, iterative filtering and answer expansion are performed to ensure the accuracy and completeness of the answers. Using our synthetic data, we significantly improve the performance of the previous best list QA models by exact-match F1 scores of 5.0 on MultiSpanQA, 1.9 on Quoref, and 2.8 averaged across three BioASQ benchmarks.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Seongyun and Kim, Hyunjae and Kang, Jaewoo}, year={2023}, month={Jun.}, pages={13014-13024} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26529/26301", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26529", + "pdf_size": 295670, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4393152042462631024&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Korea University;AIGEN Sciences", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.korea.ac.kr;", + "aff_unique_abbr": "KU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-25402", + "title": "LORE: Logical Location Regression Network for Table Structure Recognition", + 
"track": "main", + "status": "Technical", + "abstract": "Table structure recognition (TSR) aims at extracting tables in images into machine-understandable formats. Recent methods solve this problem by predicting the adjacency relations of detected cell boxes, or learning to generate the corresponding markup sequences from the table images. However, they either count on additional heuristic rules to recover the table structures, or require a huge amount of training data and time-consuming sequential decoders. In this paper, we propose an alternative paradigm. We model TSR as a logical location regression problem and propose a new TSR framework called LORE, standing for LOgical location REgression network, which for the first time combines logical location regression together with spatial location regression of table cells. Our proposed LORE is conceptually simpler, easier to train and more accurate than previous TSR models of other paradigms. Experiments on standard benchmarks demonstrate that LORE consistently outperforms prior arts. 
Code is available at https://github.com/AlibabaResearch/AdvancedLiterateMachinery/tree/main/DocumentUnderstanding/LORE-TSR.", + "primary_area": "computer vision iii", + "author": "Hangdi Xing; Feiyu Gao; Rujiao Long; Jiajun Bu; Qi Zheng; Liangcheng Li; Cong Yao; Zhi Yu", + "authorids": "", + "aff": "Zhejiang Provincial Key Laboratory of Service Robot, College of Computer Science, Zhejiang University; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; Zhejiang Provincial Key Laboratory of Service Robot, College of Computer Science, Zhejiang University; DAMO Academy, Alibaba Group, Hangzhou, China; Zhejiang Provincial Key Laboratory of Service Robot, College of Computer Science, Zhejiang University; DAMO Academy, Alibaba Group, Hangzhou, China; Zhejiang Provincial Key Laboratory of Service Robot, School of Software Technology, Zhejiang University", + "bibtex": "@article{Xing_Gao_Long_Bu_Zheng_Li_Yao_Yu_2023, title={LORE: Logical Location Regression Network for Table Structure Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25402}, DOI={10.1609/aaai.v37i3.25402}, abstractNote={Table structure recognition (TSR) aims at extracting tables in images into machine-understandable formats. Recent methods solve this problem by predicting the adjacency relations of detected cell boxes, or learning to generate the corresponding markup sequences from the table images. However, they either count on additional heuristic rules to recover the table structures, or require a huge amount of training data and time-consuming sequential decoders. In this paper, we propose an alternative paradigm. We model TSR as a logical location regression problem and propose a new TSR framework called LORE, standing for LOgical location REgression network, which for the first time combines logical location regression together with spatial location regression of table cells. 
Our proposed LORE is conceptually simpler, easier to train and more accurate than previous TSR models of other paradigms. Experiments on standard benchmarks demonstrate that LORE consistently outperforms prior arts. Code is available at https://github.com/AlibabaResearch/AdvancedLiterateMachinery/tree/main/DocumentUnderstanding/LORE-TSR.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xing, Hangdi and Gao, Feiyu and Long, Rujiao and Bu, Jiajun and Zheng, Qi and Li, Liangcheng and Yao, Cong and Yu, Zhi}, year={2023}, month={Jun.}, pages={2992-3000} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25402/25174", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25402", + "pdf_size": 1590029, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14027962366310268230&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;alibaba-inc.com;gmail.com;zju.edu.cn;taobao.com;zju.edu.cn;gmail.com;zju.edu.cn", + "email": "zju.edu.cn;alibaba-inc.com;gmail.com;zju.edu.cn;taobao.com;zju.edu.cn;gmail.com;zju.edu.cn", + "github": "https://github.com/AlibabaResearch/AdvancedLiterateMachinery/tree/main/DocumentUnderstanding/LORE-TSR", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;0;1;0;1;0", + "aff_unique_norm": "Zhejiang University;Alibaba Group", + "aff_unique_dep": "College of Computer Science;DAMO Academy", + "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ZJU;Alibaba", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26683", + "title": "LUCID: Exposing Algorithmic Bias through Inverse Design", + "track": "aaai special track", + "status": "Technical", + "abstract": "AI systems can create, propagate, support, and automate bias in decision-making processes. 
To mitigate biased decisions, we both need to understand the origin of the bias and define what it means for an algorithm to make fair decisions. Most group fairness notions assess a model's equality of outcome by computing statistical metrics on the outputs. We argue that these output metrics encounter intrinsic obstacles and present a complementary approach that aligns with the increasing focus on equality of treatment. By Locating Unfairness through Canonical Inverse Design (LUCID), we generate a canonical set that shows the desired inputs for a model given a preferred output. The canonical set reveals the model's internal logic and exposes potential unethical biases by repeatedly interrogating the decision-making process. We evaluate LUCID on the UCI Adult and COMPAS data sets and find that some biases detected by a canonical set differ from those of output metrics. The results show that by shifting the focus towards equality of treatment and looking into the algorithm's internal workings, the canonical sets are a valuable addition to the toolbox of algorithmic fairness evaluation.", + "primary_area": "ai for social impact", + "author": "Carmen Mazijn; Carina Prunkl; Andres Algaba; Jan Danckaert; Vincent Ginis", + "authorids": "", + "aff": "Data Analytics Lab, Vrije Universiteit Brussel+Applied Physics, Vrije Universiteit Brussel; Institute for Ethics in AI, Oxford University; Data Analytics Lab, Vrije Universiteit Brussel; Applied Physics, Vrije Universiteit Brussel; Data Analytics Lab, Vrije Universiteit Brussel+Applied Physics, Vrije Universiteit Brussel+School of Engineering and Applied Sciences, Harvard University", + "bibtex": "@article{Mazijn_Prunkl_Algaba_Danckaert_Ginis_2023, title={LUCID: Exposing Algorithmic Bias through Inverse Design}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26683}, DOI={10.1609/aaai.v37i12.26683}, abstractNote={AI systems can create, propagate, support, and automate bias in decision-making processes. 
To mitigate biased decisions, we both need to understand the origin of the bias and define what it means for an algorithm to make fair decisions. Most group fairness notions assess a model\u2019s equality of outcome by computing statistical metrics on the outputs. We argue that these output metrics encounter intrinsic obstacles and present a complementary approach that aligns with the increasing focus on equality of treatment. By Locating Unfairness through Canonical Inverse Design (LUCID), we generate a canonical set that shows the desired inputs for a model given a preferred output. The canonical set reveals the model\u2019s internal logic and exposes potential unethical biases by repeatedly interrogating the decision-making process. We evaluate LUCID on the UCI Adult and COMPAS data sets and find that some biases detected by a canonical set differ from those of output metrics. The results show that by shifting the focus towards equality of treatment and looking into the algorithm\u2019s internal workings, the canonical sets are a valuable addition to the toolbox of algorithmic fairness evaluation.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mazijn, Carmen and Prunkl, Carina and Algaba, Andres and Danckaert, Jan and Ginis, Vincent}, year={2023}, month={Jun.}, pages={14391-14399} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26683/26455", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26683", + "pdf_size": 2026750, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7752836053120414053&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "vub.be;philosophy.ox.ac.uk;vub.be;vub.be;vub.be", + "email": "vub.be;philosophy.ox.ac.uk;vub.be;vub.be;vub.be", + "github": "https://github.com/Integrated-Intelligence-Lab/canonical_sets", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;0;0;0+0+2", + "aff_unique_norm": "Vrije 
Universiteit Brussel;Oxford University;Harvard University", + "aff_unique_dep": "Data Analytics Lab;Institute for Ethics in AI;School of Engineering and Applied Sciences", + "aff_unique_url": "https://www.vub.be;https://www.ox.ac.uk;https://www.harvard.edu", + "aff_unique_abbr": "VUB;Oxford;Harvard", + "aff_campus_unique_index": "0;2;0;0+3", + "aff_campus_unique": "Brussel;;Oxford;Cambridge", + "aff_country_unique_index": "0+0;1;0;0;0+0+2", + "aff_country_unique": "Belgium;United Kingdom;United States" + }, + { + "id": "article-27007", + "title": "LVRNet: Lightweight Image Restoration for Aerial Images under Low Visibility (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Learning to recover clear images from images having a combination of degrading factors is a challenging task. That being said, autonomous surveillance in low visibility conditions caused by high pollution/smoke, poor air quality index, low light, atmospheric scattering, and haze during a blizzard, etc, becomes even more important to prevent accidents. It is thus crucial to form a solution that can not only result in a high-quality image but also which is efficient enough to be deployed for everyday use. However, the lack of proper datasets available to tackle this task limits the performance of the previous methods proposed. To this end, we generate the LowVis-AFO dataset, containing 3647 paired dark-hazy and clear images. We also introduce a new lightweight deep learning model called Low-Visibility Restoration Network (LVRNet). 
It outperforms previous image restoration methods with low latency, achieving a PSNR value of 25.744 and an SSIM of 0.905, hence making our approach scalable and ready for practical use.", + "primary_area": "", + "author": "Esha Pahwa; Achleshwar Luthra; Pratik Narang", + "authorids": "", + "aff": "BITS Pilani; Carnegie Mellon University; BITS Pilani", + "bibtex": "@article{Pahwa_Luthra_Narang_2024, title={LVRNet: Lightweight Image Restoration for Aerial Images under Low Visibility (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27007}, DOI={10.1609/aaai.v37i13.27007}, abstractNote={Learning to recover clear images from images having a combination of degrading factors is a challenging task. That being said, autonomous surveillance in low visibility conditions caused by high pollution/smoke, poor air quality index, low light, atmospheric scattering, and haze during a blizzard, etc, becomes even more important to prevent accidents. It is thus crucial to form a solution that can not only result in a high-quality image but also which is efficient enough to be deployed for everyday use. However, the lack of proper datasets available to tackle this task limits the performance of the previous methods proposed. To this end, we generate the LowVis-AFO dataset, containing 3647 paired dark-hazy and clear images. We also introduce a new lightweight deep learning model called Low-Visibility Restoration Network (LVRNet). 
It outperforms previous image restoration methods with low latency, achieving a PSNR value of 25.744 and an SSIM of 0.905, hence making our approach scalable and ready for practical use.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pahwa, Esha and Luthra, Achleshwar and Narang, Pratik}, year={2024}, month={Jul.}, pages={16294-16295} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27007/26779", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27007", + "pdf_size": 1572577, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:Pi8NJhCMPhUJ:scholar.google.com/&scioq=LVRNet:+Lightweight+Image+Restoration+for+Aerial+Images+under+Low+Visibility+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "pilani.bits-pilani.ac.in;andrew.cmu.edu;pilani.bits-pilani.ac.in", + "email": "pilani.bits-pilani.ac.in;andrew.cmu.edu;pilani.bits-pilani.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Birla Institute of Technology and Science, Pilani;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bits-pilani.ac.in;https://www.cmu.edu", + "aff_unique_abbr": "BITS Pilani;CMU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pilani;", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "India;United States" + }, + { + "id": "article-25228", + "title": "LWSIS: LiDAR-Guided Weakly Supervised Instance Segmentation for Autonomous Driving", + "track": "main", + "status": "Technical", + "abstract": "Image instance segmentation is a fundamental research topic in autonomous driving, which is crucial for scene understanding and road safety. Advanced learning-based approaches often rely on the costly 2D mask annotations for training. 
\nIn this paper, we present a more artful framework, LiDAR-guided Weakly Supervised Instance Segmentation (LWSIS), which leverages the off-the-shelf 3D data, i.e., Point Cloud, together with the 3D boxes, as natural weak supervisions for\ntraining the 2D image instance segmentation models. Our LWSIS not only exploits the complementary information in multimodal data during training but also significantly reduces the annotation cost of the dense 2D masks. In detail, LWSIS consists of two crucial modules, Point Label Assignment (PLA) and Graph-based Consistency Regularization (GCR). The former module aims to automatically assign the 3D point cloud as 2D point-wise labels, while the latter further refines the predictions by enforcing geometry and appearance consistency of the multimodal data. Moreover, we conduct a secondary instance segmentation annotation on the nuScenes, named nuInsSeg, to encourage further research on multimodal perception tasks. Extensive experiments on the nuInsSeg, as well as the large-scale Waymo, show that LWSIS can substantially improve existing weakly supervised segmentation models by only involving 3D data during training. Additionally, LWSIS can also be incorporated into 3D object detectors like PointPainting to boost the 3D detection performance for free. 
The code and dataset are available at https://github.com/Serenos/LWSIS.", + "primary_area": "computer vision ii", + "author": "Xiang Li; Junbo Yin; Botian Shi; Yikang Li; Ruigang Yang; Jianbing Shen", + "authorids": "", + "aff": "School of Computer Science, Beijing Institute of Technology; School of Computer Science, Beijing Institute of Technology; Shanghai AI Laboratory; Shanghai AI Laboratory; Inceptio; SKL-IOTSC, CIS, University of Macau", + "bibtex": "@article{Li_Yin_Shi_Li_Yang_Shen_2023, title={LWSIS: LiDAR-Guided Weakly Supervised Instance Segmentation for Autonomous Driving}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25228}, DOI={10.1609/aaai.v37i2.25228}, abstractNote={Image instance segmentation is a fundamental research topic in autonomous driving, which is crucial for scene understanding and road safety. Advanced learning-based approaches often rely on the costly 2D mask annotations for training. In this paper, we present a more artful framework, LiDAR-guided Weakly Supervised Instance Segmentation (LWSIS), which leverages the off-the-shelf 3D data, i.e., Point Cloud, together with the 3D boxes, as natural weak supervisions for\ntraining the 2D image instance segmentation models. Our LWSIS not only exploits the complementary information in multimodal data during training but also significantly reduces the annotation cost of the dense 2D masks. In detail, LWSIS consists of two crucial modules, Point Label Assignment (PLA) and Graph-based Consistency Regularization (GCR). The former module aims to automatically assign the 3D point cloud as 2D point-wise labels, while the latter further refines the predictions by enforcing geometry and appearance consistency of the multimodal data. Moreover, we conduct a secondary instance segmentation annotation on the nuScenes, named nuInsSeg, to encourage further research on multimodal perception tasks. 
Extensive experiments on the nuInsSeg, as well as the large-scale Waymo, show that LWSIS can substantially improve existing weakly supervised segmentation models by only involving 3D data during training. Additionally, LWSIS can also be incorporated into 3D object detectors like PointPainting to boost the 3D detection performance for free. The code and dataset are available at https://github.com/Serenos/LWSIS.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xiang and Yin, Junbo and Shi, Botian and Li, Yikang and Yang, Ruigang and Shen, Jianbing}, year={2023}, month={Jun.}, pages={1433-1441} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25228/25000", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25228", + "pdf_size": 3888262, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7009899739079239409&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com; ; ; ;um.edu.mo", + "email": "gmail.com;gmail.com; ; ; ;um.edu.mo", + "github": "https://github.com/Serenos/LWSIS", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;2;3", + "aff_unique_norm": "Beijing Institute of Technology;Shanghai AI Laboratory;Inceptio;University of Macau", + "aff_unique_dep": "School of Computer Science;;;Department of Computer and Information Science", + "aff_unique_url": "http://www.bit.edu.cn;https://www.shanghai-ai-lab.com;;https://www.um.edu.mo", + "aff_unique_abbr": "BIT;SAIL;;UM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;2", + "aff_country_unique": "China;;Macau" + }, + { + "id": "article-26377", + "title": "LaCAM: Search-Based Algorithm for Quick Multi-Agent Pathfinding", + "track": "main", + "status": "Technical", + "abstract": "We propose a novel complete algorithm for multi-agent pathfinding (MAPF) called lazy constraints addition search for MAPF 
(LaCAM). MAPF is a problem of finding collision-free paths for multiple agents on graphs and is the foundation of multi-robot coordination. LaCAM uses a two-level search to find solutions quickly, even with hundreds of agents or more. At the low-level, it searches constraints about agents' locations. At the high-level, it searches a sequence of all agents' locations, following the constraints specified by the low-level. Our exhaustive experiments reveal that LaCAM is comparable to or outperforms state-of-the-art sub-optimal MAPF algorithms in a variety of scenarios, regarding success rate, planning time, and solution quality of sum-of-costs.", + "primary_area": "multiagent systems", + "author": "Keisuke Okumura", + "authorids": "", + "aff": "Tokyo Institute of Technology", + "bibtex": "@article{Okumura_2023, title={LaCAM: Search-Based Algorithm for Quick Multi-Agent Pathfinding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26377}, DOI={10.1609/aaai.v37i10.26377}, abstractNote={We propose a novel complete algorithm for multi-agent pathfinding (MAPF) called lazy constraints addition search for MAPF (LaCAM). MAPF is a problem of finding collision-free paths for multiple agents on graphs and is the foundation of multi-robot coordination. LaCAM uses a two-level search to find solutions quickly, even with hundreds of agents or more. At the low-level, it searches constraints about agents\u2019 locations. At the high-level, it searches a sequence of all agents\u2019 locations, following the constraints specified by the low-level. 
Our exhaustive experiments reveal that LaCAM is comparable to or outperforms state-of-the-art sub-optimal MAPF algorithms in a variety of scenarios, regarding success rate, planning time, and solution quality of sum-of-costs.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Okumura, Keisuke}, year={2023}, month={Jun.}, pages={11655-11662} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26377/26149", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26377", + "pdf_size": 714868, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17622661641348666488&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "coord.c.titech.ac.jp", + "email": "coord.c.titech.ac.jp", + "github": "", + "project": "https://kei18.github.io/lacam", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Tokyo Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.titech.ac.jp", + "aff_unique_abbr": "Titech", + "aff_country_unique_index": "0", + "aff_country_unique": "Japan" + }, + { + "id": "article-27001", + "title": "Label Smoothing for Emotion Detection (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Automatically detecting emotions from text has countless\napplications, ranging from large scale opinion mining to\nsocial robots in healthcare and education. However, emotions\nare subjective in nature and are often expressed in ambiguous\nways. At the same time, detecting emotions can also require\nimplicit reasoning, which may not be available as surface-\nlevel, lexical information. In this work, we conjecture that\nthe overconfidence of pre-trained language models such as\nBERT is a critical problem in emotion detection and show\nthat alleviating this problem can considerably improve the\ngeneralization performance. 
We carry out comprehensive\nexperiments on four emotion detection benchmark datasets\nand show that calibrating our model predictions leads to an\naverage improvement of 1.35% in weighted F1 score.", + "primary_area": "", + "author": "George Maratos; Tiberiu Sosea; Cornelia Caragea", + "authorids": "", + "aff": "Computer Science, University of Illinois at Chicago; Computer Science, University of Illinois at Chicago; Computer Science, University of Illinois at Chicago", + "bibtex": "@article{Maratos_Sosea_Caragea_2024, title={Label Smoothing for Emotion Detection (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27001}, DOI={10.1609/aaai.v37i13.27001}, abstractNote={Automatically detecting emotions from text has countless\napplications, ranging from large scale opinion mining to\nsocial robots in healthcare and education. However, emotions\nare subjective in nature and are often expressed in ambiguous\nways. At the same time, detecting emotions can also require\nimplicit reasoning, which may not be available as surface-\nlevel, lexical information. In this work, we conjecture that\nthe overconfidence of pre-trained language models such as\nBERT is a critical problem in emotion detection and show\nthat alleviating this problem can considerably improve the\ngeneralization performance. 
We carry out comprehensive\nexperiments on four emotion detection benchmark datasets\nand show that calibrating our model predictions leads to an\naverage improvement of 1.35% in weighted F1 score.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Maratos, George and Sosea, Tiberiu and Caragea, Cornelia}, year={2024}, month={Jul.}, pages={16282-16283} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27001/26773", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27001", + "pdf_size": 65169, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:SkDRB4JC1OgJ:scholar.google.com/&scioq=Label+Smoothing+for+Emotion+Detection+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "uic.edu;uic.edu;uic.edu", + "email": "uic.edu;uic.edu;uic.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois at Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26259", + "title": "Label-Specific Feature Augmentation for Long-Tailed Multi-Label Text Classification", + "track": "main", + "status": "Technical", + "abstract": "Multi-label text classification (MLTC) involves tagging a document with its most relevant subset of labels from a label set. In real applications, labels usually follow a long-tailed distribution, where most labels (called as tail-label) only contain a small number of documents and limit the performance of MLTC. To facilitate this low-resource problem, researchers introduced a simple but effective strategy, data augmentation (DA). However, most existing DA approaches struggle in multi-label settings. 
The main reason is that the augmented documents for one label may inevitably influence the other co-occurring labels and further exaggerate the long-tailed problem. To mitigate this issue, we propose a new pair-level augmentation framework for MLTC, called Label-Specific Feature Augmentation (LSFA), which merely augments positive feature-label pairs for the tail-labels. LSFA contains two main parts. The first is for label-specific document representation learning in the high-level latent space, the second is for augmenting tail-label features in latent space by transferring the documents second-order statistics (intra-class semantic variations) from head labels to tail labels. At last, we design a new loss function for adjusting classifiers based on augmented datasets. The whole learning procedure can be effectively trained. Comprehensive experiments on benchmark datasets have shown that the proposed LSFA outperforms the state-of-the-art counterparts.", + "primary_area": "machine learning iv", + "author": "Pengyu Xu; Lin Xiao; Bing Liu; Sijin Lu; Liping Jing; Jian Yu", + "authorids": "", + "aff": "Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China", + "bibtex": "@article{Xu_Xiao_Liu_Lu_Jing_Yu_2023, title={Label-Specific Feature Augmentation for Long-Tailed Multi-Label Text Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26259}, DOI={10.1609/aaai.v37i9.26259}, abstractNote={Multi-label text 
classification (MLTC) involves tagging a document with its most relevant subset of labels from a label set. In real applications, labels usually follow a long-tailed distribution, where most labels (called as tail-label) only contain a small number of documents and limit the performance of MLTC. To facilitate this low-resource problem, researchers introduced a simple but effective strategy, data augmentation (DA). However, most existing DA approaches struggle in multi-label settings. The main reason is that the augmented documents for one label may inevitably influence the other co-occurring labels and further exaggerate the long-tailed problem. To mitigate this issue, we propose a new pair-level augmentation framework for MLTC, called Label-Specific Feature Augmentation (LSFA), which merely augments positive feature-label pairs for the tail-labels. LSFA contains two main parts. The first is for label-specific document representation learning in the high-level latent space, the second is for augmenting tail-label features in latent space by transferring the documents second-order statistics (intra-class semantic variations) from head labels to tail labels. At last, we design a new loss function for adjusting classifiers based on augmented datasets. The whole learning procedure can be effectively trained. 
Comprehensive experiments on benchmark datasets have shown that the proposed LSFA outperforms the state-of-the-art counterparts.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Pengyu and Xiao, Lin and Liu, Bing and Lu, Sijin and Jing, Liping and Yu, Jian}, year={2023}, month={Jun.}, pages={10602-10610} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26259/26031", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26259", + "pdf_size": 403582, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10565072348640436803&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Beijing Jiaotong University", + "aff_unique_dep": "Beijing Key Lab of Traffic Data Analysis and Mining", + "aff_unique_url": "http://www.bjtu.edu.cn", + "aff_unique_abbr": "BJTU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25646", + "title": "LagNet: Deep Lagrangian Mechanics for Plug-and-Play Molecular Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Molecular representation learning is a fundamental problem in the field of drug discovery and molecular science. Whereas incorporating molecular 3D information in the representations of molecule seems beneficial, which is related to computational chemistry with the basic task of predicting stable 3D structures (conformations) of molecules. 
Existing machine learning methods either rely on 1D and 2D molecular properties or simulate molecular force field to use additional 3D structure information via Hamiltonian network. The former has the disadvantage of ignoring important 3D structure features, while the latter has the disadvantage that existing Hamiltonian neural network must satisfy the \u201ccanonial\u201d constraint, which is difficult to be obeyed in many cases. In this paper, we propose a novel plug-and-play architecture LagNet by simulating molecular force field only with parameterized position coordinates, which implements Lagrangian mechanics to learn molecular representation by preserving 3D conformation without obeying any additional restrictions. LagNet is designed to generate known conformations and generalize for unknown ones from molecular SMILES. Implicit positions in LagNet are learned iteratively using discrete-time Lagrangian equations. Experimental results show that LagNet can well learn 3D molecular structure features, and outperforms previous state-of-the-art baselines related molecular representation by a significant margin.", + "primary_area": "domain s of application", + "author": "Chunyan Li; Junfeng Yao; Jinsong Su; Zhaoyang Liu; Xiangxiang Zeng; Chenxi Huang", + "authorids": "", + "aff": "School of Informatics, Xiamen University, Xiamen, China+School of Informatics, Yunnan Normal University, Kunming, China; School of Informatics, Xiamen University, Xiamen, China; School of Informatics, Xiamen University, Xiamen, China; School of Information and Control Engineering, China University of Mining and Technology, Xuzhou, China; College of Computer Science and Electronic Engineering, Hunan University, Changsha, China; School of Informatics, Xiamen University, Xiamen, China", + "bibtex": "@article{Li_Yao_Su_Liu_Zeng_Huang_2023, title={LagNet: Deep Lagrangian Mechanics for Plug-and-Play Molecular Representation Learning}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25646}, DOI={10.1609/aaai.v37i4.25646}, abstractNote={Molecular representation learning is a fundamental problem in the field of drug discovery and molecular science. Whereas incorporating molecular 3D information in the representations of molecule seems beneficial, which is related to computational chemistry with the basic task of predicting stable 3D structures (conformations) of molecules. Existing machine learning methods either rely on 1D and 2D molecular properties or simulate molecular force field to use additional 3D structure information via Hamiltonian network. The former has the disadvantage of ignoring important 3D structure features, while the latter has the disadvantage that existing Hamiltonian neural network must satisfy the \u201ccanonial\u201d constraint, which is difficult to be obeyed in many cases. In this paper, we propose a novel plug-and-play architecture LagNet by simulating molecular force field only with parameterized position coordinates, which implements Lagrangian mechanics to learn molecular representation by preserving 3D conformation without obeying any additional restrictions. LagNet is designed to generate known conformations and generalize for unknown ones from molecular SMILES. Implicit positions in LagNet are learned iteratively using discrete-time Lagrangian equations. 
Experimental results show that LagNet can well learn 3D molecular structure features, and outperforms previous state-of-the-art baselines related molecular representation by a significant margin.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Chunyan and Yao, Junfeng and Su, Jinsong and Liu, Zhaoyang and Zeng, Xiangxiang and Huang, Chenxi}, year={2023}, month={Jun.}, pages={5169-5177} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25646/25418", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25646", + "pdf_size": 1413570, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17237094376180168153&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com; ; ;hnu.edu.cn; ;xmu.edu.cn", + "email": "gmail.com; ; ;hnu.edu.cn; ;xmu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;2;3;0", + "aff_unique_norm": "Xiamen University;Yunnan Normal University;China University of Mining and Technology;Hunan University", + "aff_unique_dep": "School of Informatics;School of Informatics;School of Information and Control Engineering;College of Computer Science and Electronic Engineering", + "aff_unique_url": "https://www.xmu.edu.cn;http://www.ynnu.edu.cn;http://www.cumt.edu.cn/;http://www.hnu.edu.cn", + "aff_unique_abbr": "XMU;;CUMT;", + "aff_campus_unique_index": "0+1;0;0;2;3;0", + "aff_campus_unique": "Xiamen;Kunming;Xuzhou;Changsha", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26639", + "title": "Language Model Pre-training on True Negatives", + "track": "main", + "status": "Technical", + "abstract": "Discriminative pre-trained language models (PrLMs) learn to predict original texts from intentionally corrupted ones. 
Taking the former text as positive and the latter as negative samples, the PrLM can be trained effectively for contextualized representation. However, the training of such a type of PrLMs highly relies on the quality of the automatically constructed samples. Existing PrLMs simply treat all corrupted texts as equal negative without any examination, which actually lets the resulting model inevitably suffer from the false negative issue where training is carried out on pseudo-negative data and leads to less efficiency and less robustness in the resulting PrLMs. In this work, on the basis of defining the false negative issue in discriminative PrLMs that has been ignored for a long time, we design enhanced pre-training methods to counteract false negative predictions and encourage pre-training language models on true negatives by correcting the harmful gradient updates subject to false negative predictions. Experimental results on GLUE and SQuAD benchmarks show that our counter-false-negative pre-training methods indeed bring about better performance together with stronger robustness.", + "primary_area": "speech natural language processing", + "author": "Zhuosheng Zhang; Hai Zhao; Masao Utiyama; Eiichiro Sumita", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University + Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University + Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University, Shanghai, China; National Institute of Information and Communications Technology (NICT), Kyoto, Japan; National Institute of Information and Communications Technology (NICT), Kyoto, Japan", + "bibtex": "@article{Zhang_Zhao_Utiyama_Sumita_2023, title={Language Model Pre-training on True 
Negatives}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26639}, DOI={10.1609/aaai.v37i11.26639}, abstractNote={Discriminative pre-trained language models (PrLMs) learn to predict original texts from intentionally corrupted ones. Taking the former text as positive and the latter as negative samples, the PrLM can be trained effectively for contextualized representation. However, the training of such a type of PrLMs highly relies on the quality of the automatically constructed samples. Existing PrLMs simply treat all corrupted texts as equal negative without any examination, which actually lets the resulting model inevitably suffer from the false negative issue where training is carried out on pseudo-negative data and leads to less efficiency and less robustness in the resulting PrLMs. In this work, on the basis of defining the false negative issue in discriminative PrLMs that has been ignored for a long time, we design enhanced pre-training methods to counteract false negative predictions and encourage pre-training language models on true negatives by correcting the harmful gradient updates subject to false negative predictions. 
Experimental results on GLUE and SQuAD benchmarks show that our counter-false-negative pre-training methods indeed bring about better performance together with stronger robustness.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zhuosheng and Zhao, Hai and Utiyama, Masao and Sumita, Eiichiro}, year={2023}, month={Jun.}, pages={14002-14010} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26639/26411", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26639", + "pdf_size": 1132565, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16373105254005293169&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;cs.sjtu.edu.cn;nict.go.jp;nict.go.jp", + "email": "sjtu.edu.cn;cs.sjtu.edu.cn;nict.go.jp;nict.go.jp", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;1;1", + "aff_unique_norm": "Shanghai Jiao Tong University;National Institute of Information and Communications Technology", + "aff_unique_dep": "Department of Computer Science and Engineering;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.nict.go.jp", + "aff_unique_abbr": "SJTU;NICT", + "aff_campus_unique_index": "1;1;2;2", + "aff_campus_unique": ";Shanghai;Kyoto", + "aff_country_unique_index": "0+0;0+0;1;1", + "aff_country_unique": "China;Japan" + }, + { + "id": "article-25453", + "title": "Language-Assisted 3D Feature Learning for Semantic Scene Understanding", + "track": "main", + "status": "Technical", + "abstract": "Learning descriptive 3D features is crucial for understanding 3D scenes with diverse objects and complex structures. However, it is usually unknown whether important geometric attributes and scene context obtain enough emphasis in an end-to-end trained 3D scene understanding network. 
To guide 3D feature learning toward important geometric attributes and scene context, we explore the help of textual scene descriptions. Given some free-form descriptions paired with 3D scenes, we extract the knowledge regarding the object relationships and object attributes. We then inject the knowledge to 3D feature learning through three classification-based auxiliary tasks. This language-assisted training can be combined with modern object detection and instance segmentation methods to promote 3D semantic scene understanding, especially in a label-deficient regime. Moreover, the 3D feature learned with language assistance is better aligned with the language features, which can benefit various 3D-language multimodal tasks. Experiments on several benchmarks of 3D-only and 3D-language tasks demonstrate the effectiveness of our language-assisted 3D feature learning. Code is available at https://github.com/Asterisci/Language-Assisted-3D.", + "primary_area": "computer vision iii", + "author": "Junbo Zhang; Guofan Fan; Guanghan Wang; Zhengyuan Su; Kaisheng Ma; Li Yi", + "authorids": "", + "aff": "Tsinghua University; Xi\u2019an Jiaotong University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University+Shanghai Artificial Intelligence Laboratory+Shanghai Qi Zhi Institute", + "bibtex": "@article{Zhang_Fan_Wang_Su_Ma_Yi_2023, title={Language-Assisted 3D Feature Learning for Semantic Scene Understanding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25453}, DOI={10.1609/aaai.v37i3.25453}, abstractNote={Learning descriptive 3D features is crucial for understanding 3D scenes with diverse objects and complex structures. However, it is usually unknown whether important geometric attributes and scene context obtain enough emphasis in an end-to-end trained 3D scene understanding network. To guide 3D feature learning toward important geometric attributes and scene context, we explore the help of textual scene descriptions. 
Given some free-form descriptions paired with 3D scenes, we extract the knowledge regarding the object relationships and object attributes. We then inject the knowledge to 3D feature learning through three classification-based auxiliary tasks. This language-assisted training can be combined with modern object detection and instance segmentation methods to promote 3D semantic scene understanding, especially in a label-deficient regime. Moreover, the 3D feature learned with language assistance is better aligned with the language features, which can benefit various 3D-language multimodal tasks. Experiments on several benchmarks of 3D-only and 3D-language tasks demonstrate the effectiveness of our language-assisted 3D feature learning. Code is available at https://github.com/Asterisci/Language-Assisted-3D.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Junbo and Fan, Guofan and Wang, Guanghan and Su, Zhengyuan and Ma, Kaisheng and Yi, Li}, year={2023}, month={Jun.}, pages={3445-3453} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25453/25225", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25453", + "pdf_size": 974339, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11324659065551406076&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "mails.tsinghua.edu.cn; ; ;mail.tsinghua.edu.cn; ;mail.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn; ; ;mail.tsinghua.edu.cn; ;mail.tsinghua.edu.cn", + "github": "https://github.com/Asterisci/Language-Assisted-3D", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0+2+3", + "aff_unique_norm": "Tsinghua University;Xi'an Jiaotong University;Shanghai Artificial Intelligence Laboratory;Shanghai Qi Zhi Institute", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.xjtu.edu.cn;http://www.shailab.org/;https://www.qz.io", + "aff_unique_abbr": 
"THU;XJTU;Shanghai AI Lab;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26466", + "title": "Large-State Reinforcement Learning for Hyper-Heuristics", + "track": "main", + "status": "Technical", + "abstract": "Hyper-heuristics are a domain-independent problem solving approach where the main task is to select effective chains of problem-specific low-level heuristics on the fly for an unseen instance. This task can be seen as a reinforcement learning problem, however, the information available to the hyper-heuristic is very limited, usually leading to very limited state representations. In this work, for the first time we use the trajectory of solution changes for a larger set of features for reinforcement learning in the novel hyper-heuristic LAST-RL (Large-State Reinforcement Learning). Further, we introduce a probability distribution for the exploration case in our epsilon-greedy policy that is based on the idea of Iterated Local Search to increase the chance to sample good chains of low-level heuristics. The benefit of the collaboration of our novel components is shown on the academic benchmark of the Cross Domain Heuristic Challenge 2011 consisting of six different problem domains. 
Our approach can provide state-of-the-art results on this benchmark where it outperforms recent hyper-heuristics based on reinforcement learning, and also demonstrates high performance on a benchmark of complex real-life personnel scheduling domains.", + "primary_area": "search and optimization", + "author": "Lucas Kletzander; Nysret Musliu", + "authorids": "", + "aff": "Christian Doppler Laboratory for Artificial Intelligence and Optimization for Planning and Scheduling; DBAI, TU Wien, Karlsplatz 13, 1040 Vienna, Austria", + "bibtex": "@article{Kletzander_Musliu_2023, title={Large-State Reinforcement Learning for Hyper-Heuristics}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26466}, DOI={10.1609/aaai.v37i10.26466}, abstractNote={Hyper-heuristics are a domain-independent problem solving approach where the main task is to select effective chains of problem-specific low-level heuristics on the fly for an unseen instance. This task can be seen as a reinforcement learning problem, however, the information available to the hyper-heuristic is very limited, usually leading to very limited state representations. In this work, for the first time we use the trajectory of solution changes for a larger set of features for reinforcement learning in the novel hyper-heuristic LAST-RL (Large-State Reinforcement Learning). Further, we introduce a probability distribution for the exploration case in our epsilon-greedy policy that is based on the idea of Iterated Local Search to increase the chance to sample good chains of low-level heuristics. The benefit of the collaboration of our novel components is shown on the academic benchmark of the Cross Domain Heuristic Challenge 2011 consisting of six different problem domains. 
Our approach can provide state-of-the-art results on this benchmark where it outperforms recent hyper-heuristics based on reinforcement learning, and also demonstrates high performance on a benchmark of complex real-life personnel scheduling domains.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kletzander, Lucas and Musliu, Nysret}, year={2023}, month={Jun.}, pages={12444-12452} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26466/26238", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26466", + "pdf_size": 136972, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=525876927005065375&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "tuwien.ac.at;tuwien.ac.at", + "email": "tuwien.ac.at;tuwien.ac.at", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Christian Doppler Laboratory;Technical University of Vienna", + "aff_unique_dep": "Artificial Intelligence and Optimization for Planning and Scheduling;Database and Artificial Intelligence Group", + "aff_unique_url": ";https://www.tuwien.ac.at", + "aff_unique_abbr": ";TU Wien", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Vienna", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Austria" + }, + { + "id": "article-26131", + "title": "Latent Autoregressive Source Separation", + "track": "main", + "status": "Technical", + "abstract": "Autoregressive models have achieved impressive results over a wide range of domains in terms of generation quality and downstream task performance. In the continuous domain, a key factor behind this success is the usage of quantized latent spaces (e.g., obtained via VQ-VAE autoencoders), which allow for dimensionality reduction and faster inference times. 
However, using existing pre-trained models to perform new non-trivial tasks is difficult since it requires additional fine-tuning or extensive training to elicit prompting. This paper introduces LASS as a way to perform vector-quantized Latent Autoregressive Source Separation (i.e., de-mixing an input signal into its constituent sources) without requiring additional gradient-based optimization or modifications of existing models. Our separation method relies on the Bayesian formulation in which the autoregressive models are the priors, and a discrete (non-parametric) likelihood function is constructed by performing frequency counts over latent sums of addend tokens. We test our method on images and audio with several sampling strategies (e.g., ancestral, beam search) showing competitive results with existing approaches in terms of separation quality while offering at the same time significant speedups in terms of inference time and scalability to higher dimensional data.", + "primary_area": "machine learning iii", + "author": "Emilian Postolache; Giorgio Mariani; Michele Mancusi; Andrea Santilli; Luca Cosmo; Emanuele Rodol\u00e0", + "authorids": "", + "aff": "Sapienza University of Rome, Italy; Sapienza University of Rome, Italy; Sapienza University of Rome, Italy; Sapienza University of Rome, Italy; Ca\u2019 Foscari University of Venice, Italy+University of Lugano, Switzerland; Sapienza University of Rome, Italy", + "bibtex": "@article{Postolache_Mariani_Mancusi_Santilli_Cosmo_Rodol\u00e0_2023, title={Latent Autoregressive Source Separation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26131}, DOI={10.1609/aaai.v37i8.26131}, abstractNote={Autoregressive models have achieved impressive results over a wide range of domains in terms of generation quality and downstream task performance. 
In the continuous domain, a key factor behind this success is the usage of quantized latent spaces (e.g., obtained via VQ-VAE autoencoders), which allow for dimensionality reduction and faster inference times. However, using existing pre-trained models to perform new non-trivial tasks is difficult since it requires additional fine-tuning or extensive training to elicit prompting. This paper introduces LASS as a way to perform vector-quantized Latent Autoregressive Source Separation (i.e., de-mixing an input signal into its constituent sources) without requiring additional gradient-based optimization or modifications of existing models. Our separation method relies on the Bayesian formulation in which the autoregressive models are the priors, and a discrete (non-parametric) likelihood function is constructed by performing frequency counts over latent sums of addend tokens. We test our method on images and audio with several sampling strategies (e.g., ancestral, beam search) showing competitive results with existing approaches in terms of separation quality while offering at the same time significant speedups in terms of inference time and scalability to higher dimensional data.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Postolache, Emilian and Mariani, Giorgio and Mancusi, Michele and Santilli, Andrea and Cosmo, Luca and Rodol\u00e0, Emanuele}, year={2023}, month={Jun.}, pages={9444-9452} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26131/25903", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26131", + "pdf_size": 1206433, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18329047830535909276&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "di.uniroma1.it;di.uniroma1.it;di.uniroma1.it; ; ; ", + "email": "di.uniroma1.it;di.uniroma1.it;di.uniroma1.it; ; ; ", + "github": "", + "project": "", + "author_num": 6, + 
"aff_unique_index": "0;0;0;0;1+2;0", + "aff_unique_norm": "Sapienza University of Rome;Ca\u2019 Foscari University of Venice;University of Lugano", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uniroma1.it;https://www.unive.it;https://www.unilu.ch", + "aff_unique_abbr": "Sapienza;Ca\u2019 Foscari;UNILU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+1;0", + "aff_country_unique": "Italy;Switzerland" + }, + { + "id": "article-26600", + "title": "Latent Constraints on Unsupervised Text-Graph Alignment with Information Asymmetry", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised text-graph alignment (UTGA) is a fundamental task that bidirectionally generates texts and graphs without parallel data. Most available models of UTGA suffer from information asymmetry, a common phenomenon that texts and graphs include additional information invisible to each other. On the one hand, these models fail to supplement asymmetric information effectively due to the lack of ground truths. On the other hand, it is challenging to indicate asymmetric information with explicit indicators because it cannot be decoupled from the data directly. To address the challenge posed by information asymmetry, we propose the assumption that asymmetric information is encoded in unobservable latent variables and only affects the one-way generation processes. These latent variables corresponding to asymmetric information should obey prior distributions recovered approximately from original data. Therefore, we first propose a taxonomy of the latent variable that classifies the latent variable into transferrable (TV) and non-transferable (NTV) variables and further distinguish NTV as the dependent variable (DV) and the independent variable (IV). 
Next, we propose three latent VAE-based regularizations on TV, DV, and IV to constrain their distributions to well-designed prior distributions to introduce asymmetric information into models and enhance the preservation of shared contents. Finally, we impose the three proposed constraints on a cycle-consistent learning framework, back-translation (BT), named ConstrainedBT. Experimental results on three UTGA tasks demonstrate the effectiveness of ConstrainedBT on the information-asymmetric challenge.", + "primary_area": "speech natural language processing", + "author": "Jidong Tian; Wenqing Chen; Yitian Li; Caoyun Fan; Hao He; Yaohui Jin", + "authorids": "", + "aff": "MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University+State Key Lab of Advanced Optical Communication System and Network, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; School of Software Engineering, Sun Yat-sen University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University+State Key Lab of Advanced Optical Communication System and Network, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University+State Key Lab of Advanced Optical Communication System and Network, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University+State Key Lab of Advanced Optical Communication System and Network, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University+State Key Lab of Advanced Optical Communication System and Network, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University", + "bibtex": 
"@article{Tian_Chen_Li_Fan_He_Jin_2023, title={Latent Constraints on Unsupervised Text-Graph Alignment with Information Asymmetry}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26600}, DOI={10.1609/aaai.v37i11.26600}, abstractNote={Unsupervised text-graph alignment (UTGA) is a fundamental task that bidirectionally generates texts and graphs without parallel data. Most available models of UTGA suffer from information asymmetry, a common phenomenon that texts and graphs include additional information invisible to each other. On the one hand, these models fail to supplement asymmetric information effectively due to the lack of ground truths. On the other hand, it is challenging to indicate asymmetric information with explicit indicators because it cannot be decoupled from the data directly. To address the challenge posed by information asymmetry, we propose the assumption that asymmetric information is encoded in unobservable latent variables and only affects the one-way generation processes. These latent variables corresponding to asymmetric information should obey prior distributions recovered approximately from original data. Therefore, we first propose a taxonomy of the latent variable that classifies the latent variable into transferrable (TV) and non-transferable (NTV) variables and further distinguish NTV as the dependent variable (DV) and the independent variable (IV). Next, we propose three latent VAE-based regularizations on TV, DV, and IV to constrain their distributions to well-designed prior distributions to introduce asymmetric information into models and enhance the preservation of shared contents. Finally, we impose the three proposed constraints on a cycle-consistent learning framework, back-translation (BT), named ConstrainedBT. 
Experimental results on three UTGA tasks demonstrate the effectiveness of ConstrainedBT on the information-asymmetric challenge.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Jidong and Chen, Wenqing and Li, Yitian and Fan, Caoyun and He, Hao and Jin, Yaohui}, year={2023}, month={Jun.}, pages={13655-13663} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26600/26372", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26600", + "pdf_size": 322210, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:k9ZobYMBw2MJ:scholar.google.com/&scioq=Latent+Constraints+on+Unsupervised+Text-Graph+Alignment+with+Information+Asymmetry&hl=en&as_sdt=0,10", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;mail.sysu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;mail.sysu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;1;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Shanghai Jiao Tong University;Sun Yat-sen University", + "aff_unique_dep": "AI Institute;School of Software Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn;http://www.sysu.edu.cn", + "aff_unique_abbr": "SJTU;SYSU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0;0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26943", + "title": "Latent Space Evolution under Incremental Learning with Concept Drift (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "This work investigates the evolution of latent space when deep learning models are trained incrementally in non-stationary environments that stem from concept drift. We propose a methodology for visualizing the incurred change in latent representations. 
We further show that classes not targeted by concept drift can be negatively affected, suggesting that the observation of all classes during learning may regularize the latent space.", + "primary_area": "", + "author": "Charles Bourbeau; Audrey Durand", + "authorids": "", + "aff": "D\u00b4epartement d\u2019informatique et de g \u00b4enie logiciel, Universit \u00b4e Laval, Qu \u00b4ebec (QC), Canada+D\u00b4epartement de g \u00b4enie \u00b4electrique et de g \u00b4enie informatique, Universit \u00b4e Laval, Qu \u00b4ebec (QC), Canada+Canada-CIFAR AI Chair, Mila, Qu \u00b4ebec (QC), Canada; D\u00b4epartement d\u2019informatique et de g \u00b4enie logiciel, Universit \u00b4e Laval, Qu \u00b4ebec (QC), Canada+D\u00b4epartement de g \u00b4enie \u00b4electrique et de g \u00b4enie informatique, Universit \u00b4e Laval, Qu \u00b4ebec (QC), Canada+Canada-CIFAR AI Chair, Mila, Qu \u00b4ebec (QC), Canada", + "bibtex": "@article{Bourbeau_Durand_2024, title={Latent Space Evolution under Incremental Learning with Concept Drift (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26943}, DOI={10.1609/aaai.v37i13.26943}, abstractNote={This work investigates the evolution of latent space when deep learning models are trained incrementally in non-stationary environments that stem from concept drift. We propose a methodology for visualizing the incurred change in latent representations. 
We further show that classes not targeted by concept drift can be negatively affected, suggesting that the observation of all classes during learning may regularize the latent space.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bourbeau, Charles and Durand, Audrey}, year={2024}, month={Jul.}, pages={16166-16167} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26943/26715", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26943", + "pdf_size": 94844, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:1nU8aFl3w18J:scholar.google.com/&scioq=Latent+Space+Evolution+under+Incremental+Learning+with+Concept+Drift+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff_domain": "ulaval.ca; ", + "email": "ulaval.ca; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0+1;0+0+1", + "aff_unique_norm": "Universit\u00e9 Laval;Mila", + "aff_unique_dep": "D\u00e9partement d\u2019informatique et de g\u00e9nie logiciel;Canada-CIFAR AI Chair", + "aff_unique_url": "https://www.ulaval.ca;https://mila.quebec", + "aff_unique_abbr": "UL;Mila", + "aff_campus_unique_index": "0+0+1;0+0+1", + "aff_campus_unique": "Qu\u00e9bec;Qu\u00c3\u00a9bec", + "aff_country_unique_index": "0+0+0;0+0+0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26023", + "title": "Layer-Wise Adaptive Model Aggregation for Scalable Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "In Federated Learning (FL), a common approach for aggregating local solutions across clients is periodic full model averaging. It is, however, known that different layers of neural networks can have a different degree of model discrepancy across the clients. The conventional full aggregation scheme does not consider such a difference and synchronizes the whole model parameters at once, resulting in inefficient network bandwidth consumption. 
Aggregating the parameters that are similar across the clients does not make meaningful training progress while increasing the communication cost. We propose FedLAMA, a layer-wise adaptive model aggregation scheme for scalable FL. FedLAMA adjusts the aggregation interval in a layer-wise manner, jointly considering the model discrepancy and the communication cost. This fine-grained aggregation strategy enables to reduce the communication cost without significantly harming the model accuracy. Our extensive empirical study shows that, as the aggregation interval increases, FedLAMA shows a remarkably smaller accuracy drop than the periodic full aggregation, while achieving comparable communication efficiency.", + "primary_area": "machine learning ii", + "author": "Sunwoo Lee; Tuo Zhang; A. Salman Avestimehr", + "authorids": "", + "aff": "University of Southern California, USA+Inha University, South Korea; University of Southern California, USA; University of Southern California, USA", + "bibtex": "@article{Lee_Zhang_Avestimehr_2023, title={Layer-Wise Adaptive Model Aggregation for Scalable Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26023}, DOI={10.1609/aaai.v37i7.26023}, abstractNote={In Federated Learning (FL), a common approach for aggregating local solutions across clients is periodic full model averaging. It is, however, known that different layers of neural networks can have a different degree of model discrepancy across the clients. The conventional full aggregation scheme does not consider such a difference and synchronizes the whole model parameters at once, resulting in inefficient network bandwidth consumption. Aggregating the parameters that are similar across the clients does not make meaningful training progress while increasing the communication cost. We propose FedLAMA, a layer-wise adaptive model aggregation scheme for scalable FL. 
FedLAMA adjusts the aggregation interval in a layer-wise manner, jointly considering the model discrepancy and the communication cost. This fine-grained aggregation strategy enables to reduce the communication cost without significantly harming the model accuracy. Our extensive empirical study shows that, as the aggregation interval increases, FedLAMA shows a remarkably smaller accuracy drop than the periodic full aggregation, while achieving comparable communication efficiency.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Sunwoo and Zhang, Tuo and Avestimehr, A. Salman}, year={2023}, month={Jun.}, pages={8491-8499} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26023/25795", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26023", + "pdf_size": 691163, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6899854277302127585&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "usc.edu;usc.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "University of Southern California;Inha University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.usc.edu;https://www.inha.edu/", + "aff_unique_abbr": "USC;Inha", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "article-26277", + "title": "Layout Generation as Intermediate Action Sequence Prediction", + "track": "main", + "status": "Technical", + "abstract": "Layout generation plays a crucial role in graphic design intelligence. One important characteristic of the graphic layouts is that they usually follow certain design principles. For example, the principle of repetition emphasizes the reuse of similar visual elements throughout the design. 
To generate a layout, previous works mainly attempt at predicting the absolute value of bounding box for each element, where such target representation has hidden the information of higher-order design operations like repetition (e.g. copy the size of the previously generated element). In this paper, we introduce a novel action schema to encode these operations for better modeling the generation process. Instead of predicting the bounding box values, our approach autoregressively outputs the intermediate action sequence, which can then be deterministically converted to the final layout. We achieve state-of-the-art performances on three datasets. Both automatic and human evaluations show that our approach generates high-quality and diverse layouts. Furthermore, we revisit the commonly used evaluation metric FID adapted in this task, and observe that previous works use different settings to train the feature extractor for obtaining real/generated data distribution, which leads to inconsistent conclusions. We conduct an in-depth analysis on this metric and settle for a more robust and reliable evaluation setting. Code is available at this website.", + "primary_area": "machine learning iv", + "author": "Huiting Yang; Danqing Huang; Chin-Yew Lin; Shengfeng He", + "authorids": "", + "aff": "South China University of Technology+Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Singapore Management University", + "bibtex": "@article{Yang_Huang_Lin_He_2023, title={Layout Generation as Intermediate Action Sequence Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26277}, DOI={10.1609/aaai.v37i9.26277}, abstractNote={Layout generation plays a crucial role in graphic design intelligence. One important characteristic of the graphic layouts is that they usually follow certain design principles. For example, the principle of repetition emphasizes the reuse of similar visual elements throughout the design. 
To generate a layout, previous works mainly attempt at predicting the absolute value of bounding box for each element, where such target representation has hidden the information of higher-order design operations like repetition (e.g. copy the size of the previously generated element). In this paper, we introduce a novel action schema to encode these operations for better modeling the generation process. Instead of predicting the bounding box values, our approach autoregressively outputs the intermediate action sequence, which can then be deterministically converted to the final layout. We achieve state-of-the-art performances on three datasets. Both automatic and human evaluations show that our approach generates high-quality and diverse layouts. Furthermore, we revisit the commonly used evaluation metric FID adapted in this task, and observe that previous works use different settings to train the feature extractor for obtaining real/generated data distribution, which leads to inconsistent conclusions. We conduct an in-depth analysis on this metric and settle for a more robust and reliable evaluation setting. 
Code is available at this website.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Huiting and Huang, Danqing and Lin, Chin-Yew and He, Shengfeng}, year={2023}, month={Jun.}, pages={10762-10770} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26277/26049", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26277", + "pdf_size": 1394975, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1994311189932854707&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;microsoft.com;microsoft.com;smu.edu.sg", + "email": "gmail.com;microsoft.com;microsoft.com;smu.edu.sg", + "github": "https://github.com/microsoft/KC/tree/main/papers/LayoutAction", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;2", + "aff_unique_norm": "South China University of Technology;Microsoft Research;Singapore Management University", + "aff_unique_dep": ";Research;", + "aff_unique_url": "https://www.scut.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.smu.edu.sg", + "aff_unique_abbr": "SCUT;MSR Asia;SMU", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0+0;0;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25092", + "title": "Layout Representation Learning with Spatial and Structural Hierarchies", + "track": "main", + "status": "Technical", + "abstract": "We present a novel hierarchical modeling method for layout representation learning, the core of design documents (e.g., user interface, poster, template).\nExisting works on layout representation often ignore element hierarchies, which is an important facet of layouts, and mainly rely on the spatial bounding boxes for feature extraction.\nThis paper proposes a Spatial-Structural Hierarchical Auto-Encoder (SSH-AE) that learns hierarchical representation by treating a hierarchically annotated 
layout as a tree format.\nOn the one side, we model SSH-AE from both spatial (semantic views) and structural (organization and relationships) perspectives, which are two complementary aspects to represent a layout.\nOn the other side, the semantic/geometric properties are associated at multiple resolutions/granularities, naturally handling complex layouts.\nOur learned representations are used for effective layout search from both spatial and structural similarity perspectives.\nWe also newly involve the tree-edit distance (TED) as an evaluation metric to construct a comprehensive evaluation protocol for layout similarity assessment, which benefits a systematic and customized layout search.\nWe further present a new dataset of POSTER layouts which we believe will be useful for future layout research.\nWe show that our proposed SSH-AE outperforms the existing methods achieving state-of-the-art performance on two benchmark datasets.\nCode is available at github.com/yueb17/SSH-AE.", + "primary_area": "computer vision i", + "author": "Yue Bai; Dipu Manandhar; Zhaowen Wang; John Collomosse; Yun Fu", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, Northeastern University; Centre for Vision, Speech and Signal Processing, University of Surrey; Adobe Research; Adobe Research; Department of Electrical and Computer Engineering, Northeastern University+Khoury College of Computer Science, Northeastern University", + "bibtex": "@article{Bai_Manandhar_Wang_Collomosse_Fu_2023, title={Layout Representation Learning with Spatial and Structural Hierarchies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25092}, DOI={10.1609/aaai.v37i1.25092}, abstractNote={We present a novel hierarchical modeling method for layout representation learning, the core of design documents (e.g., user interface, poster, template).\nExisting works on layout representation often ignore element hierarchies, which is an important facet of layouts, and mainly 
rely on the spatial bounding boxes for feature extraction.\nThis paper proposes a Spatial-Structural Hierarchical Auto-Encoder (SSH-AE) that learns hierarchical representation by treating a hierarchically annotated layout as a tree format.\nOn the one side, we model SSH-AE from both spatial (semantic views) and structural (organization and relationships) perspectives, which are two complementary aspects to represent a layout.\nOn the other side, the semantic/geometric properties are associated at multiple resolutions/granularities, naturally handling complex layouts.\nOur learned representations are used for effective layout search from both spatial and structural similarity perspectives.\nWe also newly involve the tree-edit distance (TED) as an evaluation metric to construct a comprehensive evaluation protocol for layout similarity assessment, which benefits a systematic and customized layout search.\nWe further present a new dataset of POSTER layouts which we believe will be useful for future layout research.\nWe show that our proposed SSH-AE outperforms the existing methods achieving state-of-the-art performance on two benchmark datasets.\nCode is available at github.com/yueb17/SSH-AE.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bai, Yue and Manandhar, Dipu and Wang, Zhaowen and Collomosse, John and Fu, Yun}, year={2023}, month={Jun.}, pages={206-214} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25092/24864", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25092", + "pdf_size": 2624925, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=105592825845551842&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "northeastern.edu; ; ; ; ", + "email": "northeastern.edu; ; ; ; ", + "github": "github.com/yueb17/SSH-AE", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;0+0", + "aff_unique_norm": "Northeastern 
University;University of Surrey;Adobe", + "aff_unique_dep": "Department of Electrical and Computer Engineering;Centre for Vision, Speech and Signal Processing;Adobe Research", + "aff_unique_url": "https://www.northeastern.edu;https://www.surrey.ac.uk;https://research.adobe.com", + "aff_unique_abbr": "NU;Surrey;Adobe", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0+0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "article-25223", + "title": "Layout-Aware Dreamer for Embodied Visual Referring Expression Grounding", + "track": "main", + "status": "Technical", + "abstract": "In this work, we study the problem of Embodied Referring Expression Grounding, where an agent needs to navigate in a previously unseen environment and localize a remote object described by a concise high-level natural language instruction. When facing such a situation, a human tends to imagine what the destination may look like and to explore the environment based on prior knowledge of the environmental layout, such as the fact that a bathroom is more likely to be found near a bedroom than a kitchen. We have designed an autonomous agent called Layout-aware Dreamer (LAD), including two novel modules, that is, the Layout Learner and the Goal Dreamer to mimic this cognitive decision process. The Layout Learner learns to infer the room category distribution of neighboring unexplored areas along the path for coarse layout estimation, which effectively introduces layout common sense of room-to-room transitions to our agent. To learn an effective exploration of the environment, the Goal Dreamer imagines the destination beforehand. Our agent achieves new state-of-the-art performance on the public leaderboard of REVERIE dataset in challenging unseen test environments with improvement on navigation success rate (SR) by 4.02% and remote grounding success (RGS) by 3.43% comparing to previous previous state of the art. 
The code\nis released at https://github.com/zehao-wang/LAD.", + "primary_area": "computer vision i", + "author": "Mingxiao Li; Zehao Wang; Tinne Tuytelaars; Marie-Francine Moens", + "authorids": "", + "aff": "Computer Science Department of KU Leuven; Electrical Engineering Department (ESAT-PSI) of KU Leuven; Electrical Engineering Department (ESAT-PSI) of KU Leuven; Computer Science Department of KU Leuven", + "bibtex": "@article{Li_Wang_Tuytelaars_Moens_2023, title={Layout-Aware Dreamer for Embodied Visual Referring Expression Grounding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25223}, DOI={10.1609/aaai.v37i1.25223}, abstractNote={In this work, we study the problem of Embodied Referring Expression Grounding, where an agent needs to navigate in a previously unseen environment and localize a remote object described by a concise high-level natural language instruction. When facing such a situation, a human tends to imagine what the destination may look like and to explore the environment based on prior knowledge of the environmental layout, such as the fact that a bathroom is more likely to be found near a bedroom than a kitchen. We have designed an autonomous agent called Layout-aware Dreamer (LAD), including two novel modules, that is, the Layout Learner and the Goal Dreamer to mimic this cognitive decision process. The Layout Learner learns to infer the room category distribution of neighboring unexplored areas along the path for coarse layout estimation, which effectively introduces layout common sense of room-to-room transitions to our agent. To learn an effective exploration of the environment, the Goal Dreamer imagines the destination beforehand. Our agent achieves new state-of-the-art performance on the public leaderboard of REVERIE dataset in challenging unseen test environments with improvement on navigation success rate (SR) by 4.02% and remote grounding success (RGS) by 3.43% comparing to previous previous state of the art. 
The code\nis released at https://github.com/zehao-wang/LAD.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Mingxiao and Wang, Zehao and Tuytelaars, Tinne and Moens, Marie-Francine}, year={2023}, month={Jun.}, pages={1386-1395} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25223/24995", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25223", + "pdf_size": 11206295, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13724763044130592989&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.kuleuven.be;esat.kuleuven.be;esat.kuleuven.be;cs.kuleuven.be", + "email": "cs.kuleuven.be;esat.kuleuven.be;esat.kuleuven.be;cs.kuleuven.be", + "github": "https://github.com/zehao-wang/LAD", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "KU Leuven", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.kuleuven.be", + "aff_unique_abbr": "KU Leuven", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Belgium" + }, + { + "id": "article-25351", + "title": "LeNo: Adversarial Robust Salient Object Detection Networks with Learnable Noise", + "track": "main", + "status": "Technical", + "abstract": "Pixel-wise prediction with deep neural network has become an effective paradigm for salient object detection (SOD) and achieved remarkable performance. However, very few SOD models are robust against adversarial attacks which are visually imperceptible for human visual attention. The previous work robust saliency (ROSA) shuffles the pre-segmented superpixels and then refines the coarse saliency map by the densely connected conditional random field (CRF). 
Different from ROSA that rely on various pre- and post-processings, this paper proposes a light-weight Learnable Noise (LeNo) to defend adversarial attacks for SOD models. LeNo preserves accuracy of SOD models on both adversarial and clean images, as well as inference speed. In general, LeNo consists of a simple shallow noise and noise estimation that embedded in the encoder and decoder of arbitrary SOD networks respectively. Inspired by the center prior of human visual attention mechanism, we initialize the shallow noise with a cross-shaped gaussian distribution for better defense against adversarial attacks. Instead of adding additional network components for post-processing, the proposed noise estimation modifies only one channel of the decoder. With the deeply-supervised noise-decoupled training on state-of-the-art RGB and RGB-D SOD networks, LeNo outperforms previous works not only on adversarial images but also on clean images, which contributes stronger robustness for SOD. Our code is available at https://github.com/ssecv/LeNo.", + "primary_area": "computer vision ii", + "author": "He Wang; Lin Wan; He Tang", + "authorids": "", + "aff": "School of Software Engineering, Huazhong University of Science and Technology + School of Cyber Science and Engineering, Huazhong University of Science and Technology; School of Software Engineering, Huazhong University of Science and Technology; School of Software Engineering, Huazhong University of Science and Technology", + "bibtex": "@article{Wang_Wan_Tang_2023, title={LeNo: Adversarial Robust Salient Object Detection Networks with Learnable Noise}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25351}, DOI={10.1609/aaai.v37i2.25351}, abstractNote={Pixel-wise prediction with deep neural network has become an effective paradigm for salient object detection (SOD) and achieved remarkable performance. 
However, very few SOD models are robust against adversarial attacks which are visually imperceptible for human visual attention. The previous work robust saliency (ROSA) shuffles the pre-segmented superpixels and then refines the coarse saliency map by the densely connected conditional random field (CRF). Different from ROSA that rely on various pre- and post-processings, this paper proposes a light-weight Learnable Noise (LeNo) to defend adversarial attacks for SOD models. LeNo preserves accuracy of SOD models on both adversarial and clean images, as well as inference speed. In general, LeNo consists of a simple shallow noise and noise estimation that embedded in the encoder and decoder of arbitrary SOD networks respectively. Inspired by the center prior of human visual attention mechanism, we initialize the shallow noise with a cross-shaped gaussian distribution for better defense against adversarial attacks. Instead of adding additional network components for post-processing, the proposed noise estimation modifies only one channel of the decoder. With the deeply-supervised noise-decoupled training on state-of-the-art RGB and RGB-D SOD networks, LeNo outperforms previous works not only on adversarial images but also on clean images, which contributes stronger robustness for SOD. 
Our code is available at https://github.com/ssecv/LeNo.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, He and Wan, Lin and Tang, He}, year={2023}, month={Jun.}, pages={2537-2545} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25351/25123", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25351", + "pdf_size": 1195922, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3347905271534692010&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/ssecv/LeNo", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0;0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "School of Software Engineering", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25501", + "title": "Learn More for Food Recognition via Progressive Self-Distillation", + "track": "main", + "status": "Technical", + "abstract": "Food recognition has a wide range of applications, such as health-aware recommendation and self-service restaurants. Most previous methods of food recognition firstly locate informative regions in some weakly-supervised manners and then aggregate their features. However, location errors of informative regions limit the effectiveness of these methods to some extent. Instead of locating multiple regions, we propose a Progressive Self-Distillation (PSD) method, which progressively enhances the ability of network to mine more details for food recognition. 
The training of PSD simultaneously contains multiple self-distillations, in which a teacher network and a student network share the same embedding network. Since the student network receives a modified image from its teacher network by masking some informative regions, the teacher network outputs stronger semantic representations than the student network. Guided by such teacher network with stronger semantics, the student network is encouraged to mine more useful regions from the modified image by enhancing its own ability. The ability of the teacher network is also enhanced with the shared embedding network. By using progressive training, the teacher network incrementally improves its ability to mine more discriminative regions. In inference phase, only the teacher network is used without the help of the student network. Extensive experiments on three datasets demonstrate the effectiveness of our proposed method and state-of-the-art performance.", + "primary_area": "computer vision iii", + "author": "Yaohui Zhu; Linhu Liu; Jiang Tian", + "authorids": "", + "aff": "School of Artifical Intelligance, Beijing Normal University, Beijing 10875, China; AI Lab, Lenovo Research, Beijing, China; AI Lab, Lenovo Research, Beijing, China", + "bibtex": "@article{Zhu_Liu_Tian_2023, title={Learn More for Food Recognition via Progressive Self-Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25501}, DOI={10.1609/aaai.v37i3.25501}, abstractNote={Food recognition has a wide range of applications, such as health-aware recommendation and self-service restaurants. Most previous methods of food recognition firstly locate informative regions in some weakly-supervised manners and then aggregate their features. However, location errors of informative regions limit the effectiveness of these methods to some extent. 
Instead of locating multiple regions, we propose a Progressive Self-Distillation (PSD) method, which progressively enhances the ability of network to mine more details for food recognition. The training of PSD simultaneously contains multiple self-distillations, in which a teacher network and a student network share the same embedding network. Since the student network receives a modified image from its teacher network by masking some informative regions, the teacher network outputs stronger semantic representations than the student network. Guided by such teacher network with stronger semantics, the student network is encouraged to mine more useful regions from the modified image by enhancing its own ability. The ability of the teacher network is also enhanced with the shared embedding network. By using progressive training, the teacher network incrementally improves its ability to mine more discriminative regions. In inference phase, only the teacher network is used without the help of the student network. 
Extensive experiments on three datasets demonstrate the effectiveness of our proposed method and state-of-the-art performance.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Yaohui and Liu, Linhu and Tian, Jiang}, year={2023}, month={Jun.}, pages={3879-3887} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25501/25273", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25501", + "pdf_size": 520165, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2003285162095987010&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "bnu.edu.cn;lenovo.com;lenovo.com", + "email": "bnu.edu.cn;lenovo.com;lenovo.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Beijing Normal University;Lenovo Research", + "aff_unique_dep": "School of Artificial Intelligence;AI Lab", + "aff_unique_url": "https://www.bnu.edu.cn;https://www.lenovo.com", + "aff_unique_abbr": "BNU;Lenovo", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26492", + "title": "Learn from Yesterday: A Semi-supervised Continual Learning Method for Supervision-Limited Text-to-SQL Task Streams", + "track": "main", + "status": "Technical", + "abstract": "Conventional text-to-SQL studies are limited to a single task with a fixed-size training and test set. When confronted with a stream of tasks common in real-world applications, existing methods struggle with the problems of insufficient supervised data and high retraining costs. The former tends to cause overfitting on unseen databases for the new task, while the latter makes a full review of instances from past tasks impractical for the model, resulting in forgetting of learned SQL structures and database schemas. 
To address the problems, this paper proposes integrating semi-supervised learning (SSL) and continual learning (CL) in a stream of text-to-SQL tasks and offers two promising solutions in turn. The first solution Vanilla is to perform self-training, augmenting the supervised training data with predicted pseudo-labeled instances of the current task, while replacing the full volume retraining with episodic memory replay to balance the training efficiency with the performance of previous tasks. The improved solution SFNet takes advantage of the intrinsic connection between CL and SSL. It uses in-memory past information to help current SSL, while adding high-quality pseudo instances in memory to improve future replay. The experiments on two datasets shows that SFNet outperforms the widely-used SSL-only and CL-only baselines on multiple metrics.", + "primary_area": "speech natural language processing", + "author": "Yongrui Chen; Xinnan Guo; Tongtong Wu; Guilin Qi; Yang Li; Yang Dong", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University + Department of Data Science & AI, Monash University; School of Computer Science and Engineering, Southeast University; Alibaba Group; Ant Group", + "bibtex": "@article{Chen_Guo_Wu_Qi_Li_Dong_2023, title={Learn from Yesterday: A Semi-supervised Continual Learning Method for Supervision-Limited Text-to-SQL Task Streams}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26492}, DOI={10.1609/aaai.v37i11.26492}, abstractNote={Conventional text-to-SQL studies are limited to a single task with a fixed-size training and test set. When confronted with a stream of tasks common in real-world applications, existing methods struggle with the problems of insufficient supervised data and high retraining costs. 
The former tends to cause overfitting on unseen databases for the new task, while the latter makes a full review of instances from past tasks impractical for the model, resulting in forgetting of learned SQL structures and database schemas. To address the problems, this paper proposes integrating semi-supervised learning (SSL) and continual learning (CL) in a stream of text-to-SQL tasks and offers two promising solutions in turn. The first solution Vanilla is to perform self-training, augmenting the supervised training data with predicted pseudo-labeled instances of the current task, while replacing the full volume retraining with episodic memory replay to balance the training efficiency with the performance of previous tasks. The improved solution SFNet takes advantage of the intrinsic connection between CL and SSL. It uses in-memory past information to help current SSL, while adding high-quality pseudo instances in memory to improve future replay. The experiments on two datasets shows that SFNet outperforms the widely-used SSL-only and CL-only baselines on multiple metrics.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Yongrui and Guo, Xinnan and Wu, Tongtong and Qi, Guilin and Li, Yang and Dong, Yang}, year={2023}, month={Jun.}, pages={12682-12690} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26492/26264", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26492", + "pdf_size": 501174, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7340927814699000067&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "seu.edu.cn;163.com;seu.edu.cn;seu.edu.cn;alibaba-inc.com;gmail.com", + "email": "seu.edu.cn;163.com;seu.edu.cn;seu.edu.cn;alibaba-inc.com;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1;0;2;3", + "aff_unique_norm": "Southeast University;Monash University;Alibaba Group;Ant 
Group", + "aff_unique_dep": "School of Computer Science and Engineering;Department of Data Science & AI;;", + "aff_unique_url": "https://www.seu.edu.cn/;https://www.monash.edu;https://www.alibaba.com;https://www.antgroup.com", + "aff_unique_abbr": "SEU;Monash;Alibaba;Ant Group", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25446", + "title": "Learnable Blur Kernel for Single-Image Defocus Deblurring in the Wild", + "track": "main", + "status": "Technical", + "abstract": "Recent research showed that the dual-pixel sensor has made great progress in defocus map estimation and image defocus deblurring.\nHowever, extracting real-time dual-pixel views is troublesome and complex in algorithm deployment.\nMoreover, the deblurred image generated by the defocus deblurring network lacks high-frequency details, which is unsatisfactory in human perception. To overcome this issue, we propose a novel defocus deblurring method that uses the guidance of the defocus map to implement image deblurring.\nThe proposed method consists of a learnable blur kernel to estimate the defocus map, which is an unsupervised method, and a single-image defocus deblurring generative adversarial network (DefocusGAN) for the first time.\nThe proposed network can learn the deblurring of different regions and recover realistic details. 
We propose a defocus adversarial loss to guide this training process.\nCompetitive experimental results confirm that with a learnable blur kernel, the generated defocus map can achieve results comparable to supervised methods.\nIn the single-image defocus deblurring task, the proposed method achieves state-of-the-art results, especially significant improvements in perceptual quality, where PSNR reaches 25.56 dB and LPIPS reaches 0.111.", + "primary_area": "computer vision iii", + "author": "Jucai Zhai; Pengcheng Zeng; Chihao Ma; Jie Chen; Yong Zhao", + "authorids": "", + "aff": "Shenzhen Graduate School, Peking University; Shenzhen Graduate School, Peking University; Shenzhen Graduate School, Peking University; Shenzhen Graduate School, Peking University + Peng Cheng Laborator; Shenzhen Graduate School, Peking University", + "bibtex": "@article{Zhai_Zeng_Ma_Chen_Zhao_2023, title={Learnable Blur Kernel for Single-Image Defocus Deblurring in the Wild}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25446}, DOI={10.1609/aaai.v37i3.25446}, abstractNote={Recent research showed that the dual-pixel sensor has made great progress in defocus map estimation and image defocus deblurring.\nHowever, extracting real-time dual-pixel views is troublesome and complex in algorithm deployment.\nMoreover, the deblurred image generated by the defocus deblurring network lacks high-frequency details, which is unsatisfactory in human perception. To overcome this issue, we propose a novel defocus deblurring method that uses the guidance of the defocus map to implement image deblurring.\nThe proposed method consists of a learnable blur kernel to estimate the defocus map, which is an unsupervised method, and a single-image defocus deblurring generative adversarial network (DefocusGAN) for the first time.\nThe proposed network can learn the deblurring of different regions and recover realistic details. 
We propose a defocus adversarial loss to guide this training process.\nCompetitive experimental results confirm that with a learnable blur kernel, the generated defocus map can achieve results comparable to supervised methods.\nIn the single-image defocus deblurring task, the proposed method achieves state-of-the-art results, especially significant improvements in perceptual quality, where PSNR reaches 25.56 dB and LPIPS reaches 0.111.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhai, Jucai and Zeng, Pengcheng and Ma, Chihao and Chen, Jie and Zhao, Yong}, year={2023}, month={Jun.}, pages={3384-3392} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25446/25218", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25446", + "pdf_size": 1636492, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3400169255139283050&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.pku.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pcl.ac.cn;pkusz.edu.cn", + "email": "stu.pku.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pcl.ac.cn;pkusz.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0+1;0", + "aff_unique_norm": "Peking University;Peng Cheng Laboratory", + "aff_unique_dep": "Graduate School;", + "aff_unique_url": "http://www.pku.edu.cn;http://www.pcl.ac.cn", + "aff_unique_abbr": "PKU;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25969", + "title": "Learnable Path in Neural Controlled Differential Equations", + "track": "main", + "status": "Technical", + "abstract": "Neural controlled differential equations (NCDEs), which are continuous analogues to recurrent neural networks (RNNs), are a specialized model in (irregular) time-series processing. 
In comparison with similar models, e.g., neural ordinary differential equations (NODEs), the key distinctive characteristics of NCDEs are i) the adoption of the continuous path created by an interpolation algorithm from each raw discrete time-series sample and ii) the adoption of the Riemann--Stieltjes integral. It is the continuous path which makes NCDEs be analogues to continuous RNNs. However, NCDEs use existing interpolation algorithms to create the path, which is unclear whether they can create an optimal path. To this end, we present a method to generate another latent path (rather than relying on existing interpolation algorithms), which is identical to learning an appropriate interpolation method. We design an encoder-decoder module based on NCDEs and NODEs, and a special training method for it. Our method shows the best performance in both time-series classification and forecasting.", + "primary_area": "machine learning ii", + "author": "Sheo Yon Jhin; Minju Jo; Seungji Kook; Noseong Park", + "authorids": "", + "aff": "Yonsei University; Yonsei University; Yonsei University; Yonsei University", + "bibtex": "@article{Jhin_Jo_Kook_Park_2023, title={Learnable Path in Neural Controlled Differential Equations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25969}, DOI={10.1609/aaai.v37i7.25969}, abstractNote={Neural controlled differential equations (NCDEs), which are continuous analogues to recurrent neural networks (RNNs), are a specialized model in (irregular) time-series processing. In comparison with similar models, e.g., neural ordinary differential equations (NODEs), the key distinctive characteristics of NCDEs are i) the adoption of the continuous path created by an interpolation algorithm from each raw discrete time-series sample and ii) the adoption of the Riemann--Stieltjes integral. It is the continuous path which makes NCDEs be analogues to continuous RNNs. 
However, NCDEs use existing interpolation algorithms to create the path, which is unclear whether they can create an optimal path. To this end, we present a method to generate another latent path (rather than relying on existing interpolation algorithms), which is identical to learning an appropriate interpolation method. We design an encoder-decoder module based on NCDEs and NODEs, and a special training method for it. Our method shows the best performance in both time-series classification and forecasting.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jhin, Sheo Yon and Jo, Minju and Kook, Seungji and Park, Noseong}, year={2023}, month={Jun.}, pages={8014-8022} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25969/25741", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25969", + "pdf_size": 1185546, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7719569368287109145&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr", + "email": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Yonsei University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.yonsei.ac.kr", + "aff_unique_abbr": "Yonsei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25831", + "title": "Learnable Spectral Wavelets on Dynamic Graphs to Capture Global Interactions", + "track": "main", + "status": "Technical", + "abstract": "Learning on evolving(dynamic) graphs has caught the attention of researchers as static methods exhibit limited performance in this setting. 
The existing methods for dynamic graphs learn spatial features by local neighborhood aggregation, which essentially only captures the low pass signals and local interactions. In this work, we go beyond current approaches to incorporate global features for effectively learning representations of a dynamically evolving graph. \nWe propose to do so by capturing the spectrum of the dynamic graph. Since static methods to learn the graph spectrum would not consider the history of the evolution of the spectrum as the graph evolves with time, we propose an approach to learn the graph wavelets to capture this evolving spectra.\nFurther, we propose a framework that integrates the dynamically captured spectra in the form of these learnable wavelets into spatial features for incorporating local and global interactions. Experiments on eight standard datasets show that our method significantly outperforms related methods on various tasks for dynamic graphs.", + "primary_area": "machine learning i", + "author": "Anson Bastos; Abhishek Nadgeri; Kuldeep Singh; Toyotaro Suzumura; Manish Singh", + "authorids": "", + "aff": "Indian Institute of Technology Hyderabad, India; RWTH Aachen, Germany; Zerotha Research + Cerence Gmbh, Germany; The University of Tokyo, Japan; Indian Institute of Technology Hyderabad, India", + "bibtex": "@article{Bastos_Nadgeri_Singh_Suzumura_Singh_2023, title={Learnable Spectral Wavelets on Dynamic Graphs to Capture Global Interactions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25831}, DOI={10.1609/aaai.v37i6.25831}, abstractNote={Learning on evolving(dynamic) graphs has caught the attention of researchers as static methods exhibit limited performance in this setting. The existing methods for dynamic graphs learn spatial features by local neighborhood aggregation, which essentially only captures the low pass signals and local interactions. 
In this work, we go beyond current approaches to incorporate global features for effectively learning representations of a dynamically evolving graph. We propose to do so by capturing the spectrum of the dynamic graph. Since static methods to learn the graph spectrum would not consider the history of the evolution of the spectrum as the graph evolves with time, we propose an approach to learn the graph wavelets to capture this evolving spectra.\nFurther, we propose a framework that integrates the dynamically captured spectra in the form of these learnable wavelets into spatial features for incorporating local and global interactions. Experiments on eight standard datasets show that our method significantly outperforms related methods on various tasks for dynamic graphs.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bastos, Anson and Nadgeri, Abhishek and Singh, Kuldeep and Suzumura, Toyotaro and Singh, Manish}, year={2023}, month={Jun.}, pages={6779-6787} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25831/25603", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25831", + "pdf_size": 388443, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10474930668360328619&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "iith.ac.in;rwth-aachen.de;cerence.com;acm.org;cse.iith.ac.in", + "email": "iith.ac.in;rwth-aachen.de;cerence.com;acm.org;cse.iith.ac.in", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2+3;4;0", + "aff_unique_norm": "Indian Institute of Technology Hyderabad;RWTH Aachen University;Zerotha Research;Cerence GmbH;The University of Tokyo", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.iith.ac.in;https://www.rwth-aachen.de;;https://www.cerence.com;https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "IIT Hyderabad;RWTH;;Cerence;UTokyo", + "aff_campus_unique_index": "0;;0", + 
"aff_campus_unique": "Hyderabad;", + "aff_country_unique_index": "0;1;1;3;0", + "aff_country_unique": "India;Germany;;Japan" + }, + { + "id": "article-25551", + "title": "Learned Distributed Image Compression with Multi-Scale Patch Matching in Feature Domain", + "track": "main", + "status": "Technical", + "abstract": "Beyond achieving higher compression efficiency over classical image compression codecs, deep image compression is expected to be improved with additional side information, e.g., another image from a different perspective of the same scene. To better utilize the side information under the distributed compression scenario, the existing method only implements patch matching at the image domain to solve the parallax problem caused by the difference in viewing points. However, the patch matching at the image domain is not robust to the variance of scale, shape, and illumination caused by the different viewing angles, and can not make full use of the rich texture information of the side information image. To resolve this issue, we propose Multi-Scale Feature Domain Patch Matching (MSFDPM) to fully utilizes side information at the decoder of the distributed image compression model. Specifically, MSFDPM consists of a side information feature extractor, a multi-scale feature domain patch matching module, and a multi-scale feature fusion network. Furthermore, we reuse inter-patch correlation from the shallow layer to accelerate the patch matching of the deep layer. 
Finally, we find that our patch matching in a multi-scale feature domain further improves compression rate by about 20% compared with the patch matching method at image domain.", + "primary_area": "data mining and knowledge management", + "author": "Yujun Huang; Bin Chen; Shiyu Qin; Jiawei Li; Yaowei Wang; Tao Dai; Shu-Tao Xia", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University + Research Center of Artificial Intelligence, Peng Cheng Laboratory; Harbin Institute of Technology, Shenzhen + Research Center of Artificial Intelligence, Peng Cheng Laboratory + Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Institute of Technology, Shenzhen; HUAWEI Machine Co., Ltd. DongGuan; Research Center of Artificial Intelligence, Peng Cheng Laboratory; Shenzhen University; Tsinghua Shenzhen International Graduate School, Tsinghua University + Research Center of Artificial Intelligence, Peng Cheng Laboratory", + "bibtex": "@article{Huang_Chen_Qin_Li_Wang_Dai_Xia_2023, title={Learned Distributed Image Compression with Multi-Scale Patch Matching in Feature Domain}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25551}, DOI={10.1609/aaai.v37i4.25551}, abstractNote={Beyond achieving higher compression efficiency over classical image compression codecs, deep image compression is expected to be improved with additional side information, e.g., another image from a different perspective of the same scene. To better utilize the side information under the distributed compression scenario, the existing method only implements patch matching at the image domain to solve the parallax problem caused by the difference in viewing points. However, the patch matching at the image domain is not robust to the variance of scale, shape, and illumination caused by the different viewing angles, and can not make full use of the rich texture information of the side information image. 
To resolve this issue, we propose Multi-Scale Feature Domain Patch Matching (MSFDPM) to fully utilizes side information at the decoder of the distributed image compression model. Specifically, MSFDPM consists of a side information feature extractor, a multi-scale feature domain patch matching module, and a multi-scale feature fusion network. Furthermore, we reuse inter-patch correlation from the shallow layer to accelerate the patch matching of the deep layer. Finally, we find that our patch matching in a multi-scale feature domain further improves compression rate by about 20% compared with the patch matching method at image domain.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Yujun and Chen, Bin and Qin, Shiyu and Li, Jiawei and Wang, Yaowei and Dai, Tao and Xia, Shu-Tao}, year={2023}, month={Jun.}, pages={4322-4329} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25551/25323", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25551", + "pdf_size": 2521586, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4811857723803040641&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;hit.edu.cn;stu.hit.edu.cn;tsinghua.org.cn;pcl.ac.cn;gmail.com;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;hit.edu.cn;stu.hit.edu.cn;tsinghua.org.cn;pcl.ac.cn;gmail.com;sz.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2+1+3;2;4;1;5;0+1", + "aff_unique_norm": "Tsinghua University;Peng Cheng Laboratory;Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;HUAWEI Machine Co., Ltd.;Shenzhen University", + "aff_unique_dep": "International Graduate School;Research Center of Artificial Intelligence;;Provincial Key Laboratory of Novel Security Intelligence Technologies;;", + "aff_unique_url": 
"https://www.tsinghua.edu.cn;http://www.pcl.ac.cn;http://en.hhit.edu.cn/;;https://www.huawei.com;https://www.szu.edu.cn", + "aff_unique_abbr": "THU;;HIT;;HUAWEI;SZU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+0+0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26909", + "title": "Learning Adaptive Game Soundtrack Control", + "track": "eaai symposium human aware ai in sound and music", + "status": "Technical", + "abstract": "In this paper, we demonstrate a novel technique for dynamically generating an emotionally-directed video game soundtrack. We begin with a human Conductor observing gameplay and directing associated emotions that would enhance the observed gameplay experience. We apply supervised learning to data sampled from synchronized input gameplay features and Conductor output emotional direction features in order to fit a mathematical model to the Conductor's emotional direction. Then, during gameplay, the emotional direction model maps gameplay state input to emotional direction output, which is then input to a music generation module that dynamically generates emotionally-relevant music during gameplay. Our empirical study suggests that random forests serve well for modeling the Conductor for our two experimental game genres.", + "primary_area": "", + "author": "Aaron Dorsey; Todd W. Neller; Hien G. Tran; Veysel Yilmaz", + "authorids": "", + "aff": "Gettysburg College; Gettysburg College; Gettysburg College; Gettysburg College", + "bibtex": "@article{Dorsey_Neller_Tran_Yilmaz_2024, title={Learning Adaptive Game Soundtrack Control}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26909}, DOI={10.1609/aaai.v37i13.26909}, abstractNote={In this paper, we demonstrate a novel technique for dynamically generating an emotionally-directed video game soundtrack. 
We begin with a human Conductor observing gameplay and directing associated emotions that would enhance the observed gameplay experience. We apply supervised learning to data sampled from synchronized input gameplay features and Conductor output emotional direction features in order to fit a mathematical model to the Conductor\u2019s emotional direction. Then, during gameplay, the emotional direction model maps gameplay state input to emotional direction output, which is then input to a music generation module that dynamically generates emotionally-relevant music during gameplay. Our empirical study suggests that random forests serve well for modeling the Conductor for our two experimental game genres.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dorsey, Aaron and Neller, Todd W. and Tran, Hien G. and Yilmaz, Veysel}, year={2024}, month={Jul.}, pages={16070-16077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26909/26681", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26909", + "pdf_size": 692074, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:6HETd4p_lZsJ:scholar.google.com/&scioq=Learning+Adaptive+Game+Soundtrack+Control&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "gettysburg.edu;gettysburg.edu;gettysburg.edu;gettysburg.edu", + "email": "gettysburg.edu;gettysburg.edu;gettysburg.edu;gettysburg.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Gettysburg College", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gettysburg.edu", + "aff_unique_abbr": "Gettysburg College", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26027", + "title": "Learning Adversarially Robust Sparse Networks via Weight Reparameterization", + "track": "main", + 
"status": "Technical", + "abstract": "Although increasing model size can enhance the adversarial robustness of deep neural networks, in resource-constrained environments, there exist critical sparsity constraints. While the recent robust pruning technologies show promising direction to obtain adversarially robust sparse networks, they perform poorly with high sparsity. In this work, we bridge this performance gap by reparameterizing network parameters to simultaneously learn the sparse structure and the robustness. Specifically, we introduce Twin-Rep, which reparameterizes original weights into the product of two factors during training and performs pruning on the reparameterized weights to satisfy the target sparsity constraint. Twin-Rep implicitly adds the sparsity constraint without changing the robust training objective, thus can enhance robustness under high sparsity. We also introduce another variant of weight reparameterization for better channel pruning. When inferring, we restore the original weight structure to obtain compact and robust networks. Extensive experiments on diverse datasets demonstrate that our method achieves state-of-the-art results, outperforming the current sparse robust training method and robustness-aware pruning method. 
Our code is available at\nhttps://github.com/UCAS-LCH/Twin-Rep.", + "primary_area": "machine learning ii", + "author": "Chenhao Li; Qiang Qiu; Zhibin Zhang; Jiafeng Guo; Xueqi Cheng", + "authorids": "", + "aff": "CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China + University of Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Li_Qiu_Zhang_Guo_Cheng_2023, title={Learning Adversarially Robust Sparse Networks via Weight Reparameterization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26027}, DOI={10.1609/aaai.v37i7.26027}, abstractNote={Although increasing model size can enhance the adversarial robustness of deep neural networks, in resource-constrained environments, there exist critical sparsity constraints. While the recent robust pruning technologies show promising direction to obtain adversarially robust sparse networks, they perform poorly with high sparsity. In this work, we bridge this performance gap by reparameterizing network parameters to simultaneously learn the sparse structure and the robustness. Specifically, we introduce Twin-Rep, which reparameterizes original weights into the product of two factors during training and performs pruning on the reparameterized weights to satisfy the target sparsity constraint. 
Twin-Rep implicitly adds the sparsity constraint without changing the robust training objective, thus can enhance robustness under high sparsity. We also introduce another variant of weight reparameterization for better channel pruning. When inferring, we restore the original weight structure to obtain compact and robust networks. Extensive experiments on diverse datasets demonstrate that our method achieves state-of-the-art results, outperforming the current sparse robust training method and robustness-aware pruning method. Our code is available at\nhttps://github.com/UCAS-LCH/Twin-Rep.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Chenhao and Qiu, Qiang and Zhang, Zhibin and Guo, Jiafeng and Cheng, Xueqi}, year={2023}, month={Jun.}, pages={8527-8535} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26027/25799", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26027", + "pdf_size": 237149, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2170486259102094146&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "https://github.com/UCAS-LCH/Twin-Rep", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.ac.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "0+0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26883", + "title": "Learning Affects Trust: Design Recommendations and Concepts for Teaching Children\u2014and Nearly Anyone\u2014about Conversational Agents", + 
"track": "eaai symposium main track", + "status": "Technical", + "abstract": "Conversational agents are rapidly becoming commonplace. However, since these systems are typically blackboxed, users\u2014including vulnerable populations, like children\u2014often do not understand them deeply. For example, they might assume agents are overly intelligent, leading to frustration and distrust. Users may also overtrust agents, and thus overshare personal information or rely heavily on agents' advice. Despite this, little research investigates users' perceptions of conversational agents in-depth, and even less investigates how education might change these perceptions to be more healthy. We present workshops with associated educational conversational AI concepts to encourage healthier understanding of agents. Through studies with the curriculum with children and parents from various countries, we found participants' perceptions of agents\u2014specifically their partner models and trust\u2014changed. When participants discussed changes in trust of agents, we found they most often mentioned learning something. For example, they frequently mentioned learning where agents obtained information, what agents do with this information and how agents are programmed. Based on the results, we developed recommendations for teaching conversational agent concepts, including emphasizing the concepts students found most challenging, like training, turn-taking and terminology; supplementing agent development activities with related learning activities; fostering appropriate levels of trust towards agents; and fostering accurate partner models of agents. 
Through such pedagogy, students can learn to better understand conversational AI and what it means to have it in the world.", + "primary_area": "", + "author": "Jessica Van Brummelen; Mingyan Claire Tian; Maura Kelleher; Nghi Hoang Nguyen", + "authorids": "", + "aff": "Massachusetts Institute of Technology; Wellesley College; Massachusetts Institute of Technology; Massachusetts Institute of Technology", + "bibtex": "@article{Van Brummelen_Tian_Kelleher_Nguyen_2024, title={Learning Affects Trust: Design Recommendations and Concepts for Teaching Children\u2014and Nearly Anyone\u2014about Conversational Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26883}, DOI={10.1609/aaai.v37i13.26883}, abstractNote={Conversational agents are rapidly becoming commonplace. However, since these systems are typically blackboxed, users\u2014including vulnerable populations, like children\u2014often do not understand them deeply. For example, they might assume agents are overly intelligent, leading to frustration and distrust. Users may also overtrust agents, and thus overshare personal information or rely heavily on agents\u2019 advice. Despite this, little research investigates users\u2019 perceptions of conversational agents in-depth, and even less investigates how education might change these perceptions to be more healthy. We present workshops with associated educational conversational AI concepts to encourage healthier understanding of agents. Through studies with the curriculum with children and parents from various countries, we found participants\u2019 perceptions of agents\u2014specifically their partner models and trust\u2014changed. When participants discussed changes in trust of agents, we found they most often mentioned learning something. For example, they frequently mentioned learning where agents obtained information, what agents do with this information and how agents are programmed. 
Based on the results, we developed recommendations for teaching conversational agent concepts, including emphasizing the concepts students found most challenging, like training, turn-taking and terminology; supplementing agent development activities with related learning activities; fostering appropriate levels of trust towards agents; and fostering accurate partner models of agents. Through such pedagogy, students can learn to better understand conversational AI and what it means to have it in the world.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Van Brummelen, Jessica and Tian, Mingyan Claire and Kelleher, Maura and Nguyen, Nghi Hoang}, year={2024}, month={Jul.}, pages={15860-15868} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26883/26655", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26883", + "pdf_size": 1102202, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1428299140743906136&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff_domain": "csail.mit.edu;wellesley.edu;mit.edu;mit.edu", + "email": "csail.mit.edu;wellesley.edu;mit.edu;mit.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Wellesley College", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.wellesley.edu", + "aff_unique_abbr": "MIT;Wellesley", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26927", + "title": "Learning Better Representations Using Auxiliary Knowledge", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Representation Learning is the core of Machine Learning and Artificial Intelligence as it summarizes input data points into low dimensional vectors. 
This low dimensional vectors should be accurate portrayals of the input data, thus it is crucial to find the most effective and robust representation possible for given input as the performance of the ML task is dependent on the resulting representations. In this summary, we discuss an approach to augment representation learning which relies on external knowledge. We briefly describe the shortcoming of the existing techniques and describe how an auxiliary knowledge source could result in obtaining improved representations.", + "primary_area": "", + "author": "Saed Rezayi", + "authorids": "", + "aff": "University of Georgia, Department of Computer Science", + "bibtex": "@article{Rezayi_2024, title={Learning Better Representations Using Auxiliary Knowledge}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26927}, DOI={10.1609/aaai.v37i13.26927}, abstractNote={Representation Learning is the core of Machine Learning and Artificial Intelligence as it summarizes input data points into low dimensional vectors. This low dimensional vectors should be accurate portrayals of the input data, thus it is crucial to find the most effective and robust representation possible for given input as the performance of the ML task is dependent on the resulting representations. In this summary, we discuss an approach to augment representation learning which relies on external knowledge. 
We briefly describe the shortcoming of the existing techniques and describe how an auxiliary knowledge source could result in obtaining improved representations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rezayi, Saed}, year={2024}, month={Jul.}, pages={16133-16134} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26927/26699", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26927", + "pdf_size": 55904, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13302233567050847701&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "uga.edu", + "email": "uga.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Georgia", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.uga.edu", + "aff_unique_abbr": "UGA", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25640", + "title": "Learning Chemical Rules of Retrosynthesis with Pre-training", + "track": "main", + "status": "Technical", + "abstract": "Retrosynthesis aided by artificial intelligence has been a very active and bourgeoning area of research, for its critical role in drug discovery as well as material science. Three categories of solutions, i.e., template-based, template-free, and semi-template methods, constitute mainstream solutions to this problem. In this paper, we focus on template-free methods which are known to be less bothered by the template generalization issue and the atom mapping challenge. Among several remaining problems regarding template-free methods, failing to conform to chemical rules is pronounced. To address the issue, we seek for a pre-training solution to empower the pre-trained model with chemical rules encoded. 
Concretely, we enforce the atom conservation rule via a molecule reconstruction pre-training task, and the reaction rule that dictates reaction centers via a reaction type guided contrastive pre-training task. In our empirical evaluation, the proposed pre-training solution substantially improves the single-step retrosynthesis accuracies in three downstream datasets.", + "primary_area": "domain s of application", + "author": "Yinjie Jiang; Ying WEI; Fei Wu; Zhengxing Huang; Kun Kuang; Zhihua Wang", + "authorids": "", + "aff": "Zhejiang University; City University of Hong Kong; Zhejiang University+Shanghai Institute for Advanced Study of Zhejiang University; Zhejiang University; Zhejiang University; Shanghai Institute for Advanced Study of Zhejiang University", + "bibtex": "@article{Jiang_WEI_Wu_Huang_Kuang_Wang_2023, title={Learning Chemical Rules of Retrosynthesis with Pre-training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25640}, DOI={10.1609/aaai.v37i4.25640}, abstractNote={Retrosynthesis aided by artificial intelligence has been a very active and bourgeoning area of research, for its critical role in drug discovery as well as material science. Three categories of solutions, i.e., template-based, template-free, and semi-template methods, constitute mainstream solutions to this problem. In this paper, we focus on template-free methods which are known to be less bothered by the template generalization issue and the atom mapping challenge. Among several remaining problems regarding template-free methods, failing to conform to chemical rules is pronounced. To address the issue, we seek for a pre-training solution to empower the pre-trained model with chemical rules encoded. Concretely, we enforce the atom conservation rule via a molecule reconstruction pre-training task, and the reaction rule that dictates reaction centers via a reaction type guided contrastive pre-training task. 
In our empirical evaluation, the proposed pre-training solution substantially improves the single-step retrosynthesis accuracies in three downstream datasets.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Yinjie and WEI, Ying and Wu, Fei and Huang, Zhengxing and Kuang, Kun and Wang, Zhihua}, year={2023}, month={Jun.}, pages={5113-5121} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25640/25412", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25640", + "pdf_size": 451106, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14258641528693288845&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn;cityu.edu.hk;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;cityu.edu.hk;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0+0;0;0;0", + "aff_unique_norm": "Zhejiang University;City University of Hong Kong", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.zju.edu.cn;https://www.cityu.edu.hk", + "aff_unique_abbr": "ZJU;CityU", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26044", + "title": "Learning Compact Features via In-Training Representation Alignment", + "track": "main", + "status": "Technical", + "abstract": "Deep neural networks (DNNs) for supervised learning can be viewed as a pipeline of the feature extractor (i.e., last hidden layer) and a linear classifier (i.e., output layer) that are trained jointly with stochastic gradient descent (SGD) on the loss function (e.g., cross-entropy). In each epoch, the true gradient of the loss function is estimated using a mini-batch sampled from the training set and model parameters are then updated with the mini-batch gradients. 
Although the latter provides an unbiased estimation of the former, they are subject to substantial variances derived from the size and number of sampled mini-batches, leading to noisy and jumpy updates. To stabilize such undesirable variance in estimating the true gradients, we propose In-Training Representation Alignment (ITRA) that explicitly aligns feature distributions of two different mini-batches with a matching loss in the SGD training process. We also provide a rigorous analysis of the desirable effects of the matching loss on feature representation learning: (1) extracting compact feature representation; (2) reducing over-adaption on mini-batches via an adaptively weighting mechanism; and (3) accommodating to multi-modalities. Finally, we conduct large-scale experiments on both image and text classifications to demonstrate its superior performance to the strong baselines.", + "primary_area": "machine learning ii", + "author": "Xin Li; Xiangrui Li; Deng Pan; Yao Qiang; Dongxiao Zhu", + "authorids": "", + "aff": "Department of Computer Science, Wayne State University; Department of Computer Science, Wayne State University; Department of Computer Science, Wayne State University; Department of Computer Science, Wayne State University; Department of Computer Science, Wayne State University", + "bibtex": "@article{Li_Li_Pan_Qiang_Zhu_2023, title={Learning Compact Features via In-Training Representation Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26044}, DOI={10.1609/aaai.v37i7.26044}, abstractNote={Deep neural networks (DNNs) for supervised learning can be viewed as a pipeline of the feature extractor (i.e., last hidden layer) and a linear classifier (i.e., output layer) that are trained jointly with stochastic gradient descent (SGD) on the loss function (e.g., cross-entropy). 
In each epoch, the true gradient of the loss function is estimated using a mini-batch sampled from the training set and model parameters are then updated with the mini-batch gradients. Although the latter provides an unbiased estimation of the former, they are subject to substantial variances derived from the size and number of sampled mini-batches, leading to noisy and jumpy updates. To stabilize such undesirable variance in estimating the true gradients, we propose In-Training Representation Alignment (ITRA) that explicitly aligns feature distributions of two different mini-batches with a matching loss in the SGD training process. We also provide a rigorous analysis of the desirable effects of the matching loss on feature representation learning: (1) extracting compact feature representation; (2) reducing over-adaption on mini-batches via an adaptively weighting mechanism; and (3) accommodating to multi-modalities. Finally, we conduct large-scale experiments on both image and text classifications to demonstrate its superior performance to the strong baselines.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xin and Li, Xiangrui and Pan, Deng and Qiang, Yao and Zhu, Dongxiao}, year={2023}, month={Jun.}, pages={8675-8683} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26044/25816", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26044", + "pdf_size": 899261, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5787931456485386460&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "wayne.edu;wayne.edu;wayne.edu;wayne.edu;wayne.edu", + "email": "wayne.edu;wayne.edu;wayne.edu;wayne.edu;wayne.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Wayne State University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://wayne.edu", + 
"aff_unique_abbr": "WSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26561", + "title": "Learning Compositional Tasks from Language Instructions", + "track": "main", + "status": "Technical", + "abstract": "The ability to combine learned knowledge and skills to solve novel tasks is a key aspect of generalization in humans that allows us to understand and perform tasks described by novel language utterances. While progress has been made in supervised learning settings, no work has yet studied compositional generalization of a reinforcement learning agent following natural language instructions in an embodied environment. We develop a set of tasks in a photo-realistic simulated kitchen environment that allow us to study the degree to which a behavioral policy captures the systematicity in language by studying its zero-shot generalization performance on held out natural language instructions. We show that our agent which leverages a novel additive action-value decomposition in tandem with attention based subgoal prediction is able to exploit composition in text instructions to generalize to unseen tasks.", + "primary_area": "speech natural language processing", + "author": "Lajanugen Logeswaran; Wilka Carvalho; Honglak Lee", + "authorids": "", + "aff": "LG AI Research; University of Michigan, Ann Arbor; LG AI Research+University of Michigan, Ann Arbor", + "bibtex": "@article{Logeswaran_Carvalho_Lee_2023, title={Learning Compositional Tasks from Language Instructions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26561}, DOI={10.1609/aaai.v37i11.26561}, abstractNote={The ability to combine learned knowledge and skills to solve novel tasks is a key aspect of generalization in humans that allows us to understand and perform tasks described by novel language utterances. 
While progress has been made in supervised learning settings, no work has yet studied compositional generalization of a reinforcement learning agent following natural language instructions in an embodied environment. We develop a set of tasks in a photo-realistic simulated kitchen environment that allow us to study the degree to which a behavioral policy captures the systematicity in language by studying its zero-shot generalization performance on held out natural language instructions. We show that our agent which leverages a novel additive action-value decomposition in tandem with attention based subgoal prediction is able to exploit composition in text instructions to generalize to unseen tasks.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Logeswaran, Lajanugen and Carvalho, Wilka and Lee, Honglak}, year={2023}, month={Jun.}, pages={13300-13308} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26561/26333", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26561", + "pdf_size": 2740113, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9815298726683039852&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "lgresearch.ai; ; ", + "email": "lgresearch.ai; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "LG AI Research;University of Michigan", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.lgaires.com;https://www.umich.edu", + "aff_unique_abbr": "LG AI;UM", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Ann Arbor", + "aff_country_unique_index": "0;1;0+1", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-26312", + "title": "Learning Conflict-Noticed Architecture for Multi-Task Learning", + "track": "main", + "status": "Technical", + "abstract": "Multi-task learning has been widely used in many applications to enable more 
efficient learning by sharing part of the architecture across multiple tasks. However, a major challenge is the gradient conflict when optimizing the shared parameters, where the gradients of different tasks could have opposite directions. Directly averaging those gradients will impair the performance of some tasks and cause negative transfer. Different from most existing works that manipulate gradients to mitigate the gradient conflict, in this paper, we address this problem from the perspective of architecture learning and propose a Conflict-Noticed Architecture Learning (CoNAL) method to alleviate the gradient conflict by learning architectures. By introducing purely-specific modules specific to each task in the search space, the CoNAL method can automatically learn when to switch to purely-specific modules in the tree-structured network architectures when the gradient conflict occurs. To handle multi-task problems with a large number of tasks, we propose a progressive extension of the CoNAL method. 
Extensive experiments on computer vision, natural language processing, and reinforcement learning benchmarks demonstrate the effectiveness of the proposed methods.", + "primary_area": "machine learning iv", + "author": "Zhixiong Yue; Yu Zhang; Jie Liang", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China; Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen, China + University of Technology Sydney; University of Technology Sydney", + "bibtex": "@article{Yue_Zhang_Liang_2023, title={Learning Conflict-Noticed Architecture for Multi-Task Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26312}, DOI={10.1609/aaai.v37i9.26312}, abstractNote={Multi-task learning has been widely used in many applications to enable more efficient learning by sharing part of the architecture across multiple tasks. However, a major challenge is the gradient conflict when optimizing the shared parameters, where the gradients of different tasks could have opposite directions. Directly averaging those gradients will impair the performance of some tasks and cause negative transfer. Different from most existing works that manipulate gradients to mitigate the gradient conflict, in this paper, we address this problem from the perspective of architecture learning and propose a Conflict-Noticed Architecture Learning (CoNAL) method to alleviate the gradient conflict by learning architectures. By introducing purely-specific modules specific to each task in the search space, the CoNAL method can automatically learn when to switch to purely-specific modules in the tree-structured network architectures when the gradient conflict occurs. To handle multi-task problems with a large number of tasks, we propose a progressive extension of the CoNAL method. 
Extensive experiments on computer vision, natural language processing, and reinforcement learning benchmarks demonstrate the effectiveness of the proposed methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yue, Zhixiong and Zhang, Yu and Liang, Jie}, year={2023}, month={Jun.}, pages={11078-11086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26312/26084", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26312", + "pdf_size": 291505, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16342390770405516434&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.sustech.edu.cn;gmail.com;uts.edu.au", + "email": "mail.sustech.edu.cn;gmail.com;uts.edu.au", + "github": "https://github.com/yuezhixiong/CoNAL", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+2;2", + "aff_unique_norm": "Southern University of Science and Technology;Peng Cheng Laboratory;University of Technology Sydney", + "aff_unique_dep": "Department of Computer Science and Engineering;;", + "aff_unique_url": "https://www.sustech.edu.cn;;https://www.uts.edu.au", + "aff_unique_abbr": "SUSTech;;UTS", + "aff_campus_unique_index": "0+0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+1;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25340", + "title": "Learning Context-Aware Classifier for Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Semantic segmentation is still a challenging task for parsing diverse contexts in different scenes, thus the fixed classifier might not be able to well address varying feature distributions during testing. 
Different from the mainstream literature where the efficacy of strong backbones and effective decoder heads has been well studied, in this paper, additional contextual hints are instead exploited via learning a context-aware classifier whose content is data-conditioned, decently adapting to different latent distributions. Since only the classifier is dynamically altered, our method is model-agnostic and can be easily applied to generic segmentation models. Notably, with only negligible additional parameters and +2\\% inference time, decent performance gain has been achieved on both small and large models with challenging benchmarks, manifesting substantial practical merits brought by our simple yet effective method. The implementation is available at https://github.com/tianzhuotao/CAC.", + "primary_area": "computer vision ii", + "author": "Zhuotao Tian; Jiequan Cui; Li Jiang; Xiaojuan Qi; Xin Lai; Yixin Chen; Shu Liu; Jiaya Jia", + "authorids": "", + "aff": "The Chinese University of Hong Kong; The Chinese University of Hong Kong; Max Planck Institute for Informatics; The University of Hong Kong; The Chinese University of Hong Kong + SmartMore Corporation; The Chinese University of Hong Kong; SmartMore Corporation; The Chinese University of Hong Kong + SmartMore Corporation", + "bibtex": "@article{Tian_Cui_Jiang_Qi_Lai_Chen_Liu_Jia_2023, title={Learning Context-Aware Classifier for Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25340}, DOI={10.1609/aaai.v37i2.25340}, abstractNote={Semantic segmentation is still a challenging task for parsing diverse contexts in different scenes, thus the fixed classifier might not be able to well address varying feature distributions during testing. 
Different from the mainstream literature where the efficacy of strong backbones and effective decoder heads has been well studied, in this paper, additional contextual hints are instead exploited via learning a context-aware classifier whose content is data-conditioned, decently adapting to different latent distributions. Since only the classifier is dynamically altered, our method is model-agnostic and can be easily applied to generic segmentation models. Notably, with only negligible additional parameters and +2\\% inference time, decent performance gain has been achieved on both small and large models with challenging benchmarks, manifesting substantial practical merits brought by our simple yet effective method. The implementation is available at https://github.com/tianzhuotao/CAC.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Zhuotao and Cui, Jiequan and Jiang, Li and Qi, Xiaojuan and Lai, Xin and Chen, Yixin and Liu, Shu and Jia, Jiaya}, year={2023}, month={Jun.}, pages={2438-2446} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25340/25112", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25340", + "pdf_size": 1501105, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2463954128258858526&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/tianzhuotao/CAC", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;2;0+3;0;3;0+3", + "aff_unique_norm": "The Chinese University of Hong Kong;Max Planck Institute for Informatics;The University of Hong Kong;SmartMore Corporation", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cuhk.edu.hk;https://mpi-inf.mpg.de;https://www.hku.hk;https://www.smartmore.com/", + "aff_unique_abbr": "CUHK;MPII;HKU;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;1;0;0+0;0;0;0+0", + "aff_country_unique": "China;Germany" + }, + { + "id": "article-25369", + "title": "Learning Continuous Depth Representation via Geometric Spatial Aggregator", + "track": "main", + "status": "Technical", + "abstract": "Depth map super-resolution (DSR) has been a fundamental task for 3D computer vision. While arbitrary scale DSR is a more realistic setting in this scenario, previous approaches predominantly suffer from the issue of inefficient real-numbered scale upsampling. To explicitly address this issue, we propose a novel continuous depth representation for DSR. The heart of this representation is our proposed Geometric Spatial Aggregator (GSA), which exploits a distance field modulated by arbitrarily upsampled target gridding, through which the geometric information is explicitly introduced into feature aggregation and target generation. Furthermore, bricking with GSA, we present a transformer-style backbone named GeoDSR, which possesses a principled way to construct the functional mapping between local coordinates and the high-resolution output results, empowering our model with the advantage of arbitrary shape transformation ready to help diverse zooming demand. Extensive experimental results on standard depth map benchmarks, e.g., NYU v2, have demonstrated that the proposed framework achieves significant restoration gain in arbitrary scale depth map super-resolution compared with the prior art. 
Our codes are available at https://github.com/nana01219/GeoDSR.", + "primary_area": "computer vision iii", + "author": "Xiaohang Wang; Xuanhong Chen; Bingbing Ni; Zhengyan Tong; Hang Wang", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Wang_Chen_Ni_Tong_Wang_2023, title={Learning Continuous Depth Representation via Geometric Spatial Aggregator}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25369}, DOI={10.1609/aaai.v37i3.25369}, abstractNote={Depth map super-resolution (DSR) has been a fundamental task for 3D computer vision. While arbitrary scale DSR is a more realistic setting in this scenario, previous approaches predominantly suffer from the issue of inefficient real-numbered scale upsampling. To explicitly address this issue, we propose a novel continuous depth representation for DSR. The heart of this representation is our proposed Geometric Spatial Aggregator (GSA), which exploits a distance field modulated by arbitrarily upsampled target gridding, through which the geometric information is explicitly introduced into feature aggregation and target generation. Furthermore, bricking with GSA, we present a transformer-style backbone named GeoDSR, which possesses a principled way to construct the functional mapping between local coordinates and the high-resolution output results, empowering our model with the advantage of arbitrary shape transformation ready to help diverse zooming demand. Extensive experimental results on standard depth map benchmarks, e.g., NYU v2, have demonstrated that the proposed framework achieves significant restoration gain in arbitrary scale depth map super-resolution compared with the prior art. 
Our codes are available at https://github.com/nana01219/GeoDSR.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Xiaohang and Chen, Xuanhong and Ni, Bingbing and Tong, Zhengyan and Wang, Hang}, year={2023}, month={Jun.}, pages={2698-2706} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25369/25141", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25369", + "pdf_size": 1834519, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10588992635576257661&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com", + "github": "https://github.com/nana01219/GeoDSR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26407", + "title": "Learning Control Policies for Stochastic Systems with Reach-Avoid Guarantees", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold p in [0,1] over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. 
Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on 3 stochastic non-linear reinforcement learning tasks.", + "primary_area": "philosophy and ethics of ai", + "author": "\u0110or\u0111e \u017dikeli\u0107; Mathias Lechner; Thomas A. Henzinger; Krishnendu Chatterjee", + "authorids": "", + "aff": "Institute of Science and Technology Austria (ISTA); Massachusetts Institute of Technology (MIT); Institute of Science and Technology Austria (ISTA); Institute of Science and Technology Austria (ISTA)", + "bibtex": "@article{\u017dikeli\u0107_Lechner_Henzinger_Chatterjee_2023, title={Learning Control Policies for Stochastic Systems with Reach-Avoid Guarantees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26407}, DOI={10.1609/aaai.v37i10.26407}, abstractNote={We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold p in [0,1] over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. 
Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on 3 stochastic non-linear reinforcement learning tasks.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={\u017dikeli\u0107, \u0110or\u0111e and Lechner, Mathias and Henzinger, Thomas A. and Chatterjee, Krishnendu}, year={2023}, month={Jun.}, pages={11926-11935} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26407/26179", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26407", + "pdf_size": 346524, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=488398558393450220&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ist.ac.at;mit.edu;ist.ac.at;ist.ac.at", + "email": "ist.ac.at;mit.edu;ist.ac.at;ist.ac.at", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Institute of Science and Technology Austria;Massachusetts Institute of Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ista.ac.at;https://web.mit.edu", + "aff_unique_abbr": "ISTA;MIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Austria;United States" + }, + { + "id": "article-25915", + "title": "Learning Decomposed Spatial Relations for Multi-Variate Time-Series Modeling", + "track": "main", + "status": "Technical", + "abstract": "Modeling multi-variate time-series (MVTS) data is a long-standing research subject and has found wide applications. 
Recently, there is a surge of interest in modeling spatial relations between variables as graphs, i.e., first learning one static graph for each dataset and then exploiting the graph structure via graph neural networks. However, as spatial relations may differ substantially across samples, building one static graph for all the samples inherently limits flexibility and severely degrades the performance in practice. To address this issue, we propose a framework for fine-grained modeling and utilization of spatial correlation between variables. By analyzing the statistical properties of real-world datasets, a universal decomposition of spatial correlation graphs is first identified. Specifically, the hidden spatial relations can be decomposed into a prior part, which applies across all the samples, and a dynamic part, which varies between samples, and building different graphs is necessary to model these relations. To better coordinate the learning of the two relational graphs, we propose a min-max learning paradigm that not only regulates the common part of different dynamic graphs but also guarantees spatial distinguishability among samples. 
The experimental results show that our proposed model outperforms the state-of-the-art baseline methods on both time-series forecasting and time-series point prediction tasks.", + "primary_area": "machine learning i", + "author": "Yuchen Fang; Kan Ren; Caihua Shan; Yifei Shen; You Li; Weinan Zhang; Yong Yu; Dongsheng Li", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Central South University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Microsoft Research Asia", + "bibtex": "@article{Fang_Ren_Shan_Shen_Li_Zhang_Yu_Li_2023, title={Learning Decomposed Spatial Relations for Multi-Variate Time-Series Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25915}, DOI={10.1609/aaai.v37i6.25915}, abstractNote={Modeling multi-variate time-series (MVTS) data is a long-standing research subject and has found wide applications. Recently, there is a surge of interest in modeling spatial relations between variables as graphs, i.e., first learning one static graph for each dataset and then exploiting the graph structure via graph neural networks. However, as spatial relations may differ substantially across samples, building one static graph for all the samples inherently limits flexibility and severely degrades the performance in practice. To address this issue, we propose a framework for fine-grained modeling and utilization of spatial correlation between variables. By analyzing the statistical properties of real-world datasets, a universal decomposition of spatial correlation graphs is first identified. Specifically, the hidden spatial relations can be decomposed into a prior part, which applies across all the samples, and a dynamic part, which varies between samples, and building different graphs is necessary to model these relations. 
To better coordinate the learning of the two relational graphs, we propose a min-max learning paradigm that not only regulates the common part of different dynamic graphs but also guarantees spatial distinguishability among samples. The experimental results show that our proposed model outperforms the state-of-the-art baseline methods on both time-series forecasting and time-series point prediction tasks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Yuchen and Ren, Kan and Shan, Caihua and Shen, Yifei and Li, You and Zhang, Weinan and Yu, Yong and Li, Dongsheng}, year={2023}, month={Jun.}, pages={7530-7538} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25915/25687", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25915", + "pdf_size": 1141667, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1579383834495934714&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "sjtu.edu.cn;microsoft.com; ; ; ;sjtu.edu.cn; ; ", + "email": "sjtu.edu.cn;microsoft.com; ; ; ;sjtu.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;1;2;0;0;1", + "aff_unique_norm": "Shanghai Jiao Tong University;Microsoft Research;Central South University", + "aff_unique_dep": ";Research;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.csu.edu.cn", + "aff_unique_abbr": "SJTU;MSR Asia;CSU", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25749", + "title": "Learning Deep Hierarchical Features with Spatial Regularization for One-Class Facial Expression Recognition", + "track": "main", + "status": "Technical", + "abstract": "Existing methods on facial expression recognition (FER) are mainly trained in the setting when multi-class data 
is available. However, to detect the alien expressions that are absent during training, this type of methods cannot work. To address this problem, we develop a Hierarchical Spatial One Class Facial Expression Recognition Network (HS-OCFER) which can construct the decision boundary of a given expression class (called normal class) by training on only one-class data. Specifically, HS-OCFER consists of three novel components. First, hierarchical bottleneck modules are proposed to enrich the representation power of the model and extract detailed feature hierarchy from different levels. Second, multi-scale spatial regularization with facial geometric information is employed to guide the feature extraction towards emotional facial representations and prevent the model from overfitting extraneous disturbing factors. Third, compact intra-class variation is adopted to separate the normal class from alien classes in the decision space. Extensive evaluations on 4 typical FER datasets from both laboratory and wild scenarios show that our method consistently outperforms state-of-the-art One-Class Classification (OCC) approaches.", + "primary_area": "humans and ai", + "author": "Bingjun Luo; Junjie Zhu; Tianyu Yang; Sicheng Zhao; Chao Hu; Xibin Zhao; Yue Gao", + "authorids": "", + "aff": "BNRist, KLISS, School of Software, Tsinghua University; BNRist, KLISS, School of Software, Tsinghua University; BNRist, KLISS, School of Software, Tsinghua University; BNRist, Tsinghua University; Central South University; BNRist, KLISS, School of Software, Tsinghua University; BNRist, KLISS, School of Software, Tsinghua University", + "bibtex": "@article{Luo_Zhu_Yang_Zhao_Hu_Zhao_Gao_2023, title={Learning Deep Hierarchical Features with Spatial Regularization for One-Class Facial Expression Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25749}, DOI={10.1609/aaai.v37i5.25749}, abstractNote={Existing methods on facial expression recognition (FER) are mainly 
trained in the setting when multi-class data is available. However, to detect the alien expressions that are absent during training, this type of methods cannot work. To address this problem, we develop a Hierarchical Spatial One Class Facial Expression Recognition Network (HS-OCFER) which can construct the decision boundary of a given expression class (called normal class) by training on only one-class data. Specifically, HS-OCFER consists of three novel components. First, hierarchical bottleneck modules are proposed to enrich the representation power of the model and extract detailed feature hierarchy from different levels. Second, multi-scale spatial regularization with facial geometric information is employed to guide the feature extraction towards emotional facial representations and prevent the model from overfitting extraneous disturbing factors. Third, compact intra-class variation is adopted to separate the normal class from alien classes in the decision space. Extensive evaluations on 4 typical FER datasets from both laboratory and wild scenarios show that our method consistently outperforms state-of-the-art One-Class Classification (OCC) approaches.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Bingjun and Zhu, Junjie and Yang, Tianyu and Zhao, Sicheng and Hu, Chao and Zhao, Xibin and Gao, Yue}, year={2023}, month={Jun.}, pages={6065-6073} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25749/25521", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25749", + "pdf_size": 1969147, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4877853873561641219&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;mails.tsinghua.edu.cn;gmail.com;gmail.com;csu.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "gmail.com;mails.tsinghua.edu.cn;gmail.com;gmail.com;csu.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + 
"project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;0;0", + "aff_unique_norm": "Tsinghua University;Central South University", + "aff_unique_dep": "School of Software;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.csu.edu.cn", + "aff_unique_abbr": "THU;CSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26291", + "title": "Learning Dynamic Latent Spaces for Lifelong Generative Modelling", + "track": "main", + "status": "Technical", + "abstract": "Task Free Continual Learning (TFCL) aims to capture novel concepts from non-stationary data streams without forgetting previously learned knowledge. Mixture models, which add new components when certain conditions are met, have shown promising results in TFCL tasks. However, such approaches do not make use of the knowledge already accumulated for positive knowledge transfer. In this paper, we develop a new model, namely the Online Recursive Variational Autoencoder (ORVAE). ORVAE utilizes the prior knowledge by selectively incorporating the newly learnt information, by adding new components, according to the knowledge already known from the past learnt data. We introduce a new attention mechanism to regularize the structural latent space in which the most important information is reused while the information that interferes with novel samples is inactivated. The proposed attention mechanism can maximize the benefit from the forward transfer for learning novel information without forgetting previously learnt knowledge. We perform several experiments which show that ORVAE achieves state-of-the-art results under TFCL.", + "primary_area": "machine learning iv", + "author": "Fei Ye; Adrian G. 
Bors", + "authorids": "", + "aff": "Department of Computer Science, University of York, York YO10 5GH, UK; Department of Computer Science, University of York, York YO10 5GH, UK", + "bibtex": "@article{Ye_Bors_2023, title={Learning Dynamic Latent Spaces for Lifelong Generative Modelling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26291}, DOI={10.1609/aaai.v37i9.26291}, abstractNote={Task Free Continual Learning (TFCL) aims to capture novel concepts from non-stationary data streams without forgetting previously learned knowledge. Mixture models, which add new components when certain conditions are met, have shown promising results in TFCL tasks. However, such approaches do not make use of the knowledge already accumulated for positive knowledge transfer. In this paper, we develop a new model, namely the Online Recursive Variational Autoencoder (ORVAE). ORVAE utilizes the prior knowledge by selectively incorporating the newly learnt information, by adding new components, according to the knowledge already known from the past learnt data. We introduce a new attention mechanism to regularize the structural latent space in which the most important information is reused while the information that interferes with novel samples is inactivated. The proposed attention mechanism can maximize the benefit from the forward transfer for learning novel information without forgetting previously learnt knowledge. 
We perform several experiments which show that ORVAE achieves state-of-the-art results under TFCL.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Fei and Bors, Adrian G.}, year={2023}, month={Jun.}, pages={10891-10899} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26291/26063", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26291", + "pdf_size": 521153, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8760861144078824075&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "york.ac.uk;york.ac.uk", + "email": "york.ac.uk;york.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of York", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.york.ac.uk", + "aff_unique_abbr": "York", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "York", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-27039", + "title": "Learning Dynamic Temporal Relations with Continuous Graph for Multivariate Time Series Forecasting (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The recent advance in graph neural networks (GNNs) has inspired a few studies to leverage the dependencies of variables for time series prediction. Despite the promising results, existing GNN-based models cannot capture the global dynamic relations between variables owing to the inherent limitation of their graph learning module. Besides, multi-scale temporal information is usually ignored or simply concatenated in prior methods, resulting in inaccurate predictions. To overcome these limitations, we present CGMF, a Continuous Graph learning method for Multivariate time series Forecasting (CGMF). 
Our CGMF consists of a continuous graph module incorporating differential equations to capture the long-range intra- and inter-relations of the temporal embedding sequence. We also introduce a controlled differential equation-based fusion mechanism that efficiently exploits multi-scale representations to form continuous evolutional dynamics and learn rich relations and patterns shared across different scales. Comprehensive experiments demonstrate the effectiveness of our method for a variety of datasets.", + "primary_area": "", + "author": "Zhiyuan Wang; Fan Zhou; Goce Trajcevski; Kunpeng Zhang; Ting Zhong", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China + Kashi Institute of Electronics and Information Industry; Iowa State University; University of Maryland, College Park; University of Electronic Science and Technology of China", + "bibtex": "@article{Wang_Zhou_Trajcevski_Zhang_Zhong_2024, title={Learning Dynamic Temporal Relations with Continuous Graph for Multivariate Time Series Forecasting (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27039}, DOI={10.1609/aaai.v37i13.27039}, abstractNote={The recent advance in graph neural networks (GNNs) has inspired a few studies to leverage the dependencies of variables for time series prediction. Despite the promising results, existing GNN-based models cannot capture the global dynamic relations between variables owing to the inherent limitation of their graph learning module. Besides, multi-scale temporal information is usually ignored or simply concatenated in prior methods, resulting in inaccurate predictions. To overcome these limitations, we present CGMF, a Continuous Graph learning method for Multivariate time series Forecasting (CGMF). 
Our CGMF consists of a continuous graph module incorporating differential equations to capture the long-range intra- and inter-relations of the temporal embedding sequence. We also introduce a controlled differential equation-based fusion mechanism that efficiently exploits multi-scale representations to form continuous evolutional dynamics and learn rich relations and patterns shared across different scales. Comprehensive experiments demonstrate the effectiveness of our method for a variety of datasets.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zhiyuan and Zhou, Fan and Trajcevski, Goce and Zhang, Kunpeng and Zhong, Ting}, year={2024}, month={Jul.}, pages={16358-16359} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27039/26811", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27039", + "pdf_size": 83446, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8028001237281966684&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;uestc.edu.cn;iastate.edu;umd.edu;uestc.edu.cn", + "email": "gmail.com;uestc.edu.cn;iastate.edu;umd.edu;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;3;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Kashi Institute of Electronics and Information Industry;Iowa State University;University of Maryland", + "aff_unique_dep": ";Electronics and Information Industry;;", + "aff_unique_url": "https://www.uestc.edu.cn;;https://www.iastate.edu;https://www.umd.edu", + "aff_unique_abbr": "UESTC;;ISU;UMD", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";College Park", + "aff_country_unique_index": "0;0+0;1;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25334", + "title": "Learning Event-Relevant Factors for Video Anomaly Detection", + "track": "main", + "status": "Technical", + 
"abstract": "Most video anomaly detection methods discriminate events that deviate from normal patterns as anomalies. However, these methods are prone to interferences from event-irrelevant factors, such as background textures and object scale variations, incurring an increased false detection rate. In this paper, we propose to explicitly learn event-relevant factors to eliminate the interferences from event-irrelevant factors on anomaly predictions. To this end, we introduce a causal generative model to separate the event-relevant factors and event-irrelevant ones in videos, and learn the prototypes of event-relevant factors in a memory augmentation module. We design a causal objective function to optimize the causal generative model and develop a counterfactual learning strategy to guide anomaly predictions, which increases the influence of the event-relevant factors. The extensive experiments show the effectiveness of our method for video anomaly detection.", + "primary_area": "computer vision ii", + "author": "Che Sun; Chenrui Shi; Yunde Jia; Yuwei Wu", + "authorids": "", + "aff": "Beijing Key Laboratory of Intelligent Information Technology, School of Computer Science & Technology, Beijing Institute of Technology, China; Beijing Key Laboratory of Intelligent Information Technology, School of Computer Science & Technology, Beijing Institute of Technology, China + Guangdong Laboratory of Machine Perception and Intelligent Computing, Shenzhen MSU-BIT University, China; Beijing Key Laboratory of Intelligent Information Technology, School of Computer Science & Technology, Beijing Institute of Technology, China + Guangdong Laboratory of Machine Perception and Intelligent Computing, Shenzhen MSU-BIT University, China; Beijing Key Laboratory of Intelligent Information Technology, School of Computer Science & Technology, Beijing Institute of Technology, China + Guangdong Laboratory of Machine Perception and Intelligent Computing, Shenzhen MSU-BIT University, China", + 
"bibtex": "@article{Sun_Shi_Jia_Wu_2023, title={Learning Event-Relevant Factors for Video Anomaly Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25334}, DOI={10.1609/aaai.v37i2.25334}, abstractNote={Most video anomaly detection methods discriminate events that deviate from normal patterns as anomalies. However, these methods are prone to interferences from event-irrelevant factors, such as background textures and object scale variations, incurring an increased false detection rate. In this paper, we propose to explicitly learn event-relevant factors to eliminate the interferences from event-irrelevant factors on anomaly predictions. To this end, we introduce a causal generative model to separate the event-relevant factors and event-irrelevant ones in videos, and learn the prototypes of event-relevant factors in a memory augmentation module. We design a causal objective function to optimize the causal generative model and develop a counterfactual learning strategy to guide anomaly predictions, which increases the influence of the event-relevant factors. 
The extensive experiments show the effectiveness of our method for video anomaly detection.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Che and Shi, Chenrui and Jia, Yunde and Wu, Yuwei}, year={2023}, month={Jun.}, pages={2384-2392} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25334/25106", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25334", + "pdf_size": 1191819, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7088383625895908149&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;0+1;0+1", + "aff_unique_norm": "Beijing Institute of Technology;Shenzhen MSU-BIT University", + "aff_unique_dep": "School of Computer Science & Technology;Guangdong Laboratory of Machine Perception and Intelligent Computing", + "aff_unique_url": "http://www.bit.edu.cn/;", + "aff_unique_abbr": "BIT;", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26364", + "title": "Learning Explicit Credit Assignment for Cooperative Multi-Agent Reinforcement Learning via Polarization Policy Gradient", + "track": "main", + "status": "Technical", + "abstract": "Cooperative multi-agent policy gradient (MAPG) algorithms have recently attracted wide attention and are regarded as a general scheme for the multi-agent system. Credit assignment plays an important role in MAPG and can induce cooperation among multiple agents. However, most MAPG algorithms cannot achieve good credit assignment because of the game-theoretic pathology known as centralized-decentralized mismatch. 
To address this issue, this paper presents a novel method, Multi-Agent Polarization Policy Gradient (MAPPG). MAPPG takes a simple but efficient polarization function to transform the optimal consistency of joint and individual actions into easily realized constraints, thus enabling efficient credit assignment in MAPPG. Theoretically, we prove that individual policies of MAPPG can converge to the global optimum. Empirically, we evaluate MAPPG on the well-known matrix game and differential game, and verify that MAPPG can converge to the global optimum for both discrete and continuous action spaces. We also evaluate MAPPG on a set of StarCraft II micromanagement tasks and demonstrate that MAPPG outperforms the state-of-the-art MAPG algorithms.", + "primary_area": "multiagent systems", + "author": "Wubing Chen; Wenbin Li; Xiao Liu; Shangdong Yang; Yang Gao", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; Nanjing University of Posts and Telecommunications, Nanjing 210023, China + State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Chen_Li_Liu_Yang_Gao_2023, title={Learning Explicit Credit Assignment for Cooperative Multi-Agent Reinforcement Learning via Polarization Policy Gradient}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26364}, DOI={10.1609/aaai.v37i10.26364}, abstractNote={Cooperative multi-agent policy gradient (MAPG) algorithms have recently attracted wide attention and are regarded as a general scheme for the multi-agent system. 
Credit assignment plays an important role in MAPG and can induce cooperation among multiple agents. However, most MAPG algorithms cannot achieve good credit assignment because of the game-theoretic pathology known as centralized-decentralized mismatch. To address this issue, this paper presents a novel method, Multi-Agent Polarization Policy Gradient (MAPPG). MAPPG takes a simple but efficient polarization function to transform the optimal consistency of joint and individual actions into easily realized constraints, thus enabling efficient credit assignment in MAPPG. Theoretically, we prove that individual policies of MAPPG can converge to the global optimum. Empirically, we evaluate MAPPG on the well-known matrix game and differential game, and verify that MAPPG can converge to the global optimum for both discrete and continuous action spaces. We also evaluate MAPPG on a set of StarCraft II micromanagement tasks and demonstrate that MAPPG outperforms the state-of-the-art MAPG algorithms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Wubing and Li, Wenbin and Liu, Xiao and Yang, Shangdong and Gao, Yang}, year={2023}, month={Jun.}, pages={11542-11550} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26364/26136", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26364", + "pdf_size": 1807108, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15393218010128421234&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;nju.edu.cn;outlook.com;njupt.edu.cn;nju.edu.cn", + "email": "gmail.com;nju.edu.cn;outlook.com;njupt.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+0;0", + "aff_unique_norm": "Nanjing University;Nanjing University of Posts and Telecommunications", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology;", + "aff_unique_url": 
"http://www.nju.edu.cn;", + "aff_unique_abbr": "Nanjing University;NUPT", + "aff_campus_unique_index": "0;0;0;0+0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25342", + "title": "Learning Fractals by Gradient Descent", + "track": "main", + "status": "Technical", + "abstract": "Fractals are geometric shapes that can display complex and self-similar patterns found in nature (e.g., clouds and plants). Recent works in visual recognition have leveraged this property to create random fractal images for model pre-training. In this paper, we study the inverse problem --- given a target image (not necessarily a fractal), we aim to generate a fractal image that looks like it. We propose a novel approach that learns the parameters underlying a fractal image via gradient descent. We show that our approach can find fractal parameters of high visual quality and be compatible with different loss functions, opening up several potentials, e.g., learning fractals for downstream tasks, scientific understanding, etc.", + "primary_area": "computer vision ii", + "author": "Cheng-Hao Tu; Hong-You Chen; David Carlyn; Wei-Lun Chao", + "authorids": "", + "aff": "Department of Computer Science and Engineering, The Ohio State University; Department of Computer Science and Engineering, The Ohio State University; Department of Computer Science and Engineering, The Ohio State University; Department of Computer Science and Engineering, The Ohio State University", + "bibtex": "@article{Tu_Chen_Carlyn_Chao_2023, title={Learning Fractals by Gradient Descent}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25342}, DOI={10.1609/aaai.v37i2.25342}, abstractNote={Fractals are geometric shapes that can display complex and self-similar patterns found in nature (e.g., clouds and plants). 
Recent works in visual recognition have leveraged this property to create random fractal images for model pre-training. In this paper, we study the inverse problem --- given a target image (not necessarily a fractal), we aim to generate a fractal image that looks like it. We propose a novel approach that learns the parameters underlying a fractal image via gradient descent. We show that our approach can find fractal parameters of high visual quality and be compatible with different loss functions, opening up several potentials, e.g., learning fractals for downstream tasks, scientific understanding, etc.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tu, Cheng-Hao and Chen, Hong-You and Carlyn, David and Chao, Wei-Lun}, year={2023}, month={Jun.}, pages={2456-2464} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25342/25114", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25342", + "pdf_size": 4311787, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11530775409363647998&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "osu.edu;osu.edu;osu.edu;osu.edu", + "email": "osu.edu;osu.edu;osu.edu;osu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The Ohio State University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.osu.edu", + "aff_unique_abbr": "OSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26989", + "title": "Learning Generalizable Batch Active Learning Strategies via Deep Q-networks (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "To handle a large amount of unlabeled data, batch active learning (BAL) queries humans for the 
labels of a batch of the most valuable data points at every round. Most current BAL strategies are based on human-designed heuristics, such as uncertainty sampling or mutual information maximization. However, there exists a disagreement between these heuristics and the ultimate goal of BAL, i.e., optimizing the model's final performance within the query budgets. This disagreement leads to a limited generality of these heuristics. To this end, we formulate BAL as an MDP and propose a data-driven approach based on deep reinforcement learning. Our method learns the BAL strategy by maximizing the model's final performance. Experiments on the UCI benchmark show that our method can achieve competitive performance compared to existing heuristics-based approaches.", + "primary_area": "", + "author": "Yi-Chen Li; Wen-Jie Shen; Boyu Zhang; Feng Mao; Zongzhang Zhang; Yang Yu", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China + School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing 100876, China; Alibaba Group, Hangzhou 310052, China; Alibaba Group, Hangzhou 310052, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Li_Shen_Zhang_Mao_Zhang_Yu_2024, title={Learning Generalizable Batch Active Learning Strategies via Deep Q-networks (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26989}, DOI={10.1609/aaai.v37i13.26989}, abstractNote={To handle a large amount of unlabeled data, batch active learning (BAL) queries humans for the labels of a batch of the most valuable data points at every round. 
Most current BAL strategies are based on human-designed heuristics, such as uncertainty sampling or mutual information maximization. However, there exists a disagreement between these heuristics and the ultimate goal of BAL, i.e., optimizing the model\u2019s final performance within the query budgets. This disagreement leads to a limited generality of these heuristics. To this end, we formulate BAL as an MDP and propose a data-driven approach based on deep reinforcement learning. Our method learns the BAL strategy by maximizing the model\u2019s final performance. Experiments on the UCI benchmark show that our method can achieve competitive performance compared to existing heuristics-based approaches.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yi-Chen and Shen, Wen-Jie and Zhang, Boyu and Mao, Feng and Zhang, Zongzhang and Yu, Yang}, year={2024}, month={Jul.}, pages={16258-16259} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26989/26761", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26989", + "pdf_size": 89203, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:y87bgGpd_qYJ:scholar.google.com/&scioq=Learning+Generalizable+Batch+Active+Learning+Strategies+via+Deep+Q-networks+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;gmail.com;alibaba-inc.com;alibaba-inc.com;nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;gmail.com;alibaba-inc.com;alibaba-inc.com;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;2;2;0;0", + "aff_unique_norm": "Nanjing University;Beijing University of Posts and Telecommunications;Alibaba Group", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;School of Artificial Intelligence;", + "aff_unique_url": "http://www.nju.edu.cn;http://www.bupt.edu.cn/;https://www.alibaba.com", + 
"aff_unique_abbr": "Nanjing U;BUPT;Alibaba", + "aff_campus_unique_index": "0;0+1;2;2;0;0", + "aff_campus_unique": "Nanjing;Beijing;Hangzhou", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26229", + "title": "Learning Instrumental Variable from Data Fusion for Treatment Effect Estimation", + "track": "main", + "status": "Technical", + "abstract": "The advent of the big data era brought new opportunities and challenges to draw treatment effect in data fusion, that is, a mixed dataset collected from multiple sources (each source with an independent treatment assignment mechanism). Due to possibly omitted source labels and unmeasured confounders, traditional methods cannot estimate individual treatment assignment probability and infer treatment effect effectively. Therefore, we propose to reconstruct the source label and model it as a Group Instrumental Variable (GIV) to implement IV-based Regression for treatment effect estimation. In this paper, we conceptualize this line of thought and develop a unified framework (Meta-EM) to (1) map the raw data into a representation space to construct Linear Mixed Models for the assigned treatment variable; (2) estimate the distribution differences and model the GIV for the different treatment assignment mechanisms; and (3) adopt an alternating training strategy to iteratively optimize the representations and the joint distribution to model GIV for IV regression. Empirical results demonstrate the advantages of our Meta-EM compared with state-of-the-art methods. 
The project page with the code and the Supplementary materials is available at https://github.com/causal-machine-learning-lab/meta-em.", + "primary_area": "machine learning iv", + "author": "Anpeng Wu; Kun Kuang; Ruoxuan Xiong; Minqin Zhu; Yuxuan Liu; Bo Li; Furui Liu; Zhihua Wang; Fei Wu", + "authorids": "", + "aff": "Department of Computer Science and Technology, Zhejiang University; Department of Computer Science and Technology, Zhejiang University; Department of Quantitative Theory and Methods, Emory University; Department of Computer Science and Technology, Zhejiang University; Department of Computer Science and Technology, Zhejiang University; School of Economics and Management, Tsinghua University; Huawei Noah\u2019s Ark lab; Shanghai AI Laboratory + Shanghai Institute for Advanced Study of Zhejiang University; Department of Computer Science and Technology, Zhejiang University + Shanghai AI Laboratory + Shanghai Institute for Advanced Study of Zhejiang University", + "bibtex": "@article{Wu_Kuang_Xiong_Zhu_Liu_Li_Liu_Wang_Wu_2023, title={Learning Instrumental Variable from Data Fusion for Treatment Effect Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26229}, DOI={10.1609/aaai.v37i9.26229}, abstractNote={The advent of the big data era brought new opportunities and challenges to draw treatment effect in data fusion, that is, a mixed dataset collected from multiple sources (each source with an independent treatment assignment mechanism). Due to possibly omitted source labels and unmeasured confounders, traditional methods cannot estimate individual treatment assignment probability and infer treatment effect effectively. Therefore, we propose to reconstruct the source label and model it as a Group Instrumental Variable (GIV) to implement IV-based Regression for treatment effect estimation. 
In this paper, we conceptualize this line of thought and develop a unified framework (Meta-EM) to (1) map the raw data into a representation space to construct Linear Mixed Models for the assigned treatment variable; (2) estimate the distribution differences and model the GIV for the different treatment assignment mechanisms; and (3) adopt an alternating training strategy to iteratively optimize the representations and the joint distribution to model GIV for IV regression. Empirical results demonstrate the advantages of our Meta-EM compared with state-of-the-art methods. The project page with the code and the Supplementary materials is available at https://github.com/causal-machine-learning-lab/meta-em.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Anpeng and Kuang, Kun and Xiong, Ruoxuan and Zhu, Minqin and Liu, Yuxuan and Li, Bo and Liu, Furui and Wang, Zhihua and Wu, Fei}, year={2023}, month={Jun.}, pages={10324-10332} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26229/26001", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26229", + "pdf_size": 5785908, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8813609348375390913&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn;zju.edu.cn;emory.edu;zju.edu.cn;zju.edu.cn;sem.tsinghua.edu.cn;huawei.com;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;emory.edu;zju.edu.cn;zju.edu.cn;sem.tsinghua.edu.cn;huawei.com;zju.edu.cn;zju.edu.cn", + "github": "https://github.com/causal-machine-learning-lab/meta-em", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0;0;2;3;4+0;0+4+0", + "aff_unique_norm": "Zhejiang University;Emory University;Tsinghua University;Huawei;Shanghai AI Laboratory", + "aff_unique_dep": "Department of Computer Science and Technology;Department of Quantitative Theory and Methods;School of Economics and Management;Noah\u2019s Ark 
lab;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.emory.edu;https://www.tsinghua.edu.cn;https://www.huawei.com;https://www.shanghai-ai-lab.com", + "aff_unique_abbr": "ZJU;Emory;THU;Huawei;SAIL", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0;1;0;0;0;0;0+0;0+0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25800", + "title": "Learning Interpretable Temporal Properties from Positive Examples Only", + "track": "main", + "status": "Technical", + "abstract": "We consider the problem of explaining the temporal behavior of black-box systems using human-interpretable models. Following recent research trends, we rely on the fundamental yet interpretable models of deterministic finite automata (DFAs) and linear temporal logic (LTL_f) formulas. In contrast to most existing works for learning DFAs and LTL_f formulas, we consider learning from only positive examples. Our motivation is that negative examples are generally difficult to observe, in particular, from black-box systems. To learn meaningful models from positive examples only, we design algorithms that rely on conciseness and language minimality of models as regularizers. Our learning algorithms are based on two approaches: a symbolic and a counterexample-guided one. The symbolic approach exploits an efficient encoding of language minimality as a constraint satisfaction problem, whereas the counterexample-guided one relies on generating suitable negative examples to guide the learning. Both approaches provide us with effective algorithms with minimality guarantees on the learned models. 
To assess the effectiveness of our algorithms, we evaluate them on a few practical case studies.", + "primary_area": "knowledge representation and reasoning", + "author": "Rajarshi Roy; Jean-Rapha\u00ebl Gaglione; Nasim Baharisangari; Daniel Neider; Zhe Xu; Ufuk Topcu", + "authorids": "", + "aff": "Max Planck Institute for Software Systems, Kaiserslautern, Germany; University of Texas at Austin, Texas, USA; Arizona State University, Arizona, USA; TU Dortmund University, Dortmund, Germany+Center for Trustworthy Data Science and Security, University Alliance Ruhr, Germany; Arizona State University, Arizona, USA; University of Texas at Austin, Texas, USA", + "bibtex": "@article{Roy_Gaglione_Baharisangari_Neider_Xu_Topcu_2023, title={Learning Interpretable Temporal Properties from Positive Examples Only}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25800}, DOI={10.1609/aaai.v37i5.25800}, abstractNote={We consider the problem of explaining the temporal behavior of black-box systems using human-interpretable models. Following recent research trends, we rely on the fundamental yet interpretable models of deterministic finite automata (DFAs) and linear temporal logic (LTL_f) formulas. In contrast to most existing works for learning DFAs and LTL_f formulas, we consider learning from only positive examples. Our motivation is that negative examples are generally difficult to observe, in particular, from black-box systems. To learn meaningful models from positive examples only, we design algorithms that rely on conciseness and language minimality of models as regularizers. Our learning algorithms are based on two approaches: a symbolic and a counterexample-guided one. The symbolic approach exploits an efficient encoding of language minimality as a constraint satisfaction problem, whereas the counterexample-guided one relies on generating suitable negative examples to guide the learning. 
Both approaches provide us with effective algorithms with minimality guarantees on the learned models. To assess the effectiveness of our algorithms, we evaluate them on a few practical case studies.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Roy, Rajarshi and Gaglione, Jean-Rapha\u00ebl and Baharisangari, Nasim and Neider, Daniel and Xu, Zhe and Topcu, Ufuk}, year={2023}, month={Jun.}, pages={6507-6515} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25800/25572", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25800", + "pdf_size": 194148, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17295643867766448020&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3+4;2;1", + "aff_unique_norm": "Max Planck Institute for Software Systems;University of Texas at Austin;Arizona State University;TU Dortmund University;University Alliance Ruhr", + "aff_unique_dep": ";;;;Center for Trustworthy Data Science and Security", + "aff_unique_url": "https://www.mpi-sws.org;https://www.utexas.edu;https://www.asu.edu;https://www.tu-dortmund.de;", + "aff_unique_abbr": "MPI-SWS;UT Austin;ASU;TUDO;", + "aff_campus_unique_index": "0;1;2;3;2;1", + "aff_campus_unique": "Kaiserslautern;Austin;Arizona;Dortmund;", + "aff_country_unique_index": "0;1;1;0+0;1;1", + "aff_country_unique": "Germany;United States" + }, + { + "id": "article-25774", + "title": "Learning Logic Programs by Discovering Where Not to Search", + "track": "main", + "status": "Technical", + "abstract": "The goal of inductive logic programming (ILP) is to search for a hypothesis that generalises training examples and background knowledge (BK). To improve performance, we introduce an approach that, before searching for a hypothesis, first discovers \"where not to search\". 
We use given BK to discover constraints on hypotheses, such as that a number cannot be both even and odd. We use the constraints to bootstrap a constraint-driven ILP system. Our experiments on multiple domains (including program synthesis and inductive general game playing) show that our approach can (i) substantially reduce learning times by up to 97%, and (ii) can scale to domains with millions of facts.", + "primary_area": "knowledge representation and reasoning", + "author": "Andrew Cropper; C\u00e9line Hocquette", + "authorids": "", + "aff": "University of Oxford; University of Oxford", + "bibtex": "@article{Cropper_Hocquette_2023, title={Learning Logic Programs by Discovering Where Not to Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25774}, DOI={10.1609/aaai.v37i5.25774}, abstractNote={The goal of inductive logic programming (ILP) is to search for a hypothesis that generalises training examples and background knowledge (BK). To improve performance, we introduce an approach that, before searching for a hypothesis, first discovers "where not to search". We use given BK to discover constraints on hypotheses, such as that a number cannot be both even and odd. We use the constraints to bootstrap a constraint-driven ILP system. 
Our experiments on multiple domains (including program synthesis and inductive general game playing) show that our approach can (i) substantially reduce learning times by up to 97%, and (ii) can scale to domains with millions of facts.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cropper, Andrew and Hocquette, C\u00e9line}, year={2023}, month={Jun.}, pages={6289-6296} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25774/25546", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25774", + "pdf_size": 125260, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13991184108471117617&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "cs.ox.ac.uk;cs.ox.ac.uk", + "email": "cs.ox.ac.uk;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26891", + "title": "Learning Logical Reasoning Using an Intelligent Tutoring System: A Hybrid Approach to Student Modeling", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "In our previous works, we presented Logic-Muse as an Intelligent Tutoring System that helps learners improve logical reasoning skills in multiple contexts. Logic-Muse components were validated and argued by experts throughout the designing process (ITS researchers, logicians, and reasoning psychologists). A catalog of reasoning errors (syntactic and semantic) has been established, in addition to an explicit representation of semantic knowledge and the structures and meta-structures underlying conditional reasoning. 
A Bayesian network with expert validation has been developed and used in a Bayesian Knowledge Tracing (BKT) process that allows the inference of the learner skills. \nThis paper presents an evaluation of the learner-model components in Logic-Muse (a bayesian learner model). We conducted a study and collected data from nearly 300 students who processed 48 reasoning activities. These data were used to develop a psychometric model for initializing the learner's model and validating the structure of the initial Bayesian network. We have also developed a neural architecture on which a model was trained to support a deep knowledge tracing (DKT) process. The proposed neural architecture improves the initial version of DKT by allowing the integration of expert knowledge (through the Bayesian Expert Validation Network) and allowing better generalization of knowledge with few samples. The results show a significant improvement in the predictive power of the learner model. The analysis of the results of the psychometric model also illustrates an excellent potential for improving the Bayesian network's structure and the learner model's initialization process.", + "primary_area": "", + "author": "Roger Nkambou; Janie Brisson; Ange Tato; Serge Robert", + "authorids": "", + "aff": "Universit \u00b4e du Qu \u00b4ebec `a Montr \u00b4eal, Centre de Recherche en Intelligence Artificielle; Universit \u00b4e du Qu \u00b4ebec `a Montr \u00b4eal, Centre de Recherche en Intelligence Artificielle; Universit \u00b4e du Qu \u00b4ebec `a Montr \u00b4eal, Centre de Recherche en Intelligence Artificielle+\u00b4Ecole de Technologie Sup \u00b4erieure; Universit \u00b4e du Qu \u00b4ebec `a Montr \u00b4eal, Centre de Recherche en Intelligence Artificielle", + "bibtex": "@article{Nkambou_Brisson_Tato_Robert_2024, title={Learning Logical Reasoning Using an Intelligent Tutoring System: A Hybrid Approach to Student Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26891}, 
DOI={10.1609/aaai.v37i13.26891}, abstractNote={In our previous works, we presented Logic-Muse as an Intelligent Tutoring System that helps learners improve logical reasoning skills in multiple contexts. Logic-Muse components were validated and argued by experts throughout the designing process (ITS researchers, logicians, and reasoning psychologists). A catalog of reasoning errors (syntactic and semantic) has been established, in addition to an explicit representation of semantic knowledge and the structures and meta-structures underlying conditional reasoning. A Bayesian network with expert validation has been developed and used in a Bayesian Knowledge Tracing (BKT) process that allows the inference of the learner skills. This paper presents an evaluation of the learner-model components in Logic-Muse (a bayesian learner model). We conducted a study and collected data from nearly 300 students who processed 48 reasoning activities. These data were used to develop a psychometric model for initializing the learner\u2019s model and validating the structure of the initial Bayesian network. We have also developed a neural architecture on which a model was trained to support a deep knowledge tracing (DKT) process. The proposed neural architecture improves the initial version of DKT by allowing the integration of expert knowledge (through the Bayesian Expert Validation Network) and allowing better generalization of knowledge with few samples. The results show a significant improvement in the predictive power of the learner model. 
The analysis of the results of the psychometric model also illustrates an excellent potential for improving the Bayesian network\u2019s structure and the learner model\u2019s initialization process.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nkambou, Roger and Brisson, Janie and Tato, Ange and Robert, Serge}, year={2024}, month={Jul.}, pages={15930-15937} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26891/26663", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26891", + "pdf_size": 596841, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12864771940481380191&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff_domain": "uqam.ca;uqam.ca;etsmtl.ca;uqam.ca", + "email": "uqam.ca;uqam.ca;etsmtl.ca;uqam.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "Universit\u00e9 du Qu\u00e9bec \u00e0 Montr\u00e9al;Ecole de Technologie Sup\u00e9rieure", + "aff_unique_dep": "Centre de Recherche en Intelligence Artificielle;", + "aff_unique_url": "https://www.uqam.ca;https://www.etsmtl.ca", + "aff_unique_abbr": "UQAM;ETS", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Montr\u00e9al;", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25516", + "title": "Learning Markov Random Fields for Combinatorial Structures via Sampling through Lov\u00e1sz Local Lemma", + "track": "main", + "status": "Technical", + "abstract": "Learning to generate complex combinatorial structures satisfying constraints will have transformative impacts in many application domains. However, it is beyond the capabilities of existing approaches due to the highly intractable nature of the embedded probabilistic inference. 
Prior works spend most of the training time learning to separate valid from invalid structures but do not learn the inductive biases of valid structures. We develop NEural Lovasz Sampler (NELSON), which embeds the sampler through Lovasz Local Lemma (LLL) as a fully differentiable neural network layer. Our NELSON-CD embeds this sampler into the contrastive divergence learning process of Markov random fields. NELSON allows us to obtain valid samples from the current model distribution. Contrastive divergence is then applied to separate these samples from those in the training set. NELSON is implemented as a fully differentiable neural net, taking advantage of the parallelism of GPUs. Experimental results on several real-world domains reveal that NELSON learns to generate 100% valid structures, while baselines either time out or cannot ensure validity. NELSON also outperforms other approaches in running time, log-likelihood, and MAP scores.", + "primary_area": "constraint satisfaction and optimization", + "author": "Nan Jiang; Yi Gu; Yexiang Xue", + "authorids": "", + "aff": "Department of Computer Science, Purdue University, USA; Department of Mathematics, Northwestern University, USA; Department of Computer Science, Purdue University, USA", + "bibtex": "@article{Jiang_Gu_Xue_2023, title={Learning Markov Random Fields for Combinatorial Structures via Sampling through Lov\u00e1sz Local Lemma}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25516}, DOI={10.1609/aaai.v37i4.25516}, abstractNote={Learning to generate complex combinatorial structures satisfying constraints will have transformative impacts in many application domains. However, it is beyond the capabilities of existing approaches due to the highly intractable nature of the embedded probabilistic inference. Prior works spend most of the training time learning to separate valid from invalid structures but do not learn the inductive biases of valid structures. 
We develop NEural Lovasz Sampler (NELSON), which embeds the sampler through Lovasz Local Lemma (LLL) as a fully differentiable neural network layer. Our NELSON-CD embeds this sampler into the contrastive divergence learning process of Markov random fields. NELSON allows us to obtain valid samples from the current model distribution. Contrastive divergence is then applied to separate these samples from those in the training set. NELSON is implemented as a fully differentiable neural net, taking advantage of the parallelism of GPUs. Experimental results on several real-world domains reveal that NELSON learns to generate 100% valid structures, while baselines either time out or cannot ensure validity. NELSON also outperforms other approaches in running time, log-likelihood, and MAP scores.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Nan and Gu, Yi and Xue, Yexiang}, year={2023}, month={Jun.}, pages={4016-4024} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25516/25288", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25516", + "pdf_size": 402174, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:0uBMjAc1bMIJ:scholar.google.com/&scioq=Learning+Markov+Random+Fields+for+Combinatorial+Structures+via+Sampling+through+Lov%C3%A1sz+Local+Lemma&hl=en&as_sdt=0,5", + "gs_version_total": 7, + "aff_domain": "purdue.edu;u.northwestern.edu;purdue.edu", + "email": "purdue.edu;u.northwestern.edu;purdue.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Purdue University;Northwestern University", + "aff_unique_dep": "Department of Computer Science;Department of Mathematics", + "aff_unique_url": "https://www.purdue.edu;https://www.northwestern.edu", + "aff_unique_abbr": "Purdue;NU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": 
"United States" + }, + { + "id": "article-25217", + "title": "Learning Motion-Robust Remote Photoplethysmography through Arbitrary Resolution Videos", + "track": "main", + "status": "Technical", + "abstract": "Remote photoplethysmography (rPPG) enables non-contact heart rate (HR) estimation from facial videos which gives significant convenience compared with traditional contact-based measurements. In the real-world long-term health monitoring scenario, the distance of the participants and their head movements usually vary by time, resulting in the inaccurate rPPG measurement due to the varying face resolution and complex motion artifacts. Different from the previous rPPG models designed for a constant distance between camera and participants, in this paper, we propose two plug-and-play blocks (i.e., physiological signal feature extraction block (PFE) and temporal face alignment block (TFA)) to alleviate the degradation of changing distance and head motion.\nOn one side, guided with representative-area information, PFE adaptively encodes the arbitrary resolution facial frames to the fixed-resolution facial structure features. On the other side, leveraging the estimated optical flow, TFA is able to counteract the rPPG signal confusion caused by the head movement thus benefit the motion-robust rPPG signal recovery. Besides, we also train the model with a cross-resolution constraint using a two-stream dual-resolution framework, which further helps PFE learn resolution-robust facial rPPG features. Extensive experiments on three benchmark datasets (UBFC-rPPG, COHFACE and PURE) demonstrate the superior performance of the proposed method. One highlight is that with PFE and TFA, the off-the-shelf spatio-temporal rPPG models can predict more robust rPPG signals under both varying face resolution and severe head movement scenarios. 
The codes are available at https://github.com/LJWGIT/Arbitrary_Resolution_rPPG.", + "primary_area": "computer vision i", + "author": "Jianwei Li; Zitong Yu; Jingang Shi", + "authorids": "", + "aff": "School of Software Engineering, Xi\u2019an Jiaotong University; Great Bay University; School of Software Engineering, Xi\u2019an Jiaotong University", + "bibtex": "@article{Li_Yu_Shi_2023, title={Learning Motion-Robust Remote Photoplethysmography through Arbitrary Resolution Videos}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25217}, DOI={10.1609/aaai.v37i1.25217}, abstractNote={Remote photoplethysmography (rPPG) enables non-contact heart rate (HR) estimation from facial videos which gives significant convenience compared with traditional contact-based measurements. In the real-world long-term health monitoring scenario, the distance of the participants and their head movements usually vary by time, resulting in the inaccurate rPPG measurement due to the varying face resolution and complex motion artifacts. Different from the previous rPPG models designed for a constant distance between camera and participants, in this paper, we propose two plug-and-play blocks (i.e., physiological signal feature extraction block (PFE) and temporal face alignment block (TFA)) to alleviate the degradation of changing distance and head motion.\nOn one side, guided with representative-area information, PFE adaptively encodes the arbitrary resolution facial frames to the fixed-resolution facial structure features. On the other side, leveraging the estimated optical flow, TFA is able to counteract the rPPG signal confusion caused by the head movement thus benefit the motion-robust rPPG signal recovery. Besides, we also train the model with a cross-resolution constraint using a two-stream dual-resolution framework, which further helps PFE learn resolution-robust facial rPPG features. 
Extensive experiments on three benchmark datasets (UBFC-rPPG, COHFACE and PURE) demonstrate the superior performance of the proposed method. One highlight is that with PFE and TFA, the off-the-shelf spatio-temporal rPPG models can predict more robust rPPG signals under both varying face resolution and severe head movement scenarios. The codes are available at https://github.com/LJWGIT/Arbitrary_Resolution_rPPG.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jianwei and Yu, Zitong and Shi, Jingang}, year={2023}, month={Jun.}, pages={1334-1342} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25217/24989", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25217", + "pdf_size": 6894792, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7463803864129807142&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.xjtu.edu.cn;ieee.org;xjtu.edu.cn", + "email": "stu.xjtu.edu.cn;ieee.org;xjtu.edu.cn", + "github": "https://github.com/LJW-GIT/Arbitrary Resolution rPPG", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Xi'an Jiaotong University;Great Bay University", + "aff_unique_dep": "School of Software Engineering;", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.greatbay.edu", + "aff_unique_abbr": "XJTU;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25962", + "title": "Learning Noise-Induced Reward Functions for Surpassing Demonstrations in Imitation Learning", + "track": "main", + "status": "Technical", + "abstract": "Imitation learning (IL) has recently shown impressive performance in training a reinforcement learning agent with human demonstrations, eliminating the difficulty of designing elaborate reward functions in complex environments. 
However, most IL methods work under the assumption of the optimality of the demonstrations and thus cannot learn policies to surpass the demonstrators. Some methods have been investigated to obtain better-than-demonstration (BD) performance with inner human feedback or preference labels. In this paper, we propose a method to learn rewards from suboptimal demonstrations via a weighted preference learning technique (LERP). Specifically, we first formulate the suboptimality of demonstrations as the inaccurate estimation of rewards. The inaccuracy is modeled with a reward noise random variable following the Gumbel distribution. Moreover, we derive an upper bound of the expected return with different noise coefficients and propose a theorem to surpass the demonstrations. Unlike existing literature, our analysis does not depend on the linear reward constraint. Consequently, we develop a BD model with a weighted preference learning technique. Experimental results on continuous control and high-dimensional discrete control tasks show the superiority of our LERP method over other state-of-the-art BD methods.", + "primary_area": "machine learning ii", + "author": "Liangyu Huo; Zulin Wang; Mai Xu", + "authorids": "", + "aff": "School of Electronic and Information Engineering, Beihang University; School of Electronic and Information Engineering, Beihang University; School of Electronic and Information Engineering, Beihang University", + "bibtex": "@article{Huo_Wang_Xu_2023, title={Learning Noise-Induced Reward Functions for Surpassing Demonstrations in Imitation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25962}, DOI={10.1609/aaai.v37i7.25962}, abstractNote={Imitation learning (IL) has recently shown impressive performance in training a reinforcement learning agent with human demonstrations, eliminating the difficulty of designing elaborate reward functions in complex environments. 
However, most IL methods work under the assumption of the optimality of the demonstrations and thus cannot learn policies to surpass the demonstrators. Some methods have been investigated to obtain better-than-demonstration (BD) performance with inner human feedback or preference labels. In this paper, we propose a method to learn rewards from suboptimal demonstrations via a weighted preference learning technique (LERP). Specifically, we first formulate the suboptimality of demonstrations as the inaccurate estimation of rewards. The inaccuracy is modeled with a reward noise random variable following the Gumbel distribution. Moreover, we derive an upper bound of the expected return with different noise coefficients and propose a theorem to surpass the demonstrations. Unlike existing literature, our analysis does not depend on the linear reward constraint. Consequently, we develop a BD model with a weighted preference learning technique. Experimental results on continuous control and high-dimensional discrete control tasks show the superiority of our LERP method over other state-of-the-art BD methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huo, Liangyu and Wang, Zulin and Xu, Mai}, year={2023}, month={Jun.}, pages={7953-7961} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25962/25734", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25962", + "pdf_size": 1927183, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:PgxcjCJHCrsJ:scholar.google.com/&scioq=Learning+Noise-Induced+Reward+Functions+for+Surpassing+Demonstrations+in+Imitation+Learning&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "School of 
Electronic and Information Engineering", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "Beihang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25875", + "title": "Learning Optimal Features via Partial Invariance", + "track": "main", + "status": "Technical", + "abstract": "Learning models that are robust to distribution shifts is a key concern in the context of their real-life applicability. Invariant Risk Minimization (IRM) is a popular framework that aims to learn robust models from multiple environments. The success of IRM requires an important assumption: the underlying causal mechanisms/features remain invariant across environments. When not satisfied, we show that IRM can over-constrain the predictor and to remedy this, we propose a relaxation via partial invariance. In this work, we theoretically highlight the sub-optimality of IRM and then demonstrate how learning from a partition of training domains can help improve invariant models. Several experiments, conducted both in linear settings as well as with deep neural networks on tasks over both language and image data, allow us to verify our conclusions.", + "primary_area": "machine learning i", + "author": "Moulik Choraria; Ibtihal Ferwana; Ankur Mani; Lav R. Varshney", + "authorids": "", + "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Minnesota Twin Cities; University of Illinois at Urbana-Champaign", + "bibtex": "@article{Choraria_Ferwana_Mani_Varshney_2023, title={Learning Optimal Features via Partial Invariance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25875}, DOI={10.1609/aaai.v37i6.25875}, abstractNote={Learning models that are robust to distribution shifts is a key concern in the context of their real-life applicability. 
Invariant Risk Minimization (IRM) is a popular framework that aims to learn robust models from multiple environments. The success of IRM requires an important assumption: the underlying causal mechanisms/features remain invariant across environments. When not satisfied, we show that IRM can over-constrain the predictor and to remedy this, we propose a relaxation via partial invariance. In this work, we theoretically highlight the sub-optimality of IRM and then demonstrate how learning from a partition of training domains can help improve invariant models. Several experiments, conducted both in linear settings as well as with deep neural networks on tasks over both language and image data, allow us to verify our conclusions.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Choraria, Moulik and Ferwana, Ibtihal and Mani, Ankur and Varshney, Lav R.}, year={2023}, month={Jun.}, pages={7175-7183} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25875/25647", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25875", + "pdf_size": 165072, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5055360525992107859&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "illinois.edu;illinois.edu;umn.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;umn.edu;illinois.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;University of Minnesota", + "aff_unique_dep": ";", + "aff_unique_url": "https://illinois.edu;https://www.minnstate.edu", + "aff_unique_abbr": "UIUC;UMN", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Urbana-Champaign;Twin Cities", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25852", + "title": "Learning Pessimism for Reinforcement Learning", + "track": "main", 
+ "status": "Technical", + "abstract": "Off-policy deep reinforcement learning algorithms commonly compensate for overestimation bias during temporal-difference learning by utilizing pessimistic estimates of the expected target returns. In this work, we propose Generalized Pessimism Learning (GPL), a strategy employing a novel learnable penalty to enact such pessimism. In particular, we propose to learn this penalty alongside the critic with dual TD-learning, a new procedure to estimate and minimize the magnitude of the target returns bias with trivial computational cost. GPL enables us to accurately counteract overestimation bias throughout training without incurring the downsides of overly pessimistic targets. By integrating GPL with popular off-policy algorithms, we achieve state-of-the-art results in both competitive proprioceptive and pixel-based benchmarks.", + "primary_area": "machine learning i", + "author": "Edoardo Cetin; Oya Celiktutan", + "authorids": "", + "aff": "King\u2019s College London; King\u2019s College London", + "bibtex": "@article{Cetin_Celiktutan_2023, title={Learning Pessimism for Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25852}, DOI={10.1609/aaai.v37i6.25852}, abstractNote={Off-policy deep reinforcement learning algorithms commonly compensate for overestimation bias during temporal-difference learning by utilizing pessimistic estimates of the expected target returns. In this work, we propose Generalized Pessimism Learning (GPL), a strategy employing a novel learnable penalty to enact such pessimism. In particular, we propose to learn this penalty alongside the critic with dual TD-learning, a new procedure to estimate and minimize the magnitude of the target returns bias with trivial computational cost. GPL enables us to accurately counteract overestimation bias throughout training without incurring the downsides of overly pessimistic targets. 
By integrating GPL with popular off-policy algorithms, we achieve state-of-the-art results in both competitive proprioceptive and pixel-based benchmarks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cetin, Edoardo and Celiktutan, Oya}, year={2023}, month={Jun.}, pages={6971-6979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25852/25624", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25852", + "pdf_size": 4392722, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9431721912774251772&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "kcl.ac.uk;kcl.ac.uk", + "email": "kcl.ac.uk;kcl.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "King's College London", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kcl.ac.uk", + "aff_unique_abbr": "KCL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25219", + "title": "Learning Polysemantic Spoof Trace: A Multi-Modal Disentanglement Network for Face Anti-spoofing", + "track": "main", + "status": "Technical", + "abstract": "Along with the widespread use of face recognition systems, their vulnerability has become highlighted. While existing face anti-spoofing methods can be generalized between attack types, generic solutions are still challenging due to the diversity of spoof characteristics. Recently, the spoof trace disentanglement framework has shown great potential for coping with both seen and unseen spoof scenarios, but the performance is largely restricted by the single-modal input. This paper focuses on this issue and presents a multi-modal disentanglement model which targetedly learns polysemantic spoof traces for more accurate and robust generic attack detection. 
In particular, based on the adversarial learning mechanism, a two-stream disentangling network is designed to estimate spoof patterns from the RGB and depth inputs, respectively. In this case, it captures complementary spoofing clues inhering in different attacks. Furthermore, a fusion module is exploited, which recalibrates both representations at multiple stages to promote the disentanglement in each individual modality. It then performs cross-modality aggregation to deliver a more comprehensive spoof trace representation for prediction. Extensive evaluations are conducted on multiple benchmarks, demonstrating that learning polysemantic spoof traces favorably contributes to anti-spoofing with more perceptible and interpretable results.", + "primary_area": "computer vision i", + "author": "Kaicheng Li; Hongyu Yang; Binghui Chen; Pengyu Li; Biao Wang; Di Huang", + "authorids": "", + "aff": "State Key Laboratory of Software Development Environment, Beihang University, China+School of Computer Science and Engineering, Beihang University, China; Institute of Artificial Intelligence, Beihang University, China; Beijing University of Posts and Telecommunications, China; ; ; State Key Laboratory of Software Development Environment, Beihang University, China+School of Computer Science and Engineering, Beihang University, China+Hangzhou Innovation Institute, Beihang University, China", + "bibtex": "@article{Li_Yang_Chen_Li_Wang_Huang_2023, title={Learning Polysemantic Spoof Trace: A Multi-Modal Disentanglement Network for Face Anti-spoofing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25219}, DOI={10.1609/aaai.v37i1.25219}, abstractNote={Along with the widespread use of face recognition systems, their vulnerability has become highlighted. While existing face anti-spoofing methods can be generalized between attack types, generic solutions are still challenging due to the diversity of spoof characteristics. 
Recently, the spoof trace disentanglement framework has shown great potential for coping with both seen and unseen spoof scenarios, but the performance is largely restricted by the single-modal input. This paper focuses on this issue and presents a multi-modal disentanglement model which targetedly learns polysemantic spoof traces for more accurate and robust generic attack detection. In particular, based on the adversarial learning mechanism, a two-stream disentangling network is designed to estimate spoof patterns from the RGB and depth inputs, respectively. In this case, it captures complementary spoofing clues inhering in different attacks. Furthermore, a fusion module is exploited, which recalibrates both representations at multiple stages to promote the disentanglement in each individual modality. It then performs cross-modality aggregation to deliver a more comprehensive spoof trace representation for prediction. Extensive evaluations are conducted on multiple benchmarks, demonstrating that learning polysemantic spoof traces favorably contributes to anti-spoofing with more perceptible and interpretable results.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Kaicheng and Yang, Hongyu and Chen, Binghui and Li, Pengyu and Wang, Biao and Huang, Di}, year={2023}, month={Jun.}, pages={1351-1359} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25219/24991", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25219", + "pdf_size": 2998373, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10829853565292249956&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;bupt.edu.cn;gmail.com;foxmail.com;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;bupt.edu.cn;gmail.com;foxmail.com;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0;1;0+0+0", + "aff_unique_norm": "Beihang 
University;Beijing University of Posts and Telecommunications", + "aff_unique_dep": "State Key Laboratory of Software Development Environment;", + "aff_unique_url": "http://www.buaa.edu.cn;http://www.bupt.edu.cn/", + "aff_unique_abbr": "Beihang;BUPT", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0+0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25930", + "title": "Learning Program Synthesis for Integer Sequences from Scratch", + "track": "main", + "status": "Technical", + "abstract": "We present a self-learning approach for synthesizing programs from integer\nsequences. Our method relies on a tree search guided by a learned policy. \nOur system is tested on the On-Line Encyclopedia of Integer Sequences.\nThere, it discovers, on its own, solutions for 27987 sequences starting from \nbasic operators and without human-written training examples.", + "primary_area": "machine learning i", + "author": "Thibault Gauthier; Josef Urban", + "authorids": "", + "aff": "Czech Technical University in Prague, Czech Republic; Czech Technical University in Prague, Czech Republic", + "bibtex": "@article{Gauthier_Urban_2023, title={Learning Program Synthesis for Integer Sequences from Scratch}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25930}, DOI={10.1609/aaai.v37i6.25930}, abstractNote={We present a self-learning approach for synthesizing programs from integer\nsequences. Our method relies on a tree search guided by a learned policy. 
Our system is tested on the On-Line Encyclopedia of Integer Sequences.\nThere, it discovers, on its own, solutions for 27987 sequences starting from basic operators and without human-written training examples.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gauthier, Thibault and Urban, Josef}, year={2023}, month={Jun.}, pages={7670-7677} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25930/25702", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25930", + "pdf_size": 151571, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12891655542313215112&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "thibaultgauthier.fr;gmail.com", + "email": "thibaultgauthier.fr;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Czech Technical University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ctu.cz", + "aff_unique_abbr": "CTU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Prague", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Czech Republic" + }, + { + "id": "article-25273", + "title": "Learning Progressive Modality-Shared Transformers for Effective Visible-Infrared Person Re-identification", + "track": "main", + "status": "Technical", + "abstract": "Visible-Infrared Person Re-Identification (VI-ReID) is a challenging retrieval task under complex modality changes. Existing methods usually focus on extracting discriminative visual features while ignoring the reliability and commonality of visual features between different modalities. In this paper, we propose a novel deep learning framework named Progressive Modality-shared Transformer (PMT) for effective VI-ReID. To reduce the negative effect of modality gaps, we first take the gray-scale images as an auxiliary modality and propose a progressive learning strategy. 
Then, we propose a Modality-Shared Enhancement Loss (MSEL) to guide the model to explore more reliable identity information from modality-shared features. Finally, to cope with the problem of large intra-class differences and small inter-class differences, we propose a Discriminative Center Loss (DCL) combined with the MSEL to further improve the discrimination of reliable features. Extensive experiments on SYSU-MM01 and RegDB datasets show that our proposed framework performs better than most state-of-the-art methods. For model reproduction, we release the source code at https://github.com/hulu88/PMT.", + "primary_area": "computer vision ii", + "author": "Hu Lu; Xuezhang Zou; Pingping Zhang", + "authorids": "", + "aff": "School of Computer Science and Communication Engineering, Jiangsu University; School of Computer Science and Communication Engineering, Jiangsu University; School of Artificial Intelligence, Dalian University of Technology", + "bibtex": "@article{Lu_Zou_Zhang_2023, title={Learning Progressive Modality-Shared Transformers for Effective Visible-Infrared Person Re-identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25273}, DOI={10.1609/aaai.v37i2.25273}, abstractNote={Visible-Infrared Person Re-Identification (VI-ReID) is a challenging retrieval task under complex modality changes. Existing methods usually focus on extracting discriminative visual features while ignoring the reliability and commonality of visual features between different modalities. In this paper, we propose a novel deep learning framework named Progressive Modality-shared Transformer (PMT) for effective VI-ReID. To reduce the negative effect of modality gaps, we first take the gray-scale images as an auxiliary modality and propose a progressive learning strategy. Then, we propose a Modality-Shared Enhancement Loss (MSEL) to guide the model to explore more reliable identity information from modality-shared features. 
Finally, to cope with the problem of large intra-class differences and small inter-class differences, we propose a Discriminative Center Loss (DCL) combined with the MSEL to further improve the discrimination of reliable features. Extensive experiments on SYSU-MM01 and RegDB datasets show that our proposed framework performs better than most state-of-the-art methods. For model reproduction, we release the source code at https://github.com/hulu88/PMT.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Hu and Zou, Xuezhang and Zhang, Pingping}, year={2023}, month={Jun.}, pages={1835-1843} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25273/25045", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25273", + "pdf_size": 9586464, + "gs_citation": 137, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4758208537469350668&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "ujs.edu.cn;stmail.ujs.edu.cn;dlut.edu.cn", + "email": "ujs.edu.cn;stmail.ujs.edu.cn;dlut.edu.cn", + "github": "https://github.com/hulu88/PMT", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Jiangsu University;Dalian University of Technology", + "aff_unique_dep": "School of Computer Science and Communication Engineering;School of Artificial Intelligence", + "aff_unique_url": "http://www.ujs.edu.cn;http://www.dlut.edu.cn", + "aff_unique_abbr": ";DUT", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Dalian", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26423", + "title": "Learning Rational Subgoals from Demonstrations and Instructions", + "track": "main", + "status": "Technical", + "abstract": "We present a framework for learning useful subgoals that support efficient long-term planning to achieve novel goals. 
At the core of our framework is a collection of rational subgoals (RSGs), which are essentially binary classifiers over the environmental states. RSGs can be learned from weakly-annotated data, in the form of unsegmented demonstration trajectories, paired with abstract task descriptions, which are composed of terms initially unknown to the agent (e.g., collect-wood then craft-boat then go-across-river). Our framework also discovers dependencies between RSGs, e.g., the task collect-wood is a helpful subgoal for the task craft-boat. Given a goal description, the learned subgoals and the derived dependencies facilitate off-the-shelf planning algorithms, such as A* and RRT, by setting helpful subgoals as waypoints to the planner, which significantly improves performance-time efficiency. Project page: https://rsg.csail.mit.edu", + "primary_area": "planning routing and scheduling", + "author": "Zhezheng Luo; Jiayuan Mao; Jiajun Wu; Tom\u00e1s Lozano-P\u00e9rez; Joshua B. Tenenbaum; Leslie Pack Kaelbling", + "authorids": "", + "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Stanford University; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", + "bibtex": "@article{Luo_Mao_Wu_Lozano-P\u00e9rez_Tenenbaum_Kaelbling_2023, title={Learning Rational Subgoals from Demonstrations and Instructions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26423}, DOI={10.1609/aaai.v37i10.26423}, abstractNote={We present a framework for learning useful subgoals that support efficient long-term planning to achieve novel goals. At the core of our framework is a collection of rational subgoals (RSGs), which are essentially binary classifiers over the environmental states. 
RSGs can be learned from weakly-annotated data, in the form of unsegmented demonstration trajectories, paired with abstract task descriptions, which are composed of terms initially unknown to the agent (e.g., collect-wood then craft-boat then go-across-river). Our framework also discovers dependencies between RSGs, e.g., the task collect-wood is a helpful subgoal for the task craft-boat. Given a goal description, the learned subgoals and the derived dependencies facilitate off-the-shelf planning algorithms, such as A* and RRT, by setting helpful subgoals as waypoints to the planner, which significantly improves performance-time efficiency. Project page: https://rsg.csail.mit.edu}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Zhezheng and Mao, Jiayuan and Wu, Jiajun and Lozano-P\u00e9rez, Tom\u00e1s and Tenenbaum, Joshua B. and Kaelbling, Leslie Pack}, year={2023}, month={Jun.}, pages={12068-12078} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26423/26195", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26423", + "pdf_size": 3643059, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17366899837365642434&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "https://rsg.csail.mit.edu", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Stanford University", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.stanford.edu", + "aff_unique_abbr": "MIT;Stanford", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26434", + "title": "Learning Relational Causal Models with Cycles through Relational Acyclification", + "track": "main", + "status": "Technical", + 
"abstract": "In real-world phenomena which involve mutual influence or causal effects between interconnected units, equilibrium states are typically represented with cycles in graphical models. An expressive class of graphical models, relational causal models, can represent and reason about complex dynamic systems exhibiting such cycles or feedback loops. Existing cyclic causal discovery algorithms for learning causal models from observational data assume that the data instances are independent and identically distributed which makes them unsuitable for relational causal models. At the same time, causal discovery algorithms for relational causal models assume acyclicity. In this work, we examine the necessary and sufficient conditions under which a constraint-based relational causal discovery algorithm is sound and complete for cyclic relational causal models. We introduce relational acyclification, an operation specifically designed for relational models that enables reasoning about the identifiability of cyclic relational causal models. We show that under the assumptions of relational acyclification and sigma-faithfulness, the relational causal discovery algorithm RCD is sound and complete for cyclic relational models. We present experimental results to support our claim.", + "primary_area": "reasoning under uncertainty", + "author": "Ragib Ahsan; David Arbour; Elena Zheleva", + "authorids": "", + "aff": "University of Illinois at Chicago; Adobe Research; University of Illinois at Chicago", + "bibtex": "@article{Ahsan_Arbour_Zheleva_2023, title={Learning Relational Causal Models with Cycles through Relational Acyclification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26434}, DOI={10.1609/aaai.v37i10.26434}, abstractNote={In real-world phenomena which involve mutual influence or causal effects between interconnected units, equilibrium states are typically represented with cycles in graphical models. 
An expressive class of graphical models, relational causal models, can represent and reason about complex dynamic systems exhibiting such cycles or feedback loops. Existing cyclic causal discovery algorithms for learning causal models from observational data assume that the data instances are independent and identically distributed which makes them unsuitable for relational causal models. At the same time, causal discovery algorithms for relational causal models assume acyclicity. In this work, we examine the necessary and sufficient conditions under which a constraint-based relational causal discovery algorithm is sound and complete for cyclic relational causal models. We introduce relational acyclification, an operation specifically designed for relational models that enables reasoning about the identifiability of cyclic relational causal models. We show that under the assumptions of relational acyclification and sigma-faithfulness, the relational causal discovery algorithm RCD is sound and complete for cyclic relational models. 
We present experimental results to support our claim.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ahsan, Ragib and Arbour, David and Zheleva, Elena}, year={2023}, month={Jun.}, pages={12164-12171} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26434/26206", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26434", + "pdf_size": 1541428, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14857122373888335664&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "uic.edu;adobe.com;uic.edu", + "email": "uic.edu;adobe.com;uic.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Illinois at Chicago;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www.uic.edu;https://research.adobe.com", + "aff_unique_abbr": "UIC;Adobe", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25538", + "title": "Learning Representations of Bi-level Knowledge Graphs for Reasoning beyond Link Prediction", + "track": "main", + "status": "Technical", + "abstract": "Knowledge graphs represent known facts using triplets. While existing knowledge graph embedding methods only consider the connections between entities, we propose considering the relationships between triplets. For example, let us consider two triplets T1 and T2 where T1 is (Academy_Awards, Nominates, Avatar) and T2 is (Avatar, Wins, Academy_Awards). Given these two base-level triplets, we see that T1 is a prerequisite for T2. In this paper, we define a higher-level triplet to represent a relationship between triplets, e.g., <T1, PrerequisiteFor, T2> where PrerequisiteFor is a higher-level relation. We define a bi-level knowledge graph that consists of the base-level and the higher-level triplets. 
We also propose a data augmentation strategy based on the random walks on the bi-level knowledge graph to augment plausible triplets. Our model called BiVE learns embeddings by taking into account the structures of the base-level and the higher-level triplets, with additional consideration of the augmented triplets. We propose two new tasks: triplet prediction and conditional link prediction. Given a triplet T1 and a higher-level relation, the triplet prediction predicts a triplet that is likely to be connected to T1 by the higher-level relation, e.g., <T1, PrerequisiteFor, ?>. The conditional link prediction predicts a missing entity in a triplet conditioned on another triplet, e.g., <T1, PrerequisiteFor, (Avatar, Wins, ?)>. Experimental results show that BiVE significantly outperforms all other methods in the two new tasks and the typical base-level link prediction in real-world bi-level knowledge graphs.", + "primary_area": "data mining and knowledge management", + "author": "Chanyoung Chung; Joyce Jiyoung Whang", + "authorids": "", + "aff": "School of Computing, KAIST; School of Computing, KAIST", + "bibtex": "@article{Chung_Whang_2023, title={Learning Representations of Bi-level Knowledge Graphs for Reasoning beyond Link Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25538}, DOI={10.1609/aaai.v37i4.25538}, abstractNote={Knowledge graphs represent known facts using triplets. While existing knowledge graph embedding methods only consider the connections between entities, we propose considering the relationships between triplets. For example, let us consider two triplets T1 and T2 where T1 is (Academy_Awards, Nominates, Avatar) and T2 is (Avatar, Wins, Academy_Awards). Given these two base-level triplets, we see that T1 is a prerequisite for T2. In this paper, we define a higher-level triplet to represent a relationship between triplets, e.g., <T1, PrerequisiteFor, T2> where PrerequisiteFor is a higher-level relation. 
We define a bi-level knowledge graph that consists of the base-level and the higher-level triplets. We also propose a data augmentation strategy based on the random walks on the bi-level knowledge graph to augment plausible triplets. Our model called BiVE learns embeddings by taking into account the structures of the base-level and the higher-level triplets, with additional consideration of the augmented triplets. We propose two new tasks: triplet prediction and conditional link prediction. Given a triplet T1 and a higher-level relation, the triplet prediction predicts a triplet that is likely to be connected to T1 by the higher-level relation, e.g., <T1, PrerequisiteFor, ?>. The conditional link prediction predicts a missing entity in a triplet conditioned on another triplet, e.g., <T1, PrerequisiteFor, (Avatar, Wins, ?)>. Experimental results show that BiVE significantly outperforms all other methods in the two new tasks and the typical base-level link prediction in real-world bi-level knowledge graphs.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chung, Chanyoung and Whang, Joyce Jiyoung}, year={2023}, month={Jun.}, pages={4208-4216} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25538/25310", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25538", + "pdf_size": 285723, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11080502900731635537&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": 
"article-26091", + "title": "Learning Revenue Maximization Using Posted Prices for Stochastic Strategic Patient Buyers", + "track": "main", + "status": "Technical", + "abstract": "We consider a seller faced with buyers which have the ability to delay their decision, which we call patience.\nEach buyer's type is composed of value and patience, and it is sampled i.i.d. from a distribution.\nThe seller, using posted prices, would like to maximize her revenue from selling to the buyer. \nIn this paper, we formalize this setting and characterize the resulting Stackelberg equilibrium, where the seller first commits to her strategy, and then the buyers best respond. Following this, we show how to compute both the optimal pure and mixed strategies. \nWe then consider a learning setting, where the seller does not have access to the distribution over buyer's types. Our main results are the following. We derive a sample complexity bound for the learning of an approximate optimal pure strategy, by computing the fat-shattering dimension of this setting. Moreover, we provide a general sample complexity bound for the approximate optimal mixed strategy. 
\nWe also consider an online setting and derive a vanishing regret bound with respect to both the optimal pure strategy and the optimal mixed strategy.", + "primary_area": "machine learning iii", + "author": "Eitan-Hai Mashiah; Idan Attias; Yishay Mansour", + "authorids": "", + "aff": "Tel Aviv University, Israel; Ben-Gurion University, Israel; Tel Aviv University, Israel + Google Research, Israel", + "bibtex": "@article{Mashiah_Attias_Mansour_2023, title={Learning Revenue Maximization Using Posted Prices for Stochastic Strategic Patient Buyers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26091}, DOI={10.1609/aaai.v37i8.26091}, abstractNote={We consider a seller faced with buyers which have the ability to delay their decision, which we call patience.\nEach buyer\u2019s type is composed of value and patience, and it is sampled i.i.d. from a distribution.\nThe seller, using posted prices, would like to maximize her revenue from selling to the buyer. In this paper, we formalize this setting and characterize the resulting Stackelberg equilibrium, where the seller first commits to her strategy, and then the buyers best respond. Following this, we show how to compute both the optimal pure and mixed strategies. We then consider a learning setting, where the seller does not have access to the distribution over buyer\u2019s types. Our main results are the following. We derive a sample complexity bound for the learning of an approximate optimal pure strategy, by computing the fat-shattering dimension of this setting. Moreover, we provide a general sample complexity bound for the approximate optimal mixed strategy. 
We also consider an online setting and derive a vanishing regret bound with respect to both the optimal pure strategy and the optimal mixed strategy.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mashiah, Eitan-Hai and Attias, Idan and Mansour, Yishay}, year={2023}, month={Jun.}, pages={9090-9098} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26091/25863", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26091", + "pdf_size": 158591, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12808881286666348573&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;post.bgu.ac.il;gmail.com", + "email": "gmail.com;post.bgu.ac.il;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "Tel Aviv University;Ben-Gurion University;Google", + "aff_unique_dep": ";;Google Research", + "aff_unique_url": "https://www.tau.ac.il;https://www.bgu.ac.il;https://research.google", + "aff_unique_abbr": "TAU;BGU;Google", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Israel", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "Israel" + }, + { + "id": "article-26424", + "title": "Learning Safe Numeric Action Models", + "track": "main", + "status": "Technical", + "abstract": "Powerful domain-independent planners have been developed to solve various types of planning problems. \nThese planners often require a model of the acting agent's actions, given in some planning domain description language. \nYet obtaining such an action model is a notoriously hard task. \nThis task is even more challenging in mission-critical domains, where a trial-and-error approach to learning how to act is not an option. \nIn such domains, the action model used to generate plans must be safe, in the sense that plans generated with it must be applicable and achieve their goals. 
\nLearning safe action models for planning has been recently explored for domains in which states are sufficiently described with Boolean variables. \nIn this work, we go beyond this limitation and propose the NSAM algorithm. \nNSAM runs in time that is polynomial in the number of observations and, under certain conditions, is guaranteed to return safe action models. \nWe analyze its worst-case sample complexity, which may be intractable for some domains. Empirically, however, NSAM can quickly learn a safe action model that can solve most problems in the domain.", + "primary_area": "planning routing and scheduling", + "author": "Argaman Mordoch; Brendan Juba; Roni Stern", + "authorids": "", + "aff": "Ben Gurion University in Be\u2019er Sheva, Israel; Washington University in St. Louis, USA; Ben Gurion University in Be\u2019er Sheva, Israel", + "bibtex": "@article{Mordoch_Juba_Stern_2023, title={Learning Safe Numeric Action Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26424}, DOI={10.1609/aaai.v37i10.26424}, abstractNote={Powerful domain-independent planners have been developed to solve various types of planning problems. These planners often require a model of the acting agent\u2019s actions, given in some planning domain description language. Yet obtaining such an action model is a notoriously hard task. This task is even more challenging in mission-critical domains, where a trial-and-error approach to learning how to act is not an option. In such domains, the action model used to generate plans must be safe, in the sense that plans generated with it must be applicable and achieve their goals. Learning safe action models for planning has been recently explored for domains in which states are sufficiently described with Boolean variables. In this work, we go beyond this limitation and propose the NSAM algorithm. 
NSAM runs in time that is polynomial in the number of observations and, under certain conditions, is guaranteed to return safe action models. We analyze its worst-case sample complexity, which may be intractable for some domains. Empirically, however, NSAM can quickly learn a safe action model that can solve most problems in the domain.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mordoch, Argaman and Juba, Brendan and Stern, Roni}, year={2023}, month={Jun.}, pages={12079-12086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26424/26196", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26424", + "pdf_size": 1195371, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8267656807993357471&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "post.bgu.ac.il;wustl.edu;gmail.com", + "email": "post.bgu.ac.il;wustl.edu;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Ben Gurion University;Washington University in St. Louis", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bgu.ac.il;https://wustl.edu", + "aff_unique_abbr": "BGU;WUSTL", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Be\u2019er Sheva;St. Louis", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Israel;United States" + }, + { + "id": "article-25431", + "title": "Learning Second-Order Attentive Context for Efficient Correspondence Pruning", + "track": "main", + "status": "Technical", + "abstract": "Correspondence pruning aims to search consistent correspondences (inliers) from a set of putative correspondences. It is challenging because of the disorganized spatial distribution of numerous outliers, especially when putative correspondences are largely dominated by outliers. It's more challenging to ensure effectiveness while maintaining efficiency. 
In this paper, we propose an effective and efficient method for correspondence pruning. Inspired by the success of attentive context in correspondence problems, we first extend the attentive context to the first-order attentive context and then introduce the idea of attention in attention (ANA) to model second-order attentive context for correspondence pruning. Compared with first-order attention that focuses on feature-consistent context, second-order attention dedicates to attention weights itself and provides an additional source to encode consistent context from the attention map. For efficiency, we derive two approximate formulations for the naive implementation of second-order attention to optimize the cubic complexity to linear complexity, such that second-order attention can be used with negligible computational overheads. We further implement our formulations in a second-order context layer and then incorporate the layer in an ANA block. Extensive experiments demonstrate that our method is effective and efficient in pruning outliers, especially in high-outlier-ratio cases. 
Compared with the state-of-the-art correspondence pruning approach LMCNet, our method runs 14 times faster while maintaining a competitive accuracy.", + "primary_area": "computer vision iii", + "author": "Xinyi Ye; Weiyue Zhao; Hao Lu; Zhiguo Cao", + "authorids": "", + "aff": "Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Arti\ufb01cial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Arti\ufb01cial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Arti\ufb01cial Intelligence and Automation, Huazhong University of Science and Technology; Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Arti\ufb01cial Intelligence and Automation, Huazhong University of Science and Technology", + "bibtex": "@article{Ye_Zhao_Lu_Cao_2023, title={Learning Second-Order Attentive Context for Efficient Correspondence Pruning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25431}, DOI={10.1609/aaai.v37i3.25431}, abstractNote={Correspondence pruning aims to search consistent correspondences (inliers) from a set of putative correspondences. It is challenging because of the disorganized spatial distribution of numerous outliers, especially when putative correspondences are largely dominated by outliers. It\u2019s more challenging to ensure effectiveness while maintaining efficiency. In this paper, we propose an effective and efficient method for correspondence pruning. Inspired by the success of attentive context in correspondence problems, we first extend the attentive context to the first-order attentive context and then introduce the idea of attention in attention (ANA) to model second-order attentive context for correspondence pruning. 
Compared with first-order attention that focuses on feature-consistent context, second-order attention dedicates to attention weights itself and provides an additional source to encode consistent context from the attention map. For efficiency, we derive two approximate formulations for the naive implementation of second-order attention to optimize the cubic complexity to linear complexity, such that second-order attention can be used with negligible computational overheads. We further implement our formulations in a second-order context layer and then incorporate the layer in an ANA block. Extensive experiments demonstrate that our method is effective and efficient in pruning outliers, especially in high-outlier-ratio cases. Compared with the state-of-the-art correspondence pruning approach LMCNet, our method runs 14 times faster while maintaining a competitive accuracy.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Xinyi and Zhao, Weiyue and Lu, Hao and Cao, Zhiguo}, year={2023}, month={Jun.}, pages={3250-3258} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25431/25203", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25431", + "pdf_size": 1268754, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11239808839759531625&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/DIVE128/ANANet", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "School of Arti\ufb01cial Intelligence and Automation", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-25222", + "title": "Learning Semantic Alignment with Global Modality Reconstruction for Video-Language Pre-training towards Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Video-language pre-training for text-based video retrieval tasks is vitally important. Previous pre-training methods suffer from the semantic misalignments. The reason is that these methods ignore sequence alignments but focusing on critical token alignment. To alleviate the problem, we propose a video-language pre-training framework, termed videolanguage pre-training For lEarning sEmantic aLignments (FEEL), to learn semantic alignments at the sequence level. Specifically, the global modality reconstruction and the cross- modal self-contrasting method is utilized to learn the alignments at the sequence level better. Extensive experimental results demonstrate the effectiveness of FEEL on text-based video retrieval and text-based video corpus moment retrieval.", + "primary_area": "computer vision i", + "author": "Mingchao Li; Xiaoming Shi; Haitao Leng; Wei Zhou; Hai-Tao Zheng; Kuncai Zhang", + "authorids": "", + "aff": "Department of Computer Science and Technology, Tsinghua University + Alibaba Group; Shanghai Artificial Intelligence Laboratory; Alibaba Group; Alibaba Group; Shenzhen International Graduate School, Tsinghua University + Peng Cheng Laboratory; Alibaba Group", + "bibtex": "@article{Li_Shi_Leng_Zhou_Zheng_Zhang_2023, title={Learning Semantic Alignment with Global Modality Reconstruction for Video-Language Pre-training towards Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25222}, DOI={10.1609/aaai.v37i1.25222}, abstractNote={Video-language pre-training for text-based video retrieval tasks is vitally important. Previous pre-training methods suffer from the semantic misalignments. 
The reason is that these methods ignore sequence alignments but focusing on critical token alignment. To alleviate the problem, we propose a video-language pre-training framework, termed videolanguage pre-training For lEarning sEmantic aLignments (FEEL), to learn semantic alignments at the sequence level. Specifically, the global modality reconstruction and the cross- modal self-contrasting method is utilized to learn the alignments at the sequence level better. Extensive experimental results demonstrate the effectiveness of FEEL on text-based video retrieval and text-based video corpus moment retrieval.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Mingchao and Shi, Xiaoming and Leng, Haitao and Zhou, Wei and Zheng, Hai-Tao and Zhang, Kuncai}, year={2023}, month={Jun.}, pages={1377-1385} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25222/24994", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25222", + "pdf_size": 18476413, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17489917461341928809&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;pjlab.org.cn;alibaba-inc.com;alibaba-inc.com;sz.tsinghua.edu.cn;alibaba-inc.com", + "email": "gmail.com;pjlab.org.cn;alibaba-inc.com;alibaba-inc.com;sz.tsinghua.edu.cn;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;1;1;0+3;1", + "aff_unique_norm": "Tsinghua University;Alibaba Group;Shanghai Artificial Intelligence Laboratory;Peng Cheng Laboratory", + "aff_unique_dep": "Department of Computer Science and Technology;;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.alibaba.com;http://www.shailab.org/;http://www.pcl.ac.cn", + "aff_unique_abbr": "THU;Alibaba;Shanghai AI Lab;PCL", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0+0;0;0;0;0+0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-25479", + "title": "Learning Semantic Degradation-Aware Guidance for Recognition-Driven Unsupervised Low-Light Image Enhancement", + "track": "main", + "status": "Technical", + "abstract": "Low-light images suffer severe degradation of low lightness and noise corruption, causing unsatisfactory visual quality and visual recognition performance. \nTo solve this problem while meeting the unavailability of paired datasets in wide-range scenarios, unsupervised low-light image enhancement (ULLIE) techniques have been developed. \nHowever, these methods are primarily guided to alleviate the degradation effect on visual quality rather than semantic levels, hence limiting their performance in visual recognition tasks. \nTo this end, we propose to learn a Semantic Degradation-Aware Guidance (SDAG) that perceives the low-light degradation effect on semantic levels in a self-supervised manner, which is further utilized to guide the ULLIE methods.\nThe proposed SDAG utilizes the low-light degradation factors as augmented signals to degrade the low-light images, and then capture their degradation effect on semantic levels. \nSpecifically, our SDAG employs the subsequent pre-trained recognition model extractor to extract semantic representations, and then learns to self-reconstruct the enhanced low-light image and its augmented degraded images. \nBy constraining the relative reconstruction effect between the original enhanced image and the augmented formats, our SDAG learns to be aware of the degradation effect on semantic levels in a relative comparison manner. \nMoreover, our SDAG is general and can be plugged into the training paradigm of the existing ULLIE methods. \nExtensive experiments demonstrate its effectiveness for improving the ULLIE approaches on the downstream recognition tasks while maintaining a competitive visual quality. 
\nCode will be available at https://github.com/zheng980629/SDAG.", + "primary_area": "computer vision iii", + "author": "Naishan Zheng; Jie Huang; Man Zhou; Zizheng Yang; Qi Zhu; Feng Zhao", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Zheng_Huang_Zhou_Yang_Zhu_Zhao_2023, title={Learning Semantic Degradation-Aware Guidance for Recognition-Driven Unsupervised Low-Light Image Enhancement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25479}, DOI={10.1609/aaai.v37i3.25479}, abstractNote={Low-light images suffer severe degradation of low lightness and noise corruption, causing unsatisfactory visual quality and visual recognition performance. To solve this problem while meeting the unavailability of paired datasets in wide-range scenarios, unsupervised low-light image enhancement (ULLIE) techniques have been developed. However, these methods are primarily guided to alleviate the degradation effect on visual quality rather than semantic levels, hence limiting their performance in visual recognition tasks. To this end, we propose to learn a Semantic Degradation-Aware Guidance (SDAG) that perceives the low-light degradation effect on semantic levels in a self-supervised manner, which is further utilized to guide the ULLIE methods.\nThe proposed SDAG utilizes the low-light degradation factors as augmented signals to degrade the low-light images, and then capture their degradation effect on semantic levels. Specifically, our SDAG employs the subsequent pre-trained recognition model extractor to extract semantic representations, and then learns to self-reconstruct the enhanced low-light image and its augmented degraded images. 
By constraining the relative reconstruction effect between the original enhanced image and the augmented formats, our SDAG learns to be aware of the degradation effect on semantic levels in a relative comparison manner. Moreover, our SDAG is general and can be plugged into the training paradigm of the existing ULLIE methods. Extensive experiments demonstrate its effectiveness for improving the ULLIE approaches on the downstream recognition tasks while maintaining a competitive visual quality. Code will be available at https://github.com/zheng980629/SDAG.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Naishan and Huang, Jie and Zhou, Man and Yang, Zizheng and Zhu, Qi and Zhao, Feng}, year={2023}, month={Jun.}, pages={3678-3686} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25479/25251", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25479", + "pdf_size": 1297133, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12330335416117387395&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/zheng980629/SDAG", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26007", + "title": "Learning Similarity Metrics for Volumetric Simulations with Multiscale CNNs", + "track": "main", + "status": "Technical", + "abstract": "Simulations that produce 
three-dimensional data are ubiquitous in science, ranging from fluid flows to plasma physics. We propose a similarity model based on entropy, which allows for the creation of physically meaningful ground truth distances for the similarity assessment of scalar and vectorial data, produced from transport and motion-based simulations. Utilizing two data acquisition methods derived from this model, we create collections of fields from numerical PDE solvers and existing simulation data repositories. Furthermore, a multiscale CNN architecture that computes a volumetric similarity metric (VolSiM) is proposed. To the best of our knowledge this is the first learning method inherently designed to address the challenges arising for the similarity assessment of high-dimensional simulation data. Additionally, the tradeoff between a large batch size and an accurate correlation computation for correlation-based loss functions is investigated, and the metric's invariance with respect to rotation and scale operations is analyzed. Finally, the robustness and generalization of VolSiM is evaluated on a large range of test data, as well as a particularly challenging turbulence case study, that is close to potential real-world applications.", + "primary_area": "machine learning ii", + "author": "Georg Kohl; Li-Wei Chen; Nils Thuerey", + "authorids": "", + "aff": "Technical University of Munich; Technical University of Munich; Technical University of Munich", + "bibtex": "@article{Kohl_Chen_Thuerey_2023, title={Learning Similarity Metrics for Volumetric Simulations with Multiscale CNNs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26007}, DOI={10.1609/aaai.v37i7.26007}, abstractNote={Simulations that produce three-dimensional data are ubiquitous in science, ranging from fluid flows to plasma physics. 
We propose a similarity model based on entropy, which allows for the creation of physically meaningful ground truth distances for the similarity assessment of scalar and vectorial data, produced from transport and motion-based simulations. Utilizing two data acquisition methods derived from this model, we create collections of fields from numerical PDE solvers and existing simulation data repositories. Furthermore, a multiscale CNN architecture that computes a volumetric similarity metric (VolSiM) is proposed. To the best of our knowledge this is the first learning method inherently designed to address the challenges arising for the similarity assessment of high-dimensional simulation data. Additionally, the tradeoff between a large batch size and an accurate correlation computation for correlation-based loss functions is investigated, and the metric\u2019s invariance with respect to rotation and scale operations is analyzed. Finally, the robustness and generalization of VolSiM is evaluated on a large range of test data, as well as a particularly challenging turbulence case study, that is close to potential real-world applications.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kohl, Georg and Chen, Li-Wei and Thuerey, Nils}, year={2023}, month={Jun.}, pages={8351-8359} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26007/25779", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26007", + "pdf_size": 3709419, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4285194958875681657&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "tum.de;tum.de;tum.de", + "email": "tum.de;tum.de;tum.de", + "github": "https://github.com/tum-pbs/VOLSIM", + "project": "https://arxiv.org/abs/2202.04109", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Technical University of Munich", + "aff_unique_dep": "", + "aff_unique_url": 
"https://www.tum.de", + "aff_unique_abbr": "TUM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25235", + "title": "Learning Single Image Defocus Deblurring with Misaligned Training Pairs", + "track": "main", + "status": "Technical", + "abstract": "By adopting popular pixel-wise loss, existing methods for defocus deblurring heavily rely on well aligned training image pairs. Although training pairs of ground-truth and blurry images are carefully collected, e.g., DPDD dataset, misalignment is inevitable between training pairs, making existing methods possibly suffer from deformation artifacts. In this paper, we propose a joint deblurring and reblurring learning (JDRL) framework for single image defocus deblurring with misaligned training pairs. Generally, JDRL consists of a deblurring module and a spatially invariant reblurring module, by which deblurred result can be adaptively supervised by ground-truth image to recover sharp textures while maintaining spatial consistency with the blurry image. First, in the deblurring module, a bi-directional optical flow-based deformation is introduced to tolerate spatial misalignment between deblurred and ground-truth images. Second, in the reblurring module, deblurred result is reblurred to be spatially aligned with blurry image, by predicting a set of isotropic blur kernels and weighting maps. Moreover, we establish a new single image defocus deblurring (SDD) dataset, further validating our JDRL and also benefiting future research. 
Our JDRL can be applied to boost defocus deblurring networks in terms of both quantitative metrics and visual quality on DPDD, RealDOF and our SDD datasets.", + "primary_area": "computer vision ii", + "author": "Yu Li; Dongwei Ren; Xinya Shu; Wangmeng Zuo", + "authorids": "", + "aff": "School of Computer Science and Technology, Harbin Institute of Technology; School of Computer Science and Technology, Harbin Institute of Technology; School of Computer Science and Technology, Harbin Institute of Technology; School of Computer Science and Technology, Harbin Institute of Technology + Peng Cheng Laboratory", + "bibtex": "@article{Li_Ren_Shu_Zuo_2023, title={Learning Single Image Defocus Deblurring with Misaligned Training Pairs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25235}, DOI={10.1609/aaai.v37i2.25235}, abstractNote={By adopting popular pixel-wise loss, existing methods for defocus deblurring heavily rely on well aligned training image pairs. Although training pairs of ground-truth and blurry images are carefully collected, e.g., DPDD dataset, misalignment is inevitable between training pairs, making existing methods possibly suffer from deformation artifacts. In this paper, we propose a joint deblurring and reblurring learning (JDRL) framework for single image defocus deblurring with misaligned training pairs. Generally, JDRL consists of a deblurring module and a spatially invariant reblurring module, by which deblurred result can be adaptively supervised by ground-truth image to recover sharp textures while maintaining spatial consistency with the blurry image. First, in the deblurring module, a bi-directional optical flow-based deformation is introduced to tolerate spatial misalignment between deblurred and ground-truth images. Second, in the reblurring module, deblurred result is reblurred to be spatially aligned with blurry image, by predicting a set of isotropic blur kernels and weighting maps. 
Moreover, we establish a new single image defocus deblurring (SDD) dataset, further validating our JDRL and also benefiting future research. Our JDRL can be applied to boost defocus deblurring networks in terms of both quantitative metrics and visual quality on DPDD, RealDOF and our SDD datasets.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yu and Ren, Dongwei and Shu, Xinya and Zuo, Wangmeng}, year={2023}, month={Jun.}, pages={1495-1503} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25235/25007", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25235", + "pdf_size": 6606144, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13460986889751841632&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 5, + "aff_domain": "outlook.com;gmail.com;outlook.com;hit.edu.cn", + "email": "outlook.com;gmail.com;outlook.com;hit.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Laboratory", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "http://www.hit.edu.cn/;http://www.pcl.ac.cn", + "aff_unique_abbr": "HIT;PCL", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25085", + "title": "Learning Temporal-Ordered Representation for Spike Streams Based on Discrete Wavelet Transforms", + "track": "main", + "status": "Technical", + "abstract": "Spike camera, a new type of neuromorphic visual sensor that imitates the sampling mechanism of the primate fovea, can capture photons and output 40000 Hz binary spike streams. 
Benefiting from the asynchronous sampling mechanism, the spike camera can record fast-moving objects and clear images can be recovered from the spike stream at any specified timestamps without motion blurring. Despite these, due to the dense time sequence information of the discrete spike stream, it is not easy to directly apply the existing algorithms of traditional cameras to the spike camera. Therefore, it is necessary and interesting to explore a universally effective representation of dense spike streams to better fit various network architectures. In this paper, we propose to mine temporal-robust features of spikes in time-frequency space with wavelet transforms. We present a novel Wavelet-Guided Spike Enhancing (WGSE) paradigm consisting of three consecutive steps: multi-level wavelet transform, CNN-based learnable module, and inverse wavelet transform. With the assistance of WGSE, the new streaming representation of spikes can be learned. We demonstrate the effectiveness of WGSE on two downstream tasks, achieving state-of-the-art performance on the image reconstruction task and getting considerable performance on semantic segmentation. Furthermore, We build a new spike-based synthesized dataset for semantic segmentation. 
Code and Datasets are available at https://github.com/Leozhangjiyuan/WGSE-SpikeCamera.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Jiyuan Zhang; Shanshan Jia; Zhaofei Yu; Tiejun Huang", + "authorids": "", + "aff": "School of Computer Science, Peking University+Institute for Artificial Intelligence, Peking University; School of Computer Science, Peking University+Institute for Artificial Intelligence, Peking University; School of Computer Science, Peking University+Institute for Artificial Intelligence, Peking University; School of Computer Science, Peking University+Institute for Artificial Intelligence, Peking University", + "bibtex": "@article{Zhang_Jia_Yu_Huang_2023, title={Learning Temporal-Ordered Representation for Spike Streams Based on Discrete Wavelet Transforms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25085}, DOI={10.1609/aaai.v37i1.25085}, abstractNote={Spike camera, a new type of neuromorphic visual sensor that imitates the sampling mechanism of the primate fovea, can capture photons and output 40000 Hz binary spike streams. Benefiting from the asynchronous sampling mechanism, the spike camera can record fast-moving objects and clear images can be recovered from the spike stream at any specified timestamps without motion blurring. Despite these, due to the dense time sequence information of the discrete spike stream, it is not easy to directly apply the existing algorithms of traditional cameras to the spike camera. Therefore, it is necessary and interesting to explore a universally effective representation of dense spike streams to better fit various network architectures. In this paper, we propose to mine temporal-robust features of spikes in time-frequency space with wavelet transforms. We present a novel Wavelet-Guided Spike Enhancing (WGSE) paradigm consisting of three consecutive steps: multi-level wavelet transform, CNN-based learnable module, and inverse wavelet transform. 
With the assistance of WGSE, the new streaming representation of spikes can be learned. We demonstrate the effectiveness of WGSE on two downstream tasks, achieving state-of-the-art performance on the image reconstruction task and getting considerable performance on semantic segmentation. Furthermore, We build a new spike-based synthesized dataset for semantic segmentation. Code and Datasets are available at https://github.com/Leozhangjiyuan/WGSE-SpikeCamera.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jiyuan and Jia, Shanshan and Yu, Zhaofei and Huang, Tiejun}, year={2023}, month={Jun.}, pages={137-147} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25085/24857", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25085", + "pdf_size": 7753318, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4487665816072248228&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/Leozhangjiyuan/WGSE-SpikeCamera", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26000", + "title": "Learning Topology-Specific Experts for Molecular Property Prediction", + "track": "main", + "status": "Technical", + "abstract": "Recently, graph neural networks (GNNs) have been successfully applied to predicting molecular properties, which is one of the most classical cheminformatics tasks with various applications. 
Despite their effectiveness, we empirically observe that training a single GNN model for diverse molecules with distinct structural patterns limits its prediction performance. In this paper, motivated by this observation, we propose TopExpert to leverage topology-specific prediction models (referred to as experts), each of which is responsible for each molecular group sharing similar topological semantics. That is, each expert learns topology-specific discriminative features while being trained with its corresponding topological group. To tackle the key challenge of grouping molecules by their topological patterns, we introduce a clustering-based gating module that assigns an input molecule into one of the clusters and further optimizes the gating module with two different types of self-supervision: topological semantics induced by GNNs and molecular scaffolds, respectively. Extensive experiments demonstrate that TopExpert has boosted the performance for molecular property prediction and also achieved better generalization for new molecules with unseen scaffolds than baselines. 
The code is available at https://github.com/kimsu55/ToxExpert.", + "primary_area": "machine learning ii", + "author": "Suyeon Kim; Dongha Lee; SeongKu Kang; Seonghyeon Lee; Hwanjo Yu", + "authorids": "", + "aff": "Pohang University of Science and Technology (POSTECH), South Korea; University of Illinois at Urbana-Champaign (UIUC), United States; Pohang University of Science and Technology (POSTECH), South Korea; Pohang University of Science and Technology (POSTECH), South Korea; Pohang University of Science and Technology (POSTECH), South Korea", + "bibtex": "@article{Kim_Lee_Kang_Lee_Yu_2023, title={Learning Topology-Specific Experts for Molecular Property Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26000}, DOI={10.1609/aaai.v37i7.26000}, abstractNote={Recently, graph neural networks (GNNs) have been successfully applied to predicting molecular properties, which is one of the most classical cheminformatics tasks with various applications. Despite their effectiveness, we empirically observe that training a single GNN model for diverse molecules with distinct structural patterns limits its prediction performance. In this paper, motivated by this observation, we propose TopExpert to leverage topology-specific prediction models (referred to as experts), each of which is responsible for each molecular group sharing similar topological semantics. That is, each expert learns topology-specific discriminative features while being trained with its corresponding topological group. To tackle the key challenge of grouping molecules by their topological patterns, we introduce a clustering-based gating module that assigns an input molecule into one of the clusters and further optimizes the gating module with two different types of self-supervision: topological semantics induced by GNNs and molecular scaffolds, respectively. 
Extensive experiments demonstrate that TopExpert has boosted the performance for molecular property prediction and also achieved better generalization for new molecules with unseen scaffolds than baselines. The code is available at https://github.com/kimsu55/ToxExpert.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Suyeon and Lee, Dongha and Kang, SeongKu and Lee, Seonghyeon and Yu, Hwanjo}, year={2023}, month={Jun.}, pages={8291-8299} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26000/25772", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26000", + "pdf_size": 635379, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5180662947435107164&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "postech.ac.kr;illinois.edu;postech.ac.kr;postech.ac.kr;postech.ac.kr", + "email": "postech.ac.kr;illinois.edu;postech.ac.kr;postech.ac.kr;postech.ac.kr", + "github": "https://github.com/kimsu55/ToxExpert", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Pohang University of Science and Technology;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.postech.ac.kr;https://illinois.edu", + "aff_unique_abbr": "POSTECH;UIUC", + "aff_campus_unique_index": "0;1;0;0;0", + "aff_campus_unique": "Pohang;Urbana-Champaign", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-25406", + "title": "Learning a Generalized Gaze Estimator from Gaze-Consistent Feature", + "track": "main", + "status": "Technical", + "abstract": "Gaze estimator computes the gaze direction based on face images. Most existing gaze estimation methods perform well under within-dataset settings, but can not generalize to unseen domains. In particular, the ground-truth labels in unseen domain are often unavailable. 
In this paper, we propose a new domain generalization method based on gaze-consistent features. Our idea is to consider the gaze-irrelevant factors as unfavorable interference and disturb the training data against them, so that the model cannot fit to these gaze-irrelevant factors, instead, only fits to the gaze-consistent features. To this end, we first disturb the training data via adversarial attack or data augmentation based on the gaze-irrelevant factors, i.e., identity, expression, illumination and tone. Then we extract the gaze-consistent features by aligning the gaze features from disturbed data with non-disturbed gaze features. Experimental results show that our proposed method achieves state-of-the-art performance on gaze domain generalization task. Furthermore, our proposed method also improves domain adaption performance on gaze estimation. Our work provides new insight on gaze domain generalization task.", + "primary_area": "computer vision iii", + "author": "Mingjie Xu; Haofei Wang; Feng Lu", + "authorids": "", + "aff": "State Key Laboratory of VR Technology and Systems, School of CSE, Beihang University; Peng Cheng Laboratory, Shenzhen, China; State Key Laboratory of VR Technology and Systems, School of CSE, Beihang University + Peng Cheng Laboratory, Shenzhen, China", + "bibtex": "@article{Xu_Wang_Lu_2023, title={Learning a Generalized Gaze Estimator from Gaze-Consistent Feature}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25406}, DOI={10.1609/aaai.v37i3.25406}, abstractNote={Gaze estimator computes the gaze direction based on face images. Most existing gaze estimation methods perform well under within-dataset settings, but can not generalize to unseen domains. In particular, the ground-truth labels in unseen domain are often unavailable. In this paper, we propose a new domain generalization method based on gaze-consistent features. 
Our idea is to consider the gaze-irrelevant factors as unfavorable interference and disturb the training data against them, so that the model cannot fit to these gaze-irrelevant factors, instead, only fits to the gaze-consistent features. To this end, we first disturb the training data via adversarial attack or data augmentation based on the gaze-irrelevant factors, i.e., identity, expression, illumination and tone. Then we extract the gaze-consistent features by aligning the gaze features from disturbed data with non-disturbed gaze features. Experimental results show that our proposed method achieves state-of-the-art performance on gaze domain generalization task. Furthermore, our proposed method also improves domain adaption performance on gaze estimation. Our work provides new insight on gaze domain generalization task.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Mingjie and Wang, Haofei and Lu, Feng}, year={2023}, month={Jun.}, pages={3027-3035} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25406/25178", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25406", + "pdf_size": 3135936, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4301330214785324258&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;pcl.ac.cn;buaa.edu.cn", + "email": "buaa.edu.cn;pcl.ac.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "Beihang University;Peng Cheng Laboratory", + "aff_unique_dep": "School of CSE;", + "aff_unique_url": "http://www.buaa.edu.cn;", + "aff_unique_abbr": "Beihang;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26930", + "title": "Learning and Planning under Uncertainty for Conservation Decisions", + "track": "aaai 
doctoral consortium track", + "status": "Technical", + "abstract": "My research focuses on new techniques in machine learning and game theory to optimally allocate our scarce resources in multi-agent settings to maximize environmental sustainability. Drawing scientific questions from my close partnership with conservation organizations, I have advanced new lines of research in learning and planning under uncertainty, inspired by the low-data, noisy, and dynamic settings faced by rangers on the frontlines of protected areas.", + "primary_area": "", + "author": "Lily Xu", + "authorids": "", + "aff": "Harvard University", + "bibtex": "@article{Xu_2024, title={Learning and Planning under Uncertainty for Conservation Decisions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26930}, DOI={10.1609/aaai.v37i13.26930}, abstractNote={My research focuses on new techniques in machine learning and game theory to optimally allocate our scarce resources in multi-agent settings to maximize environmental sustainability. 
Drawing scientific questions from my close partnership with conservation organizations, I have advanced new lines of research in learning and planning under uncertainty, inspired by the low-data, noisy, and dynamic settings faced by rangers on the frontlines of protected areas.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Lily}, year={2024}, month={Jul.}, pages={16139-16140} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26930/26702", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26930", + "pdf_size": 57044, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:d7EavvP_f9cJ:scholar.google.com/&scioq=Learning+and+Planning+under+Uncertainty+for+Conservation+Decisions&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "g.harvard.edu", + "email": "g.harvard.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Harvard University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.harvard.edu", + "aff_unique_abbr": "Harvard", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25571", + "title": "Learning by Applying: A General Framework for Mathematical Reasoning via Enhancing Explicit Knowledge Learning", + "track": "main", + "status": "Technical", + "abstract": "Mathematical reasoning is one of the crucial abilities of general artificial intelligence, which requires machines to master mathematical logic and knowledge from solving problems. However, existing approaches are not transparent (thus not interpretable) in terms of what knowledge has been learned and applied in the reasoning process. In this paper, we propose a general Learning by Applying (LeAp) framework to enhance existing models (backbones) in a principled way by explicit knowledge learning. 
In LeAp, we perform knowledge learning in a novel problem-knowledge-expression paradigm, with a Knowledge Encoder to acquire knowledge from problem data and a Knowledge Decoder to apply knowledge for expression reasoning. The learned mathematical knowledge, including word-word relations and word-operator relations, forms an explicit knowledge graph, which bridges the knowledge \u201clearning\u201d and \u201capplying\u201d organically. Moreover, for problem solving, we design a semantics-enhanced module and a reasoning-enhanced module that apply knowledge to improve the problem comprehension and symbol reasoning abilities of any backbone, respectively. We theoretically prove the superiority of LeAp's autonomous learning mechanism. Experiments on three real-world datasets show that LeAp improves all backbones' performances, learns accurate knowledge, and achieves a more interpretable reasoning process.", + "primary_area": "data mining and knowledge management", + "author": "Jiayu Liu; Zhenya Huang; ChengXiang Zhai; Qi Liu", + "authorids": "", + "aff": "Anhui Province Key Laboratory of Big Data Analysis and Application, School of Data Science & School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Data Science & School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; University of Illinois at Urbana-Champaign; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Data Science & School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence", + "bibtex": "@article{Liu_Huang_Zhai_Liu_2023, title={Learning by Applying: A General Framework for Mathematical Reasoning via Enhancing Explicit Knowledge Learning}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25571}, DOI={10.1609/aaai.v37i4.25571}, abstractNote={Mathematical reasoning is one of the crucial abilities of general artificial intelligence, which requires machines to master mathematical logic and knowledge from solving problems. However, existing approaches are not transparent (thus not interpretable) in terms of what knowledge has been learned and applied in the reasoning process. In this paper, we propose a general Learning by Applying (LeAp) framework to enhance existing models (backbones) in a principled way by explicit knowledge learning. In LeAp, we perform knowledge learning in a novel problem-knowledge-expression paradigm, with a Knowledge Encoder to acquire knowledge from problem data and a Knowledge Decoder to apply knowledge for expression reasoning. The learned mathematical knowledge, including word-word relations and word-operator relations, forms an explicit knowledge graph, which bridges the knowledge \u201clearning\u201d and \u201capplying\u201d organically. Moreover, for problem solving, we design a semantics-enhanced module and a reasoning-enhanced module that apply knowledge to improve the problem comprehension and symbol reasoning abilities of any backbone, respectively. We theoretically prove the superiority of LeAp\u2019s autonomous learning mechanism. 
Experiments on three real-world datasets show that LeAp improves all backbones\u2019 performances, learns accurate knowledge, and achieves a more interpretable reasoning process.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Jiayu and Huang, Zhenya and Zhai, ChengXiang and Liu, Qi}, year={2023}, month={Jun.}, pages={4497-4506} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25571/25343", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25571", + "pdf_size": 581879, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14447945820043954814&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;illinois.edu;ustc.edu.cn", + "email": "mail.ustc.edu.cn;ustc.edu.cn;illinois.edu;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;0+1", + "aff_unique_norm": "University of Science and Technology of China;State Key Laboratory of Cognitive Intelligence;University of Illinois at Urbana-Champaign", + "aff_unique_dep": "School of Data Science & School of Computer Science and Technology;;", + "aff_unique_url": "http://www.ustc.edu.cn;;https://illinois.edu", + "aff_unique_abbr": "USTC;;UIUC", + "aff_campus_unique_index": ";;1;", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0+0;0+0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26379", + "title": "Learning from Good Trajectories in Offline Multi-Agent Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Offline multi-agent reinforcement learning (MARL) aims to learn effective multi-agent policies from pre-collected datasets, which is an important step toward the deployment of multi-agent systems in real-world applications. 
However, in practice, each individual behavior policy that generates multi-agent joint trajectories usually has a different level of how well it performs. e.g., an agent is a random policy while other agents are medium policies. In the cooperative game with global reward, one agent learned by existing offline MARL often inherits this random policy, jeopardizing the utility of the entire team. In this paper, we investigate offline MARL with explicit consideration on the diversity of agent-wise trajectories and propose a novel framework called Shared Individual Trajectories (SIT) to address this problem. Specifically, an attention-based reward decomposition network assigns the credit to each agent through a differentiable key-value memory mechanism in an offline manner. These decomposed credits are then used to reconstruct the joint offline datasets into prioritized experience replay with individual trajectories, thereafter agents can share their good trajectories and conservatively train their policies with a graph attention network (GAT) based critic. We evaluate our method in both discrete control (i.e., StarCraft II and multi-agent particle environment) and continuous control (i.e., multi-agent mujoco). 
The results indicate that our method achieves significantly better results in complex and mixed offline multi-agent datasets, especially when the difference of data quality between individual trajectories is large.", + "primary_area": "multiagent systems", + "author": "Qi Tian; Kun Kuang; Furui Liu; Baoxiang Wang", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University, Hangzhou, China; College of Computer Science and Technology, Zhejiang University, Hangzhou, China; Huawei Noah\u2019s Ark Lab, Beijing, China; School of Data Science, Chinese University of Hong Kong (Shenzhen), Shenzhen, China", + "bibtex": "@article{Tian_Kuang_Liu_Wang_2023, title={Learning from Good Trajectories in Offline Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26379}, DOI={10.1609/aaai.v37i10.26379}, abstractNote={Offline multi-agent reinforcement learning (MARL) aims to learn effective multi-agent policies from pre-collected datasets, which is an important step toward the deployment of multi-agent systems in real-world applications. However, in practice, each individual behavior policy that generates multi-agent joint trajectories usually has a different level of how well it performs. e.g., an agent is a random policy while other agents are medium policies. In the cooperative game with global reward, one agent learned by existing offline MARL often inherits this random policy, jeopardizing the utility of the entire team. In this paper, we investigate offline MARL with explicit consideration on the diversity of agent-wise trajectories and propose a novel framework called Shared Individual Trajectories (SIT) to address this problem. Specifically, an attention-based reward decomposition network assigns the credit to each agent through a differentiable key-value memory mechanism in an offline manner. 
These decomposed credits are then used to reconstruct the joint offline datasets into prioritized experience replay with individual trajectories, thereafter agents can share their good trajectories and conservatively train their policies with a graph attention network (GAT) based critic. We evaluate our method in both discrete control (i.e., StarCraft II and multi-agent particle environment) and continuous control (i.e., multi-agent mujoco). The results indicate that our method achieves significantly better results in complex and mixed offline multi-agent datasets, especially when the difference of data quality between individual trajectories is large.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Qi and Kuang, Kun and Liu, Furui and Wang, Baoxiang}, year={2023}, month={Jun.}, pages={11672-11680} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26379/26151", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26379", + "pdf_size": 1770757, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15512708235523627443&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn;zju.edu.cn;huawei.com;cuhk.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;huawei.com;cuhk.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "Zhejiang University;Huawei Noah\u2019s Ark Lab;Chinese University of Hong Kong (Shenzhen)", + "aff_unique_dep": "College of Computer Science and Technology;;School of Data Science", + "aff_unique_url": "http://www.zju.edu.cn;https://www.huawei.com/en/ai/noahs-ark-lab;https://www.cuhk.edu.cn", + "aff_unique_abbr": "ZJU;HNA Lab;CUHK (Shenzhen)", + "aff_campus_unique_index": "0;0;1;2", + "aff_campus_unique": "Hangzhou;Beijing;Shenzhen", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25972", + "title": "Learning from 
Training Dynamics: Identifying Mislabeled Data beyond Manually Designed Features", + "track": "main", + "status": "Technical", + "abstract": "While mislabeled or ambiguously-labeled samples in the training set could negatively affect the performance of deep models, diagnosing the dataset and identifying mislabeled samples helps to improve the generalization power. Training dynamics, i.e., the traces left by iterations of optimization algorithms, have recently been proved to be effective to localize mislabeled samples with hand-crafted features.\nIn this paper, beyond manually designed features, we introduce a novel learning-based solution, leveraging a noise detector, instanced by an LSTM network, which learns to predict whether a sample was mislabeled using the raw training dynamics as input. \nSpecifically, the proposed method trains the noise detector in a supervised manner using the dataset with synthesized label noises and can adapt to various datasets (either naturally or synthesized label-noised) without retraining. \nWe conduct extensive experiments to evaluate the proposed method.\nWe train the noise detector based on the synthesized label-noised CIFAR dataset and test such noise detector on Tiny ImageNet, CUB-200, Caltech-256, WebVision and Clothing1M. \nResults show that the proposed method precisely detects mislabeled samples on various datasets without further adaptation, and outperforms state-of-the-art methods.\nBesides, more experiments demonstrate that the mislabel identification can guide a label correction, namely data debugging, providing orthogonal improvements of algorithm-centric state-of-the-art techniques from the data aspect.", + "primary_area": "machine learning ii", + "author": "Qingrui Jia; Xuhong Li; Lei Yu; Jiang Bian; Penghao Zhao; Shupeng Li; Haoyi Xiong; Dejing Dou", + "authorids": "", + "aff": "Sino-French Engineer School, Beihang University, Beijing, China+Baidu Inc. Beijing, China; Baidu Inc. 
Beijing, China; Sino-French Engineer School, Beihang University, Beijing, China+Beihang Hangzhou Innovation Institute Yuhang, Hangzhou, China; Baidu Inc. Beijing, China; Baidu Inc. Beijing, China; Baidu Inc. Beijing, China; Baidu Inc. Beijing, China; BCG Greater China, Beijing, China", + "bibtex": "@article{Jia_Li_Yu_Bian_Zhao_Li_Xiong_Dou_2023, title={Learning from Training Dynamics: Identifying Mislabeled Data beyond Manually Designed Features}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25972}, DOI={10.1609/aaai.v37i7.25972}, abstractNote={While mislabeled or ambiguously-labeled samples in the training set could negatively affect the performance of deep models, diagnosing the dataset and identifying mislabeled samples helps to improve the generalization power. Training dynamics, i.e., the traces left by iterations of optimization algorithms, have recently been proved to be effective to localize mislabeled samples with hand-crafted features.\nIn this paper, beyond manually designed features, we introduce a novel learning-based solution, leveraging a noise detector, instanced by an LSTM network, which learns to predict whether a sample was mislabeled using the raw training dynamics as input. Specifically, the proposed method trains the noise detector in a supervised manner using the dataset with synthesized label noises and can adapt to various datasets (either naturally or synthesized label-noised) without retraining. We conduct extensive experiments to evaluate the proposed method.\nWe train the noise detector based on the synthesized label-noised CIFAR dataset and test such noise detector on Tiny ImageNet, CUB-200, Caltech-256, WebVision and Clothing1M. 
Results show that the proposed method precisely detects mislabeled samples on various datasets without further adaptation, and outperforms state-of-the-art methods.\nBesides, more experiments demonstrate that the mislabel identification can guide a label correction, namely data debugging, providing orthogonal improvements of algorithm-centric state-of-the-art techniques from the data aspect.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Qingrui and Li, Xuhong and Yu, Lei and Bian, Jiang and Zhao, Penghao and Li, Shupeng and Xiong, Haoyi and Dou, Dejing}, year={2023}, month={Jun.}, pages={8041-8049} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25972/25744", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25972", + "pdf_size": 919161, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5401289584529904904&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "baidu.com;baidu.com;buaa.edu.cn;buaa.edu.cn; ; ;baidu.com; ", + "email": "baidu.com;baidu.com;buaa.edu.cn;buaa.edu.cn; ; ;baidu.com; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1;0+0;1;1;1;1;2", + "aff_unique_norm": "Beihang University;Baidu Inc.;BCG Greater China", + "aff_unique_dep": "Sino-French Engineer School;;", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.baidu.com;", + "aff_unique_abbr": ";Baidu;", + "aff_campus_unique_index": "0+0;0;0+1;0;0;0;0;0", + "aff_campus_unique": "Beijing;Hangzhou", + "aff_country_unique_index": "0+0;0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25607", + "title": "Learning from the Wisdom of Crowds: Exploiting Similar Sessions for Session Search", + "track": "main", + "status": "Technical", + "abstract": "Search engines are essential internet services, enabling users to efficiently find the information they need. 
Session search employs users\u2019 session logs of queries to solve complex retrieval tasks, in which users search multiple times until interested documents are found. Most existing session search models focus on the contextual information within the current search, ignoring the evidence from historical search sessions. Considering the fact that many ongoing retrieval tasks should have already been carried out by other users with a similar intent, we argue that historical sessions with similar intents can help improve the accuracy of the current search task. We propose a novel Similar Session-enhanced Ranking (SSR) model to improve the session search performance using historical sessions with similar intents. Specifically, the candidate historical sessions are matched by query-level and session-level semantic similarity, and then query-level neighbor behaviors are aggregated by a Query-guided GNN (QGNN) while session-level neighbor behaviors are aggregated using the attention mechanism. Finally, we integrate the refined and aggregated historical neighbor information into the current search session. 
Experimental results on AOL and Tiangong-ST datasets show that our SSR model significantly outperforms the state-of-the-art models.", + "primary_area": "data mining and knowledge management", + "author": "Yuhang Ye; Zhonghua Li; Zhicheng Dou; Yutao Zhu; Changwang Zhang; Shangquan Wu; Zhao Cao", + "authorids": "", + "aff": "Huawei Poisson Lab, China; Huawei Poisson Lab, China; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China; University of Montreal, Quebec, Canada; Huawei Poisson Lab, China; Huawei Poisson Lab, China; Huawei Poisson Lab, China", + "bibtex": "@article{Ye_Li_Dou_Zhu_Zhang_Wu_Cao_2023, title={Learning from the Wisdom of Crowds: Exploiting Similar Sessions for Session Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25607}, DOI={10.1609/aaai.v37i4.25607}, abstractNote={Search engines are essential internet services, enabling users to efficiently find the information they need. Session search employs users\u2019 session logs of queries to solve complex retrieval tasks, in which users search multiple times until interested documents are found. Most existing session search models focus on the contextual information within the current search, ignoring the evidence from historical search sessions. Considering the fact that many ongoing retrieval tasks should have already been carried out by other users with a similar intent, we argue that historical sessions with similar intents can help improve the accuracy of the current search task. We propose a novel Similar Session-enhanced Ranking (SSR) model to improve the session search performance using historical sessions with similar intents. Specifically, the candidate historical sessions are matched by query-level and session-level semantic similarity, and then query-level neighbor behaviors are aggregated by a Query-guided GNN (QGNN) while session-level neighbor behaviors are aggregated using the attention mechanism. 
Finally, we integrate the refined and aggregated historical neighbor information into the current search session. Experimental results on AOL and Tiangong-ST datasets show that our SSR model significantly outperforms the state-of-the-art models.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Yuhang and Li, Zhonghua and Dou, Zhicheng and Zhu, Yutao and Zhang, Changwang and Wu, Shangquan and Cao, Zhao}, year={2023}, month={Jun.}, pages={4818-4826} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25607/25379", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25607", + "pdf_size": 348067, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=552732056791215139&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "huawei.com;huawei.com;ruc.edu.cn;gmail.com;huawei.com;huawei.com;huawei.com", + "email": "huawei.com;huawei.com;ruc.edu.cn;gmail.com;huawei.com;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;0;0;0", + "aff_unique_norm": "Huawei;Renmin University of China;University of Montreal", + "aff_unique_dep": "Poisson Lab;Gaoling School of Artificial Intelligence;", + "aff_unique_url": "https://www.huawei.com;http://www.ruc.edu.cn;https://wwwumontreal.ca", + "aff_unique_abbr": "Huawei;RUC;UM", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Beijing;Montreal", + "aff_country_unique_index": "0;0;0;1;0;0;0", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-26269", + "title": "Learning the Finer Things: Bayesian Structure Learning at the Instantiation Level", + "track": "main", + "status": "Technical", + "abstract": "Successful machine learning methods require a trade-off between memorization and generalization. Too much memorization and the model cannot generalize to unobserved examples. Too much over-generalization and we risk under-fitting the data. 
While we commonly measure their performance through cross validation and accuracy metrics, how should these algorithms cope in domains that are extremely under-determined where accuracy is always unsatisfactory? We present a novel probabilistic graphical model structure learning approach that can learn, generalize and explain in these elusive domains by operating at the random variable instantiation level. Using Minimum Description Length (MDL) analysis, we propose a new decomposition of the learning problem over all training exemplars, fusing together minimal entropy inferences to construct a final knowledge base. By leveraging Bayesian Knowledge Bases (BKBs), a framework that operates at the instantiation level and inherently subsumes Bayesian Networks (BNs), we develop both a theoretical MDL score and associated structure learning algorithm that demonstrates significant improvements over learned BNs on 40 benchmark datasets. Further, our algorithm incorporates recent off-the-shelf DAG learning techniques enabling tractable results even on large problems. We then demonstrate the utility of our approach in a significantly under-determined domain by learning gene regulatory networks on breast cancer gene mutational data available from The Cancer Genome Atlas (TCGA).", + "primary_area": "machine learning iv", + "author": "Chase Yakaboski; Eugene Santos, Jr", + "authorids": "", + "aff": "Thayer School of Engineering at Dartmouth College, Hanover, NH; Thayer School of Engineering at Dartmouth College, Hanover, NH", + "bibtex": "@article{Yakaboski_Santos, Jr_2023, title={Learning the Finer Things: Bayesian Structure Learning at the Instantiation Level}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26269}, DOI={10.1609/aaai.v37i9.26269}, abstractNote={Successful machine learning methods require a trade-off between memorization and generalization. Too much memorization and the model cannot generalize to unobserved examples. 
Too much over-generalization and we risk under-fitting the data. While we commonly measure their performance through cross validation and accuracy metrics, how should these algorithms cope in domains that are extremely under-determined where accuracy is always unsatisfactory? We present a novel probabilistic graphical model structure learning approach that can learn, generalize and explain in these elusive domains by operating at the random variable instantiation level. Using Minimum Description Length (MDL) analysis, we propose a new decomposition of the learning problem over all training exemplars, fusing together minimal entropy inferences to construct a final knowledge base. By leveraging Bayesian Knowledge Bases (BKBs), a framework that operates at the instantiation level and inherently subsumes Bayesian Networks (BNs), we develop both a theoretical MDL score and associated structure learning algorithm that demonstrates significant improvements over learned BNs on 40 benchmark datasets. Further, our algorithm incorporates recent off-the-shelf DAG learning techniques enabling tractable results even on large problems. 
We then demonstrate the utility of our approach in a significantly under-determined domain by learning gene regulatory networks on breast cancer gene mutational data available from The Cancer Genome Atlas (TCGA).}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yakaboski, Chase and Santos, Jr, Eugene}, year={2023}, month={Jun.}, pages={10693-10701} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26269/26041", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26269", + "pdf_size": 305317, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5368216039064325959&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "dartmouth.edu;dartmouth.edu", + "email": "dartmouth.edu;dartmouth.edu", + "github": "https://github.com/di2ag/pybkb", + "project": "https://arxiv.org/abs/2303.04339", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Dartmouth College", + "aff_unique_dep": "Thayer School of Engineering", + "aff_unique_url": "https://engineering.dartmouth.edu", + "aff_unique_abbr": "Dartmouth", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Hanover", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25804", + "title": "Learning to Break Symmetries for Efficient Optimization in Answer Set Programming", + "track": "main", + "status": "Technical", + "abstract": "The ability to efficiently solve hard combinatorial optimization problems is a key prerequisite to various applications of declarative programming paradigms. 
Symmetries in solution candidates pose a significant challenge to modern optimization algorithms since the enumeration of such candidates might substantially reduce their performance.\n\nThis paper proposes a novel approach using Inductive Logic Programming (ILP) to lift symmetry-breaking constraints for optimization problems modeled in Answer Set Programming (ASP). Given an ASP encoding with optimization statements and a set of small representative instances, our method augments ground ASP programs with auxiliary normal rules enabling the identification of symmetries using existing tools, like SBASS. Then, the obtained symmetries are lifted to first-order constraints with ILP. \nWe prove the correctness of our method and evaluate it on real-world optimization problems from the domain of automated configuration. Our experiments show significant improvements of optimization performance due to the learned first-order constraints.", + "primary_area": "knowledge representation and reasoning", + "author": "Alice Tarzariol; Martin Gebser; Konstantin Schekotihin; Mark Law", + "authorids": "", + "aff": "University of Klagenfurt, Klagenfurt, Austria; University of Klagenfurt, Klagenfurt, Austria + Graz University of Technology, Graz, Austria; University of Klagenfurt, Klagenfurt, Austria; ILASP Limited, Grantham, UK", + "bibtex": "@article{Tarzariol_Gebser_Schekotihin_Law_2023, title={Learning to Break Symmetries for Efficient Optimization in Answer Set Programming}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25804}, DOI={10.1609/aaai.v37i5.25804}, abstractNote={The ability to efficiently solve hard combinatorial optimization problems is a key prerequisite to various applications of declarative programming paradigms. Symmetries in solution candidates pose a significant challenge to modern optimization algorithms since the enumeration of such candidates might substantially reduce their performance. 
This paper proposes a novel approach using Inductive Logic Programming (ILP) to lift symmetry-breaking constraints for optimization problems modeled in Answer Set Programming (ASP). Given an ASP encoding with optimization statements and a set of small representative instances, our method augments ground ASP programs with auxiliary normal rules enabling the identification of symmetries using existing tools, like SBASS. Then, the obtained symmetries are lifted to first-order constraints with ILP. We prove the correctness of our method and evaluate it on real-world optimization problems from the domain of automated configuration. Our experiments show significant improvements of optimization performance due to the learned first-order constraints.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tarzariol, Alice and Gebser, Martin and Schekotihin, Konstantin and Law, Mark}, year={2023}, month={Jun.}, pages={6541-6549} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25804/25576", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25804", + "pdf_size": 169845, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17206423519291419438&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 7, + "aff_domain": "aau.at;aau.at;aau.at;ilasp.com", + "email": "aau.at;aau.at;aau.at;ilasp.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;0;2", + "aff_unique_norm": "University of Klagenfurt;Graz University of Technology;ILASP Limited", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uni-klagenfurt.at;https://www.tugraz.at;", + "aff_unique_abbr": "Uni Klagenfurt;TUGraz;", + "aff_campus_unique_index": "0;0+1;0", + "aff_campus_unique": "Klagenfurt;Graz;", + "aff_country_unique_index": "0;0+0;0;1", + "aff_country_unique": "Austria;United Kingdom" + }, + { + "id": "article-25610", + "title": "Learning to Count Isomorphisms with Graph Neural 
Networks", + "track": "main", + "status": "Technical", + "abstract": "Subgraph isomorphism counting is an important problem on graphs, as many graph-based tasks exploit recurring subgraph patterns. Classical methods usually boil down to a backtracking framework that needs to navigate a huge search space with prohibitive computational cost. Some recent studies resort to graph neural networks (GNNs) to learn a low-dimensional representation for both the query and input graphs, in order to predict the number of subgraph isomorphisms on the input graph. However, typical GNNs employ a node-centric message passing scheme that receives and aggregates messages on nodes, which is inadequate in complex structure matching for isomorphism counting. Moreover, on an input graph, the space of possible query graphs is enormous, and different parts of the input graph will be triggered to match different queries. Thus, expecting a fixed representation of the input graph to match diversely structured query graphs is unrealistic. In this paper, we propose a novel GNN called Count-GNN for subgraph isomorphism counting, to deal with the above challenges. At the edge level, given that an edge is an atomic unit of encoding graph structures, we propose an edge-centric message passing scheme, where messages on edges are propagated and aggregated based on the edge adjacency to preserve fine-grained structural information. At the graph level, we modulate the input graph representation conditioned on the query, so that the input graph can be adapted to each query individually to improve their matching. 
Finally, we conduct extensive experiments on a number of benchmark datasets to demonstrate the superior performance of Count-GNN.", + "primary_area": "data mining and knowledge management", + "author": "Xingtong Yu; Zemin Liu; Yuan Fang; Xinming Zhang", + "authorids": "", + "aff": "University of Science and Technology of China, China; National University of Singapore, Singapore; Singapore Management University, Singapore; University of Science and Technology of China, China", + "bibtex": "@article{Yu_Liu_Fang_Zhang_2023, title={Learning to Count Isomorphisms with Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25610}, DOI={10.1609/aaai.v37i4.25610}, abstractNote={Subgraph isomorphism counting is an important problem on graphs, as many graph-based tasks exploit recurring subgraph patterns. Classical methods usually boil down to a backtracking framework that needs to navigate a huge search space with prohibitive computational cost. Some recent studies resort to graph neural networks (GNNs) to learn a low-dimensional representation for both the query and input graphs, in order to predict the number of subgraph isomorphisms on the input graph. However, typical GNNs employ a node-centric message passing scheme that receives and aggregates messages on nodes, which is inadequate in complex structure matching for isomorphism counting. Moreover, on an input graph, the space of possible query graphs is enormous, and different parts of the input graph will be triggered to match different queries. Thus, expecting a fixed representation of the input graph to match diversely structured query graphs is unrealistic. In this paper, we propose a novel GNN called Count-GNN for subgraph isomorphism counting, to deal with the above challenges. 
At the edge level, given that an edge is an atomic unit of encoding graph structures, we propose an edge-centric message passing scheme, where messages on edges are propagated and aggregated based on the edge adjacency to preserve fine-grained structural information. At the graph level, we modulate the input graph representation conditioned on the query, so that the input graph can be adapted to each query individually to improve their matching. Finally, we conduct extensive experiments on a number of benchmark datasets to demonstrate the superior performance of Count-GNN.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Xingtong and Liu, Zemin and Fang, Yuan and Zhang, Xinming}, year={2023}, month={Jun.}, pages={4845-4853} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25610/25382", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25610", + "pdf_size": 304319, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14889089383496443902&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "mail.ustc.edu.cn;nus.edu.sg;smu.edu.sg;ustc.edu.cn", + "email": "mail.ustc.edu.cn;nus.edu.sg;smu.edu.sg;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Science and Technology of China;National University of Singapore;Singapore Management University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.nus.edu.sg;https://www.smu.edu.sg", + "aff_unique_abbr": "USTC;NUS;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25742", + "title": "Learning to Defer with Limited Expert Predictions", + "track": "main", + "status": "Technical", + "abstract": "Recent research suggests that combining AI models with a human expert can 
exceed the performance of either alone. The combination of their capabilities is often realized by learning to defer algorithms that enable the AI to learn to decide whether to make a prediction for a particular instance or defer it to the human expert. However, to accurately learn which instances should be deferred to the human expert, a large number of expert predictions that accurately reflect the expert's capabilities are required\u2014in addition to the ground truth labels needed to train the AI. This requirement shared by many learning to defer algorithms hinders their adoption in scenarios where the responsible expert regularly changes or where acquiring a sufficient number of expert predictions is costly. In this paper, we propose a three-step approach to reduce the number of expert predictions required to train learning to defer algorithms. It encompasses (1) the training of an embedding model with ground truth labels to generate feature representations that serve as a basis for (2) the training of an expertise predictor model to approximate the expert's capabilities. (3) The expertise predictor generates artificial expert predictions for instances not yet labeled by the expert, which are required by the learning to defer algorithms. We evaluate our approach on two public datasets. One with \"synthetically\" generated human experts and another from the medical domain containing real-world radiologists' predictions. Our experiments show that the approach allows the training of various learning to defer algorithms with a minimal number of human expert predictions. 
Furthermore, we demonstrate that even a small number of expert predictions per class is sufficient for these algorithms to exceed the performance the AI and the human expert can achieve individually.", + "primary_area": "humans and ai", + "author": "Patrick Hemmer; Lukas Thede; Michael V\u00f6ssing; Johannes Jakubik; Niklas K\u00fchl", + "authorids": "", + "aff": "Karlsruhe Institute of Technology; Karlsruhe Institute of Technology; Karlsruhe Institute of Technology; Karlsruhe Institute of Technology; Karlsruhe Institute of Technology", + "bibtex": "@article{Hemmer_Thede_V\u00f6ssing_Jakubik_K\u00fchl_2023, title={Learning to Defer with Limited Expert Predictions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25742}, DOI={10.1609/aaai.v37i5.25742}, abstractNote={Recent research suggests that combining AI models with a human expert can exceed the performance of either alone. The combination of their capabilities is often realized by learning to defer algorithms that enable the AI to learn to decide whether to make a prediction for a particular instance or defer it to the human expert. However, to accurately learn which instances should be deferred to the human expert, a large number of expert predictions that accurately reflect the expert\u2019s capabilities are required\u2014in addition to the ground truth labels needed to train the AI. This requirement shared by many learning to defer algorithms hinders their adoption in scenarios where the responsible expert regularly changes or where acquiring a sufficient number of expert predictions is costly. In this paper, we propose a three-step approach to reduce the number of expert predictions required to train learning to defer algorithms. It encompasses (1) the training of an embedding model with ground truth labels to generate feature representations that serve as a basis for (2) the training of an expertise predictor model to approximate the expert\u2019s capabilities. 
(3) The expertise predictor generates artificial expert predictions for instances not yet labeled by the expert, which are required by the learning to defer algorithms. We evaluate our approach on two public datasets. One with \"synthetically\" generated human experts and another from the medical domain containing real-world radiologists\u2019 predictions. Our experiments show that the approach allows the training of various learning to defer algorithms with a minimal number of human expert predictions. Furthermore, we demonstrate that even a small number of expert predictions per class is sufficient for these algorithms to exceed the performance the AI and the human expert can achieve individually.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hemmer, Patrick and Thede, Lukas and V\u00f6ssing, Michael and Jakubik, Johannes and K\u00fchl, Niklas}, year={2023}, month={Jun.}, pages={6002-6011} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25742/25514", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25742", + "pdf_size": 305429, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18323967427523361653&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "kit.edu;alumni.kit.edu;kit.edu;kit.edu;kit.edu", + "email": "kit.edu;alumni.kit.edu;kit.edu;kit.edu;kit.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Karlsruhe Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kit.edu", + "aff_unique_abbr": "KIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25356", + "title": "Learning to Generate an Unbiased Scene Graph by Using Attribute-Guided Predicate Features", + "track": "main", + "status": "Technical", + "abstract": "Scene Graph 
Generation (SGG) aims to capture the semantic information in an image and build a structured representation, which facilitates downstream tasks. The current challenge in SGG is to tackle the biased predictions caused by the long-tailed distribution of predicates. Since multiple predicates in SGG are coupled in an image, existing data re-balancing methods cannot completely balance the head and tail predicates. In this work, a decoupled learning framework is proposed for unbiased scene graph generation by using attribute-guided predicate features to construct a balanced training set. Specifically, the predicate recognition is decoupled into Predicate Feature Representation Learning (PFRL) and predicate classifier training with a class-balanced predicate feature set, which is constructed by our proposed Attribute-guided Predicate Feature Generation (A-PFG) model. In the A-PFG model, we first define the class labels of <subject-predicate-object> and corresponding visual feature as attributes to describe a predicate. Then the predicate feature and the attribute embedding are mapped into a shared hidden space by a dual Variational Auto-encoder (VAE), and finally the synthetic predicate features are forced to learn the contextual information in the attributes via cross reconstruction and distribution alignment. To demonstrate the effectiveness of our proposed method, our decoupled learning framework and A-PFG model are applied to various SGG models. The empirical results show that our method is substantially improved on all benchmarks and achieves new state-of-the-art performance for unbiased scene graph generation. 
Our code is available at https://github.com/wanglei0618/A-PFG.", + "primary_area": "computer vision ii", + "author": "Lei Wang; Zejian Yuan; Badong Chen", + "authorids": "", + "aff": "Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University, Xi\u2019an, China; Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University, Xi\u2019an, China; Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University, Xi\u2019an, China", + "bibtex": "@article{Wang_Yuan_Chen_2023, title={Learning to Generate an Unbiased Scene Graph by Using Attribute-Guided Predicate Features}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25356}, DOI={10.1609/aaai.v37i2.25356}, abstractNote={Scene Graph Generation (SGG) aims to capture the semantic information in an image and build a structured representation, which facilitates downstream tasks. The current challenge in SGG is to tackle the biased predictions caused by the long-tailed distribution of predicates. Since multiple predicates in SGG are coupled in an image, existing data re-balancing methods cannot completely balance the head and tail predicates. In this work, a decoupled learning framework is proposed for unbiased scene graph generation by using attribute-guided predicate features to construct a balanced training set. Specifically, the predicate recognition is decoupled into Predicate Feature Representation Learning (PFRL) and predicate classifier training with a class-balanced predicate feature set, which is constructed by our proposed Attribute-guided Predicate Feature Generation (A-PFG) model. In the A-PFG model, we first define the class labels of <subject-predicate-object> and corresponding visual feature as attributes to describe a predicate. 
Then the predicate feature and the attribute embedding are mapped into a shared hidden space by a dual Variational Auto-encoder (VAE), and finally the synthetic predicate features are forced to learn the contextual information in the attributes via cross reconstruction and distribution alignment. To demonstrate the effectiveness of our proposed method, our decoupled learning framework and A-PFG model are applied to various SGG models. The empirical results show that our method is substantially improved on all benchmarks and achieves new state-of-the-art performance for unbiased scene graph generation. Our code is available at https://github.com/wanglei0618/A-PFG.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Lei and Yuan, Zejian and Chen, Badong}, year={2023}, month={Jun.}, pages={2581-2589} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25356/25128", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25356", + "pdf_size": 6184732, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4351623706587833284&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn", + "email": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn", + "github": "https://github.com/wanglei0618/A-PFG", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Xi'an Jiaotong University", + "aff_unique_dep": "Institute of Artificial Intelligence and Robotics", + "aff_unique_url": "http://www.xjtu.edu.cn", + "aff_unique_abbr": "XJTU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Xi'an", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26510", + "title": "Learning to Imagine: Distillation-Based Interactive Context Exploitation for Dialogue State Tracking", + "track": "main", + "status": "Technical", + "abstract": "In dialogue 
state tracking (DST), the exploitation of dialogue history is a crucial research direction, and the existing DST models can be divided into two categories: full-history models and partial-history models. Since the \u201cselect first, use later\u201d mechanism explicitly filters the distracting information being passed to the downstream state prediction, the partial-history models have recently achieved a performance advantage over the full-history models. However, besides the redundant information, some critical dialogue context information was inevitably filtered out by the partial-history models simultaneously. To reconcile the contextual consideration with avoiding the introduction of redundant information, we propose DICE-DST, a model-agnostic module widely applicable to the partial-history DST models, which aims to strengthen the ability of context exploitation for the encoder of each DST model. Specifically, we first construct a teacher encoder and devise two contextual reasoning tasks to train it to acquire extensive dialogue contextual knowledge. Then we transfer the contextual knowledge from the teacher encoder to the student encoder via a novel turn-level attention-alignment distillation. 
Experimental results show that our approach extensively improves the performance of partial-history DST models and thereby achieves new state-of-the-art performance on multiple mainstream datasets while keeping high efficiency.", + "primary_area": "speech natural language processing", + "author": "Jinyu Guo; Kai Shuang; Kaihang Zhang; Yixuan Liu; Jijie Li; Zihan Wang", + "authorids": "", + "aff": "State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications + School of Computer Science, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications + School of Computer Science, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications + School of Computer Science, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications + School of Computer Science, Beijing University of Posts and Telecommunications; Beijing Academy of Artificial Intelligence, Beijing, China; Graduate School of Information Science and Technology, The University of Tokyo", + "bibtex": "@article{Guo_Shuang_Zhang_Liu_Li_Wang_2023, title={Learning to Imagine: Distillation-Based Interactive Context Exploitation for Dialogue State Tracking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26510}, DOI={10.1609/aaai.v37i11.26510}, abstractNote={In dialogue state tracking (DST), the exploitation of dialogue history is a crucial research direction, and the existing DST models can be divided into two categories: full-history models and partial-history models. 
Since the \u201cselect first, use later\u201d mechanism explicitly filters the distracting information being passed to the downstream state prediction, the partial-history models have recently achieved a performance advantage over the full-history models. However, besides the redundant information, some critical dialogue context information was inevitably filtered out by the partial-history models simultaneously. To reconcile the contextual consideration with avoiding the introduction of redundant information, we propose DICE-DST, a model-agnostic module widely applicable to the partial-history DST models, which aims to strengthen the ability of context exploitation for the encoder of each DST model. Specifically, we first construct a teacher encoder and devise two contextual reasoning tasks to train it to acquire extensive dialogue contextual knowledge. Then we transfer the contextual knowledge from the teacher encoder to the student encoder via a novel turn-level attention-alignment distillation. 
Experimental results show that our approach extensively improves the performance of partial-history DST models and thereby achieves new state-of-the-art performance on multiple mainstream datasets while keeping high efficiency.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Jinyu and Shuang, Kai and Zhang, Kaihang and Liu, Yixuan and Li, Jijie and Wang, Zihan}, year={2023}, month={Jun.}, pages={12845-12853} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26510/26282", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26510", + "pdf_size": 311652, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6787258037523336829&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;baai.ac.cn;tkl.iis.u-tokyo.ac.jp", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;baai.ac.cn;tkl.iis.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;0+0;1;2", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Beijing Academy of Artificial Intelligence;The University of Tokyo", + "aff_unique_dep": "State Key Laboratory of Networking and Switching Technology;;Graduate School of Information Science and Technology", + "aff_unique_url": "http://www.bupt.edu.cn/;https://www.baaic.cn;https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "BUPT;BAAI;UTokyo", + "aff_campus_unique_index": "1;1;1;1;1;2", + "aff_campus_unique": ";Beijing;Tokyo", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;1", + "aff_country_unique": "China;Japan" + }, + { + "id": "article-26545", + "title": "Learning to Know Myself: A Coarse-to-Fine Persona-Aware Training Framework for Personalized Dialogue Generation", + "track": "main", + "status": "Technical", + "abstract": "A critical challenge for open-domain dialogue agents is to generate persona-relevant and consistent 
responses. Due to the nature of persona sparsity in conversation scenarios, previous persona-based dialogue agents trained with Maximum Likelihood Estimation tend to overlook the given personas and generate responses irrelevant or inconsistent with personas. To address this problem, we propose a two-stage coarse-to-fine persona-aware training framework to improve the persona consistency of a dialogue agent progressively. Specifically, our framework first trains the dialogue agent to answer the constructed persona-aware questions, making it highly sensitive to the personas to generate persona-relevant responses. Then the dialogue agent is further trained with a contrastive learning paradigm by explicitly perceiving the difference between the consistent and the generated inconsistent responses, forcing it to pay more attention to the key persona information to generate consistent responses. By applying our proposed training framework to several representative baseline models, experimental results show significant boosts on both automatic and human evaluation metrics, especially the consistency of generated responses.", + "primary_area": "speech natural language processing", + "author": "Yunpeng Li; Yue Hu; Yajing Sun; Luxi Xing; Ping Guo; Yuqiang Xie; Wei Peng", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of 
Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Li_Hu_Sun_Xing_Guo_Xie_Peng_2023, title={Learning to Know Myself: A Coarse-to-Fine Persona-Aware Training Framework for Personalized Dialogue Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26545}, DOI={10.1609/aaai.v37i11.26545}, abstractNote={A critical challenge for open-domain dialogue agents is to generate persona-relevant and consistent responses. Due to the nature of persona sparsity in conversation scenarios, previous persona-based dialogue agents trained with Maximum Likelihood Estimation tend to overlook the given personas and generate responses irrelevant or inconsistent with personas. To address this problem, we propose a two-stage coarse-to-fine persona-aware training framework to improve the persona consistency of a dialogue agent progressively. Specifically, our framework first trains the dialogue agent to answer the constructed persona-aware questions, making it highly sensitive to the personas to generate persona-relevant responses. Then the dialogue agent is further trained with a contrastive learning paradigm by explicitly perceiving the difference between the consistent and the generated inconsistent responses, forcing it to pay more attention to the key persona information to generate consistent responses. 
By applying our proposed training framework to several representative baseline models, experimental results show significant boosts on both automatic and human evaluation metrics, especially the consistency of generated responses.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yunpeng and Hu, Yue and Sun, Yajing and Xing, Luxi and Guo, Ping and Xie, Yuqiang and Peng, Wei}, year={2023}, month={Jun.}, pages={13157-13165} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26545/26317", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26545", + "pdf_size": 2608562, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12606769874751209059&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "iie.ac.cn;iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25203", + "title": "Learning to Learn Better for Video Object Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Recently, the joint learning framework (JOINT) integrates matching based transductive reasoning and online inductive learning to achieve accurate and robust semi-supervised video object segmentation (SVOS). 
However, using the mask embedding as the label to guide the generation of target features in the two branches may result in inadequate target representation and degrade the performance. Besides, how to reasonably fuse the target features in the two different branches rather than simply adding them together to avoid the adverse effect of one dominant branch has not been investigated. In this paper, we propose a novel framework that emphasizes Learning to Learn Better (LLB) target features for SVOS, termed LLB, where we design the discriminative label generation module (DLGM) and the adaptive fusion module to address these issues. Technically, the DLGM takes the background-filtered frame instead of the target mask as input and adopts a lightweight encoder to generate the target features, which serves as the label of the online few-shot learner and the value of the decoder in the transformer to guide the two branches to learn more discriminative target representation. The adaptive fusion module maintains a learnable gate for each branch, which reweighs the element-wise feature representation and allows an adaptive amount of target information in each branch flowing to the fused target feature, thus preventing one branch from being dominant and making the target feature more robust to distractor. 
Extensive experiments on public benchmarks show that our proposed LLB method achieves state-of-the-art performance.", + "primary_area": "computer vision i", + "author": "Meng Lan; Jing Zhang; Lefei Zhang; Dacheng Tao", + "authorids": "", + "aff": "Institute of Artificial Intelligence and School of Computer Science, Wuhan University, China; The University of Sydney, Australia; Institute of Artificial Intelligence and School of Computer Science, Wuhan University, China + Hubei Luojia Laboratory, China; JD Explore Academy, China + The University of Sydney, Australia", + "bibtex": "@article{Lan_Zhang_Zhang_Tao_2023, title={Learning to Learn Better for Video Object Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25203}, DOI={10.1609/aaai.v37i1.25203}, abstractNote={Recently, the joint learning framework (JOINT) integrates matching based transductive reasoning and online inductive learning to achieve accurate and robust semi-supervised video object segmentation (SVOS). However, using the mask embedding as the label to guide the generation of target features in the two branches may result in inadequate target representation and degrade the performance. Besides, how to reasonably fuse the target features in the two different branches rather than simply adding them together to avoid the adverse effect of one dominant branch has not been investigated. In this paper, we propose a novel framework that emphasizes Learning to Learn Better (LLB) target features for SVOS, termed LLB, where we design the discriminative label generation module (DLGM) and the adaptive fusion module to address these issues. Technically, the DLGM takes the background-filtered frame instead of the target mask as input and adopts a lightweight encoder to generate the target features, which serves as the label of the online few-shot learner and the value of the decoder in the transformer to guide the two branches to learn more discriminative target representation. 
The adaptive fusion module maintains a learnable gate for each branch, which reweighs the element-wise feature representation and allows an adaptive amount of target information in each branch flowing to the fused target feature, thus preventing one branch from being dominant and making the target feature more robust to distractor. Extensive experiments on public benchmarks show that our proposed LLB method achieves state-of-the-art performance.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lan, Meng and Zhang, Jing and Zhang, Lefei and Tao, Dacheng}, year={2023}, month={Jun.}, pages={1205-1212} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25203/24975", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25203", + "pdf_size": 788320, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10682406066798195351&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "whu.edu.cn;sydney.edu.au;whu.edu.cn;gmail.com", + "email": "whu.edu.cn;sydney.edu.au;whu.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0+2;3+1", + "aff_unique_norm": "Wuhan University;The University of Sydney;Hubei Luojia Laboratory;JD Explore Academy", + "aff_unique_dep": "Institute of Artificial Intelligence and School of Computer Science;;;", + "aff_unique_url": "http://www.whu.edu.cn;https://www.sydney.edu.au;;", + "aff_unique_abbr": "WHU;USYD;;", + "aff_campus_unique_index": "0;0;", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0;1;0+0;0+1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26489", + "title": "Learning to Memorize Entailment and Discourse Relations for Persona-Consistent Dialogues", + "track": "main", + "status": "Technical", + "abstract": "Maintaining engagement and consistency is particularly important in dialogue systems. 
Existing works have improved the performance of dialogue systems by intentionally learning interlocutor personas with sophisticated network structures. One issue with this approach is that it requires more personal corpora with annotations. Additionally, these models typically perform the next utterance prediction to generate a response but neglect the discourse coherence in the entire conversation. To address these issues, this study proposes a method of learning to memorize entailment and discourse relations for persona-consistent dialogue tasks. Entailment text pairs in natural language inference dataset were applied to learn latent entailment relations as external memories by premise-to-hypothesis generation task. Furthermore, an internal memory with a similar architecture was applied to the discourse information in the dialogue. Placing orthogonality restrictions on these two memory spaces ensures that the latent entailment relations remain dialogue-independent. Both memories collaborate to obtain entailment and discourse representation for the generation, allowing a deeper understanding of both consistency and coherence. Experiments on two large public datasets, PersonaChat and DSTC7-AVSD, demonstrated the effectiveness of the proposed method. Both automatic and human evaluations indicate that the proposed model outperforms several strong baselines in terms of both persona consistency and response coherence. 
Our source code is availabled at https://github.com/Chenrj233/LMEDR.", + "primary_area": "speech natural language processing", + "author": "Ruijun Chen; Jin Wang; Liang-Chih Yu; Xuejie Zhang", + "authorids": "", + "aff": "School of Information Science and Engineering, Yunnan University, Yunnan, China; School of Information Science and Engineering, Yunnan University, Yunnan, China; Department of Information Management, Yuan Ze University, Taiwan; School of Information Science and Engineering, Yunnan University, Yunnan, China", + "bibtex": "@article{Chen_Wang_Yu_Zhang_2023, title={Learning to Memorize Entailment and Discourse Relations for Persona-Consistent Dialogues}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26489}, DOI={10.1609/aaai.v37i11.26489}, abstractNote={Maintaining engagement and consistency is particularly important in dialogue systems. Existing works have improved the performance of dialogue systems by intentionally learning interlocutor personas with sophisticated network structures. One issue with this approach is that it requires more personal corpora with annotations. Additionally, these models typically perform the next utterance prediction to generate a response but neglect the discourse coherence in the entire conversation. To address these issues, this study proposes a method of learning to memorize entailment and discourse relations for persona-consistent dialogue tasks. Entailment text pairs in natural language inference dataset were applied to learn latent entailment relations as external memories by premise-to-hypothesis generation task. Furthermore, an internal memory with a similar architecture was applied to the discourse information in the dialogue. Placing orthogonality restrictions on these two memory spaces ensures that the latent entailment relations remain dialogue-independent. 
Both memories collaborate to obtain entailment and discourse representation for the generation, allowing a deeper understanding of both consistency and coherence. Experiments on two large public datasets, PersonaChat and DSTC7-AVSD, demonstrated the effectiveness of the proposed method. Both automatic and human evaluations indicate that the proposed model outperforms several strong baselines in terms of both persona consistency and response coherence. Our source code is availabled at https://github.com/Chenrj233/LMEDR.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Ruijun and Wang, Jin and Yu, Liang-Chih and Zhang, Xuejie}, year={2023}, month={Jun.}, pages={12653-12661} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26489/26261", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26489", + "pdf_size": 373920, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1013647481082074856&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ynu.edu.cn;ynu.edu.cn;saturn.yzu.edu.tw;ynu.edu.cn", + "email": "mail.ynu.edu.cn;ynu.edu.cn;saturn.yzu.edu.tw;ynu.edu.cn", + "github": "https://github.com/Chenrj233/LMEDR", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Yunnan University;Yuan Ze University", + "aff_unique_dep": "School of Information Science and Engineering;Department of Information Management", + "aff_unique_url": "http://www.ynu.edu.cn;https://www.yzu.edu.tw", + "aff_unique_abbr": "YNU;YZU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Yunnan;", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;Taiwan, China" + }, + { + "id": "article-26391", + "title": "Learning to Play General-Sum Games against Multiple Boundedly Rational Agents", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of training a principal in a multi-agent 
general-sum game using reinforcement learning (RL). Learning a robust principal policy requires anticipating the worst possible strategic responses of other agents, which is generally NP-hard. However, we show that no-regret dynamics can identify these worst-case responses in poly-time in smooth games. We propose a framework that uses this policy evaluation method for efficiently learning a robust principal policy using RL. This framework can be extended to provide robustness to boundedly rational agents too. Our motivating application is automated mechanism design: we empirically demonstrate our framework learns robust mechanisms in both matrix games and complex spatiotemporal games. In particular, we learn a dynamic tax policy that improves the welfare of a simulated trade-and-barter economy by 15%, even when facing previously unseen boundedly rational RL taxpayers.", + "primary_area": "multiagent systems", + "author": "Eric Zhao; Alexander R. Trott; Caiming Xiong; Stephan Zheng", + "authorids": "", + "aff": "Salesforce Research. Palo Alto, California, USA + University of California, Berkeley. Berkeley, California, USA; MosaicML. San Francisco, California, USA; Salesforce Research. Palo Alto, California, USA; Salesforce Research. Palo Alto, California, USA", + "bibtex": "@article{Zhao_Trott_Xiong_Zheng_2023, title={Learning to Play General-Sum Games against Multiple Boundedly Rational Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26391}, DOI={10.1609/aaai.v37i10.26391}, abstractNote={We study the problem of training a principal in a multi-agent general-sum game using reinforcement learning (RL). Learning a robust principal policy requires anticipating the worst possible strategic responses of other agents, which is generally NP-hard. However, we show that no-regret dynamics can identify these worst-case responses in poly-time in smooth games. 
We propose a framework that uses this policy evaluation method for efficiently learning a robust principal policy using RL. This framework can be extended to provide robustness to boundedly rational agents too. Our motivating application is automated mechanism design: we empirically demonstrate our framework learns robust mechanisms in both matrix games and complex spatiotemporal games. In particular, we learn a dynamic tax policy that improves the welfare of a simulated trade-and-barter economy by 15%, even when facing previously unseen boundedly rational RL taxpayers.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Eric and Trott, Alexander R. and Xiong, Caiming and Zheng, Stephan}, year={2023}, month={Jun.}, pages={11781-11789} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26391/26163", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26391", + "pdf_size": 1053437, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13950167503266961858&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "salesforce.com;mosaicml.com;salesforce.com;salesforce.com", + "email": "salesforce.com;mosaicml.com;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0;0", + "aff_unique_norm": "Salesforce Research;University of California, Berkeley;MosaicML", + "aff_unique_dep": ";;", + "aff_unique_url": "https://research.salesforce.com;https://www.berkeley.edu;", + "aff_unique_abbr": "Salesforce;UC Berkeley;", + "aff_campus_unique_index": "0+1;2;0;0", + "aff_campus_unique": "Palo Alto;Berkeley;San Francisco", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26827", + "title": "Learning to See the Physical World", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "This paper is part of the AAAI-23 New Faculty 
Highlights. In my presentation, I will introduce my research goal, which is to build machines that see, interact with, and reason about the physical world just like humans. This problem, which we call physical scene understanding, involves three key topics that bridge research in computer science, AI, robotics, cognitive science, and neuroscience: Perception, Physical Interaction, and Reasoning.", + "primary_area": "", + "author": "Jiajun Wu", + "authorids": "", + "aff": "Stanford University", + "bibtex": "@article{Wu_2024, title={Learning to See the Physical World}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26827}, DOI={10.1609/aaai.v37i13.26827}, abstractNote={This paper is part of the AAAI-23 New Faculty Highlights. In my presentation, I will introduce my research goal, which is to build machines that see, interact with, and reason about the physical world just like humans. This problem, which we call physical scene understanding, involves three key topics that bridge research in computer science, AI, robotics, cognitive science, and neuroscience: Perception, Physical Interaction, and Reasoning.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Jiajun}, year={2024}, month={Jul.}, pages={15460-15460} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26827/26599", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26827", + "pdf_size": 1361036, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5029696192865399448&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "salesforce.com;mosaicml.com;salesforce.com;salesforce.com", + "email": "salesforce.com;mosaicml.com;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": 
"Stanford", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25756", + "title": "Learning to Select Pivotal Samples for Meta Re-weighting", + "track": "main", + "status": "Technical", + "abstract": "Sample re-weighting strategies provide a promising mechanism to deal with imperfect training data in machine learning, such as noisily labeled or class-imbalanced data. One such strategy involves formulating a bi-level optimization problem called the meta re-weighting problem, whose goal is to optimize performance on a small set of perfect pivotal samples, called meta samples. Many approaches have been proposed to efficiently solve this problem. However, all of them assume that a perfect meta sample set is already provided while we observe that the selections of meta sample set is performance-critical. In this paper, we study how to learn to identify such a meta sample set from a large, imperfect training set, that is subsequently cleaned and used to optimize performance in the meta re-weighting setting. We propose a learning framework which reduces the meta samples selection problem to a weighted K-means clustering problem through rigorously theoretical analysis. We propose two clustering methods within our learning framework, Representation-based clustering method (RBC) and Gradient-based clustering method (GBC), for balancing performance and computational efficiency. 
Empirical studies demonstrate the performance advantage of our methods over various baseline methods", + "primary_area": "humans and ai", + "author": "Yinjun Wu; Adam Stein; Jacob Gardner; Mayur Naik", + "authorids": "", + "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", + "bibtex": "@article{Wu_Stein_Gardner_Naik_2023, title={Learning to Select Pivotal Samples for Meta Re-weighting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25756}, DOI={10.1609/aaai.v37i5.25756}, abstractNote={Sample re-weighting strategies provide a promising mechanism to deal with imperfect training data in machine learning, such as noisily labeled or class-imbalanced data. One such strategy involves formulating a bi-level optimization problem called the meta re-weighting problem, whose goal is to optimize performance on a small set of perfect pivotal samples, called meta samples. Many approaches have been proposed to efficiently solve this problem. However, all of them assume that a perfect meta sample set is already provided while we observe that the selections of meta sample set is performance-critical. In this paper, we study how to learn to identify such a meta sample set from a large, imperfect training set, that is subsequently cleaned and used to optimize performance in the meta re-weighting setting. We propose a learning framework which reduces the meta samples selection problem to a weighted K-means clustering problem through rigorously theoretical analysis. We propose two clustering methods within our learning framework, Representation-based clustering method (RBC) and Gradient-based clustering method (GBC), for balancing performance and computational efficiency. 
Empirical studies demonstrate the performance advantage of our methods over various baseline methods}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yinjun and Stein, Adam and Gardner, Jacob and Naik, Mayur}, year={2023}, month={Jun.}, pages={6128-6136} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25756/25528", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25756", + "pdf_size": 904039, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:qUDwl2goFT8J:scholar.google.com/&scioq=Learning+to+Select+Pivotal+Samples+for+Meta+Re-weighting&hl=en&as_sdt=0,5", + "gs_version_total": 6, + "aff_domain": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;cis.upenn.edu", + "email": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;cis.upenn.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25812", + "title": "Learning to Select Prototypical Parts for Interpretable Sequential Data Modeling", + "track": "main", + "status": "Technical", + "abstract": "Prototype-based interpretability methods provide intuitive explanations of model prediction by comparing samples to a reference set of memorized exemplars or typical representatives in terms of similarity. In the field of sequential data modeling, similarity calculations of prototypes are usually based on encoded representation vectors. However, due to highly recursive functions, there is usually a non-negligible disparity between the prototype-based explanations and the original input. 
In this work, we propose a Self-Explaining Selective Model (SESM) that uses a linear combination of prototypical concepts to explain its own predictions. The model employs the idea of case-based reasoning by selecting sub-sequences of the input that mostly activate different concepts as prototypical parts, which users can compare to sub-sequences selected from different example inputs to understand model decisions. For better interpretability, we design multiple constraints including diversity, stability, and locality as training objectives. Extensive experiments in different domains demonstrate that our method exhibits promising interpretability and competitive accuracy.", + "primary_area": "knowledge representation and reasoning", + "author": "Yifei Zhang; Neng Gao; Cunqing Ma", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Zhang_Gao_Ma_2023, title={Learning to Select Prototypical Parts for Interpretable Sequential Data Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25812}, DOI={10.1609/aaai.v37i5.25812}, abstractNote={Prototype-based interpretability methods provide intuitive explanations of model prediction by comparing samples to a reference set of memorized exemplars or typical representatives in terms of similarity. In the field of sequential data modeling, similarity calculations of prototypes are usually based on encoded representation vectors. However, due to highly recursive functions, there is usually a non-negligible disparity between the prototype-based explanations and the original input. In this work, we propose a Self-Explaining Selective Model (SESM) that uses a linear combination of prototypical concepts to explain its own predictions. 
The model employs the idea of case-based reasoning by selecting sub-sequences of the input that mostly activate different concepts as prototypical parts, which users can compare to sub-sequences selected from different example inputs to understand model decisions. For better interpretability, we design multiple constraints including diversity, stability, and locality as training objectives. Extensive experiments in different domains demonstrate that our method exhibits promising interpretability and competitive accuracy.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yifei and Gao, Neng and Ma, Cunqing}, year={2023}, month={Jun.}, pages={6612-6620} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25812/25584", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25812", + "pdf_size": 717798, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6945093855316544038&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "iie.ac.cn;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Information Engineering", + "aff_unique_url": "http://www.cas.cn", + "aff_unique_abbr": "CAS", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26500", + "title": "Learning to Select from Multiple Options", + "track": "main", + "status": "Technical", + "abstract": "Many NLP tasks can be regarded as a selection problem from a set of options, such as classification tasks, multi-choice question answering, etc. Textual entailment (TE) has been shown as the state-of-the-art (SOTA) approach to dealing with those selection problems. 
TE treats input texts as premises (P), options as hypotheses (H), then handles the selection problem by modeling (P, H) pairwise. Two limitations: first, the pairwise modeling is unaware of other options, which is less intuitive since humans often determine the best options by comparing competing candidates; second, the inference process of pairwise TE is time-consuming, especially when the option space is large. To deal with the two issues, this work first proposes a contextualized TE model (Context-TE) by appending other k options as the context of the current (P, H) modeling. Context-TE is able to learn more reliable decision for the H since it considers various context. Second, we speed up Context-TE by coming up with Parallel-TE, which learns the decisions of multiple options simultaneously. Parallel-TE significantly improves the inference speed while keeping comparable performance with Context-TE. Our methods are evaluated on three tasks (ultra-fine entity typing, intent detection and multi-choice QA) that are typical selection problems with different sizes of options. Experiments show our models set new SOTA performance; particularly, Parallel-TE is faster than the pairwise TE by k times in inference.", + "primary_area": "speech natural language processing", + "author": "Jiangshu Du; Wenpeng Yin; Congying Xia; Philip S. Yu", + "authorids": "", + "aff": "University of Illinois at Chicago; Penn State University; Salesforce Research; University of Illinois at Chicago", + "bibtex": "@article{Du_Yin_Xia_Yu_2023, title={Learning to Select from Multiple Options}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26500}, DOI={10.1609/aaai.v37i11.26500}, abstractNote={Many NLP tasks can be regarded as a selection problem from a set of options, such as classification tasks, multi-choice question answering, etc. Textual entailment (TE) has been shown as the state-of-the-art (SOTA) approach to dealing with those selection problems. 
TE treats input texts as premises (P), options as hypotheses (H), then handles the selection problem by modeling (P, H) pairwise. Two limitations: first, the pairwise modeling is unaware of other options, which is less intuitive since humans often determine the best options by comparing competing candidates; second, the inference process of pairwise TE is time-consuming, especially when the option space is large. To deal with the two issues, this work first proposes a contextualized TE model (Context-TE) by appending other k options as the context of the current (P, H) modeling. Context-TE is able to learn more reliable decision for the H since it considers various context. Second, we speed up Context-TE by coming up with Parallel-TE, which learns the decisions of multiple options simultaneously. Parallel-TE significantly improves the inference speed while keeping comparable performance with Context-TE. Our methods are evaluated on three tasks (ultra-fine entity typing, intent detection and multi-choice QA) that are typical selection problems with different sizes of options. 
Experiments show our models set new SOTA performance; particularly, Parallel-TE is faster than the pairwise TE by k times in inference.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Jiangshu and Yin, Wenpeng and Xia, Congying and Yu, Philip S.}, year={2023}, month={Jun.}, pages={12754-12762} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26500/26272", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26500", + "pdf_size": 643463, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11866676252102873622&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "uic.edu;psu.edu;salesforce.com;uic.edu", + "email": "uic.edu;psu.edu;salesforce.com;uic.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Illinois at Chicago;Penn State University;Salesforce", + "aff_unique_dep": ";;Salesforce Research", + "aff_unique_url": "https://www.uic.edu;https://www.psu.edu;https://research.salesforce.com", + "aff_unique_abbr": "UIC;PSU;Salesforce", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26371", + "title": "Learning to Shape Rewards Using a Game of Two Partners", + "track": "main", + "status": "Technical", + "abstract": "Reward shaping (RS) is a powerful method in reinforcement learning (RL) for overcoming the problem of sparse or uninformative rewards. However, RS typically relies on manually engineered shaping-reward functions whose construction is time-consuming and error-prone. It also requires domain knowledge which runs contrary to the goal of autonomous learning. 
We introduce Reinforcement Learning Optimising Shaping Algorithm (ROSA), an automated reward shaping framework in which the shaping-reward function is constructed in a Markov game between two agents. A reward-shaping agent (Shaper) uses switching controls to determine which states to add shaping rewards for more efficient learning while the other agent (Controller) learns the optimal policy for the task using these shaped rewards. We prove that ROSA, which adopts existing RL algorithms, learns to construct a shaping-reward function that is beneficial to the task thus ensuring efficient convergence to high performance policies. We demonstrate ROSA\u2019s properties in three didactic experiments and show its superior performance against state-of-the-art RS algorithms in challenging sparse reward environments.", + "primary_area": "multiagent systems", + "author": "David Mguni; Taher Jafferjee; Jianhong Wang; Nicolas Perez-Nieves; Wenbin Song; Feifei Tong; Matthew Taylor; Tianpei Yang; Zipeng Dai; Hui Chen; Jiangcheng Zhu; Kun Shao; Jun Wang; Yaodong Yang", + "authorids": "", + "aff": "Huawei R&D; Huawei R&D; University of Manchester, UK; Imperial College London, UK; Shanghai Tech University, China; Huawei R&D; University of Alberta, Edmonton, Canada+Alberta Machine Intelligence Institute, Edmonton, Canada; University of Alberta, Edmonton, Canada+Alberta Machine Intelligence Institute, Edmonton, Canada; Huawei R&D; University College London, UK; Huawei R&D; Huawei R&D; University College London, UK; Peking University, Beijing, China", + "bibtex": "@article{Mguni_Jafferjee_Wang_Perez-Nieves_Song_Tong_Taylor_Yang_Dai_Chen_Zhu_Shao_Wang_Yang_2023, title={Learning to Shape Rewards Using a Game of Two Partners}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26371}, DOI={10.1609/aaai.v37i10.26371}, abstractNote={Reward shaping (RS) is a powerful method in reinforcement learning (RL) for overcoming the problem of sparse or uninformative rewards. 
However, RS typically relies on manually engineered shaping-reward functions whose construction is time-consuming and error-prone. It also requires domain knowledge which runs contrary to the goal of autonomous learning. We introduce Reinforcement Learning Optimising Shaping Algorithm (ROSA), an automated reward shaping framework in which the shaping-reward function is constructed in a Markov game between two agents. A reward-shaping agent (Shaper) uses switching controls to determine which states to add shaping rewards for more efficient learning while the other agent (Controller) learns the optimal policy for the task using these shaped rewards. We prove that ROSA, which adopts existing RL algorithms, learns to construct a shaping-reward function that is beneficial to the task thus ensuring efficient convergence to high performance policies. We demonstrate ROSA\u2019s properties in three didactic experiments and show its superior performance against state-of-the-art RS algorithms in challenging sparse reward environments.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mguni, David and Jafferjee, Taher and Wang, Jianhong and Perez-Nieves, Nicolas and Song, Wenbin and Tong, Feifei and Taylor, Matthew and Yang, Tianpei and Dai, Zipeng and Chen, Hui and Zhu, Jiangcheng and Shao, Kun and Wang, Jun and Yang, Yaodong}, year={2023}, month={Jun.}, pages={11604-11612} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26371/26143", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26371", + "pdf_size": 3210236, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2724759105135043079&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "hotmail.com; ; ; ; ; ; ; ; ; ; ; ; ;pku.edu.cn", + "email": "hotmail.com; ; ; ; ; ; ; ; ; ; ; ; ;pku.edu.cn", + "github": "", + "project": "", + "author_num": 14, + "aff_unique_index": "0;0;1;2;3;0;4+5;4+5;0;6;0;0;6;7", 
+ "aff_unique_norm": "Huawei;University of Manchester;Imperial College London;Shanghai Tech University;University of Alberta;Alberta Machine Intelligence Institute;University College London;Peking University", + "aff_unique_dep": "R&D;;;;;;;", + "aff_unique_url": "https://www.huawei.com;https://www.manchester.ac.uk;https://www.imperial.ac.uk;http://www.shanghaitech.edu.cn;https://www.ualberta.ca;https://www.amii.ca;https://www.ucl.ac.uk;http://www.pku.edu.cn", + "aff_unique_abbr": "Huawei;UoM;ICL;STU;UAlberta;AMII;UCL;Peking U", + "aff_campus_unique_index": "1+1;1+1;2", + "aff_campus_unique": ";Edmonton;Beijing", + "aff_country_unique_index": "0;0;1;1;0;0;2+2;2+2;0;1;0;0;1;0", + "aff_country_unique": "China;United Kingdom;Canada" + }, + { + "id": "article-25468", + "title": "Learning to Super-resolve Dynamic Scenes for Neuromorphic Spike Camera", + "track": "main", + "status": "Technical", + "abstract": "Spike camera is a kind of neuromorphic sensor that uses a novel ``integrate-and-fire'' mechanism to generate a continuous spike stream to record the dynamic light intensity at extremely high temporal resolution. However, as a trade-off for high temporal resolution, its spatial resolution is limited, resulting in inferior reconstruction details. To address this issue, this paper develops a network (SpikeSR-Net) to super-resolve a high-resolution image sequence from the low-resolution binary spike streams. SpikeSR-Net is designed based on the observation model of spike camera and exploits both the merits of model-based and learning-based methods. To deal with the limited representation capacity of binary data, a pixel-adaptive spike encoder is proposed to convert spikes to latent representation to infer clues on intensity and motion. Then, a motion-aligned super resolver is employed to exploit long-term correlation, so that the dense sampling in temporal domain can be exploited to enhance the spatial resolution without introducing motion blur. 
Experimental results show that SpikeSR-Net is promising in super-resolving higher-quality images for spike camera.", + "primary_area": "computer vision iii", + "author": "Jing Zhao; Ruiqin Xiong; Jian Zhang; Rui Zhao; Hangfan Liu; Tiejun Huang", + "authorids": "", + "aff": "Institute of Digital Media, School of Computer Science, Peking University+National Engineering Research Center of Visual Technology (NERCVT), Peking University+Beijing Academy of Artificial Intelligence; Institute of Digital Media, School of Computer Science, Peking University+National Engineering Research Center of Visual Technology (NERCVT), Peking University+Beijing Academy of Artificial Intelligence; School of Electronic and Computer Engineering, Peking University Shenzhen Graduate School; Institute of Digital Media, School of Computer Science, Peking University+National Engineering Research Center of Visual Technology (NERCVT), Peking University+Beijing Academy of Artificial Intelligence; Center for Biomedical Image Computing and Analytics, University of Pennsylvania; Institute of Digital Media, School of Computer Science, Peking University+National Engineering Research Center of Visual Technology (NERCVT), Peking University+Beijing Academy of Artificial Intelligence", + "bibtex": "@article{Zhao_Xiong_Zhang_Zhao_Liu_Huang_2023, title={Learning to Super-resolve Dynamic Scenes for Neuromorphic Spike Camera}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25468}, DOI={10.1609/aaai.v37i3.25468}, abstractNote={Spike camera is a kind of neuromorphic sensor that uses a novel ``integrate-and-fire\u2019\u2019 mechanism to generate a continuous spike stream to record the dynamic light intensity at extremely high temporal resolution. However, as a trade-off for high temporal resolution, its spatial resolution is limited, resulting in inferior reconstruction details. 
To address this issue, this paper develops a network (SpikeSR-Net) to super-resolve a high-resolution image sequence from the low-resolution binary spike streams. SpikeSR-Net is designed based on the observation model of spike camera and exploits both the merits of model-based and learning-based methods. To deal with the limited representation capacity of binary data, a pixel-adaptive spike encoder is proposed to convert spikes to latent representation to infer clues on intensity and motion. Then, a motion-aligned super resolver is employed to exploit long-term correlation, so that the dense sampling in temporal domain can be exploited to enhance the spatial resolution without introducing motion blur. Experimental results show that SpikeSR-Net is promising in super-resolving higher-quality images for spike camera.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Jing and Xiong, Ruiqin and Zhang, Jian and Zhao, Rui and Liu, Hangfan and Huang, Tiejun}, year={2023}, month={Jun.}, pages={3579-3587} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25468/25240", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25468", + "pdf_size": 19399519, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8318995706789588797&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;upenn.edu;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;upenn.edu;pku.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0+1;0+0+1;0;0+0+1;2;0+0+1", + "aff_unique_norm": "Peking University;Beijing Academy of Artificial Intelligence;University of Pennsylvania", + "aff_unique_dep": "School of Computer Science;;Center for Biomedical Image Computing and Analytics", + "aff_unique_url": "http://www.pku.edu.cn;https://www.baaic.cn;https://www.upenn.edu", + "aff_unique_abbr": 
"PKU;BAAI;UPenn", + "aff_campus_unique_index": ";;1;;", + "aff_campus_unique": ";Shenzhen Graduate School", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+0+0;1;0+0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26491", + "title": "Learning towards Selective Data Augmentation for Dialogue Generation", + "track": "main", + "status": "Technical", + "abstract": "As it is cumbersome and expensive to acquire a huge amount of data for training neural dialog models, data augmentation is proposed to effectively utilize existing training samples.\nHowever, current data augmentation techniques on the dialog generation task mostly augment all cases in the training dataset without considering the intrinsic attributes between different cases.\nWe argue that not all cases are beneficial for augmentation task, and the cases suitable for augmentation should obey the following two attributes: \n(1) low-quality (the dialog model cannot generate a high-quality response for the case),\n(2) representative (the case should represent the property of the whole dataset).\nHerein, we explore this idea by proposing a Selective Data Augmentation framework (SDA) for the response generation task.\nSDA employs a dual adversarial network to select the lowest quality and most representative data points for augmentation in one stage. 
\nExtensive experiments conducted on two publicly available datasets, i.e., DailyDialog and OpenSubtitles, show that our framework can improve the response generation performance with respect to various metrics", + "primary_area": "speech natural language processing", + "author": "Xiuying Chen; Mingzhe Li; Jiayi Zhang; Xiaoqiang Xia; Chen Wei; Jianwei Cui; Xin Gao; Xiangliang Zhang; Rui Yan", + "authorids": "", + "aff": "Computational Bioscience Research Center, KAUST; Ant Group; Xiaomi AI Lab; Xiaomi AI Lab; Xiaomi AI Lab; Xiaomi AI Lab; Computational Bioscience Research Center, KAUST; University of Notre Dame; Gaoling School of Artificial Intelligence, Renmin University of China", + "bibtex": "@article{Chen_Li_Zhang_Xia_Wei_Cui_Gao_Zhang_Yan_2023, title={Learning towards Selective Data Augmentation for Dialogue Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26491}, DOI={10.1609/aaai.v37i11.26491}, abstractNote={As it is cumbersome and expensive to acquire a huge amount of data for training neural dialog models, data augmentation is proposed to effectively utilize existing training samples.\nHowever, current data augmentation techniques on the dialog generation task mostly augment all cases in the training dataset without considering the intrinsic attributes between different cases.\nWe argue that not all cases are beneficial for augmentation task, and the cases suitable for augmentation should obey the following two attributes: (1) low-quality (the dialog model cannot generate a high-quality response for the case),\n(2) representative (the case should represent the property of the whole dataset).\nHerein, we explore this idea by proposing a Selective Data Augmentation framework (SDA) for the response generation task.\nSDA employs a dual adversarial network to select the lowest quality and most representative data points for augmentation in one stage. 
Extensive experiments conducted on two publicly available datasets, i.e., DailyDialog and OpenSubtitles, show that our framework can improve the response generation performance with respect to various metrics}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Xiuying and Li, Mingzhe and Zhang, Jiayi and Xia, Xiaoqiang and Wei, Chen and Cui, Jianwei and Gao, Xin and Zhang, Xiangliang and Yan, Rui}, year={2023}, month={Jun.}, pages={12673-12681} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26491/26263", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26491", + "pdf_size": 337745, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8824141523839732791&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "kaust.edu.sa;antgroup.com; ; ; ; ;kaust.edu.sa; ;ruc.edu.cn", + "email": "kaust.edu.sa;antgroup.com; ; ; ; ;kaust.edu.sa; ;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;2;2;2;2;0;3;4", + "aff_unique_norm": "King Abdullah University of Science and Technology;Ant Group;Xiaomi Corporation;University of Notre Dame;Renmin University of China", + "aff_unique_dep": "Computational Bioscience Research Center;;Xiaomi AI Lab;;Gaoling School of Artificial Intelligence", + "aff_unique_url": "https://www.kaust.edu.sa;https://www.antgroup.com;https://www.xiaomi.com;https://www.nd.edu;http://www.ruc.edu.cn", + "aff_unique_abbr": "KAUST;Ant Group;Xiaomi;Notre Dame;RUC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;1;1;1;1;1;0;2;1", + "aff_country_unique": "Saudi Arabia;China;United States" + }, + { + "id": "article-26043", + "title": "Learning with Partial Labels from Semi-supervised Perspective", + "track": "main", + "status": "Technical", + "abstract": "Partial Label (PL) learning refers to the task of learning from the partially labeled data, where 
each training instance is ambiguously equipped with a set of candidate labels but only one is valid. Advances in the recent deep PL learning literature have shown that the deep learning paradigms, e.g., self-training, contrastive learning, or class activate values, can achieve promising performance. Inspired by the impressive success of deep Semi-Supervised (SS) learning, we transform the PL learning problem into the SS learning problem, and propose a novel PL learning method, namely Partial Label learning with Semi-supervised Perspective (PLSP). Specifically, we first form the pseudo-labeled dataset by selecting a small number of reliable pseudo-labeled instances with high-confidence prediction scores and treating the remaining instances as pseudo-unlabeled ones. Then we design a SS learning objective, consisting of a supervised loss for pseudo-labeled instances and a semantic consistency regularization for pseudo-unlabeled instances. We further introduce a complementary regularization for those non-candidate labels to constrain the model predictions on them to be as small as possible. Empirical results demonstrate that PLSP significantly outperforms the existing PL baseline methods, especially on high ambiguity levels. 
Code available: https://github.com/changchunli/PLSP.", + "primary_area": "machine learning ii", + "author": "Ximing Li; Yuanzhi Jiang; Changchun Li; Yiyuan Wang; Jihong Ouyang", + "authorids": "", + "aff": "College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China; College of Information Science and Technology, Northeast Normal University, China+Key Laboratory of Applied Statistics of MOE, Northeast Normal University, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China", + "bibtex": "@article{Li_Jiang_Li_Wang_Ouyang_2023, title={Learning with Partial Labels from Semi-supervised Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26043}, DOI={10.1609/aaai.v37i7.26043}, abstractNote={Partial Label (PL) learning refers to the task of learning from the partially labeled data, where each training instance is ambiguously equipped with a set of candidate labels but only one is valid. Advances in the recent deep PL learning literature have shown that the deep learning paradigms, e.g., self-training, contrastive learning, or class activate values, can achieve promising performance. Inspired by the impressive success of deep Semi-Supervised (SS) learning, we transform the PL learning problem into the SS learning problem, and propose a novel PL learning method, namely Partial Label learning with Semi-supervised Perspective (PLSP). 
Specifically, we first form the pseudo-labeled dataset by selecting a small number of reliable pseudo-labeled instances with high-confidence prediction scores and treating the remaining instances as pseudo-unlabeled ones. Then we design a SS learning objective, consisting of a supervised loss for pseudo-labeled instances and a semantic consistency regularization for pseudo-unlabeled instances. We further introduce a complementary regularization for those non-candidate labels to constrain the model predictions on them to be as small as possible. Empirical results demonstrate that PLSP significantly outperforms the existing PL baseline methods, especially on high ambiguity levels. Code available: https://github.com/changchunli/PLSP.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Ximing and Jiang, Yuanzhi and Li, Changchun and Wang, Yiyuan and Ouyang, Jihong}, year={2023}, month={Jun.}, pages={8666-8674} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26043/25815", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26043", + "pdf_size": 829564, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5105017697180782280&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;mails.jlu.edu.cn;gmail.com;nenu.edu.cn;jlu.edu.cn", + "email": "gmail.com;mails.jlu.edu.cn;gmail.com;nenu.edu.cn;jlu.edu.cn", + "github": "https://github.com/changchunli/PLSP", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;1+1;0+0", + "aff_unique_norm": "Jilin University;Northeast Normal University", + "aff_unique_dep": "College of Computer Science and Technology;College of Information Science and Technology", + "aff_unique_url": "http://www.jlu.edu.cn;http://www.nenu.edu.cn", + "aff_unique_abbr": "JLU;NENU", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + 
"aff_country_unique": "China" + }, + { + "id": "article-26278", + "title": "Learning-Assisted Algorithm Unrolling for Online Optimization with Budget Constraints", + "track": "main", + "status": "Technical", + "abstract": "Online optimization with multiple budget constraints is challenging since the online decisions over a short time horizon are coupled together by strict inventory constraints. The existing manually-designed algorithms cannot achieve satisfactory average performance for this setting because they often need a large number of time steps for convergence and/or may violate the inventory constraints. In this paper, we propose a new machine learning (ML) assisted unrolling approach, called LAAU (Learning-Assisted Algorithm Unrolling), which unrolls the agent\u2019s online decision pipeline and leverages an ML model for updating the Lagrangian multiplier online. For efficient training via backpropagation, we derive gradients of the decision pipeline over time. We also provide the average cost bounds for two cases when training data is available offline and collected online, respectively. Finally, we present numerical results to highlight that LAAU can outperform the existing baselines.", + "primary_area": "machine learning iv", + "author": "Jianyi Yang; Shaolei Ren", + "authorids": "", + "aff": "University of California, Riverside; University of California, Riverside", + "bibtex": "@article{Yang_Ren_2023, title={Learning-Assisted Algorithm Unrolling for Online Optimization with Budget Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26278}, DOI={10.1609/aaai.v37i9.26278}, abstractNote={Online optimization with multiple budget constraints is challenging since the online decisions over a short time horizon are coupled together by strict inventory constraints. 
The existing manually-designed algorithms cannot achieve satisfactory average performance for this setting because they often need a large number of time steps for convergence and/or may violate the inventory constraints. In this paper, we propose a new machine learning (ML) assisted unrolling approach, called LAAU (Learning-Assisted Algorithm Unrolling), which unrolls the agent\u2019s online decision pipeline and leverages an ML model for updating the Lagrangian multiplier online. For efficient training via backpropagation, we derive gradients of the decision pipeline over time. We also provide the average cost bounds for two cases when training data is available offline and collected online, respectively. Finally, we present numerical results to highlight that LAAU can outperform the existing baselines.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Jianyi and Ren, Shaolei}, year={2023}, month={Jun.}, pages={10771-10779} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26278/26050", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26278", + "pdf_size": 280542, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5877374599978658061&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ucr.edu;ucr.edu", + "email": "ucr.edu;ucr.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, Riverside", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucr.edu", + "aff_unique_abbr": "UCR", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Riverside", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26414", + "title": "Learning-Augmented Algorithms for Online TSP on the Line", + "track": "main", + "status": "Technical", + "abstract": "We study the online Traveling Salesman Problem (TSP) on the line 
augmented with machine-learned predictions. In the classical problem, there is a stream of requests released over time along the real line. The goal is to minimize the makespan of the algorithm. We distinguish between the open variant and the closed one, in which we additionally require the algorithm to return to the origin after serving all requests. The state of the art is a 1.64-competitive algorithm and a 2.04-competitive algorithm for the closed and open variants, respectively. In both cases, a tight lower bound is known.\n\nIn both variants, our primary prediction model involves predicted positions of the requests. We introduce algorithms that (i) obtain a tight 1.5 competitive ratio for the closed variant and a 1.66 competitive ratio for the open variant in the case of perfect predictions, (ii) are robust against unbounded prediction error, and (iii) are smooth, i.e., their performance degrades gracefully as the prediction error increases.\n\nMoreover, we further investigate the learning-augmented setting in the open variant by additionally considering a prediction for the last request served by the optimal offline algorithm. Our algorithm for this enhanced setting obtains a 1.33 competitive ratio with perfect predictions while also being smooth and robust, beating the lower bound of 1.44 we show for our original prediction setting for the open variant. 
Also, we provide a lower bound of 1.25 for this enhanced setting.", + "primary_area": "planning routing and scheduling", + "author": "Themistoklis Gouleakis; Konstantinos Lakis; Golnoosh Shahkarami", + "authorids": "", + "aff": "National University of Singapore; ETH Z\u00fcrich; Max Planck Institute for Informatics + Universit\u00e4t des Saarlandes", + "bibtex": "@article{Gouleakis_Lakis_Shahkarami_2023, title={Learning-Augmented Algorithms for Online TSP on the Line}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26414}, DOI={10.1609/aaai.v37i10.26414}, abstractNote={We study the online Traveling Salesman Problem (TSP) on the line augmented with machine-learned predictions. In the classical problem, there is a stream of requests released over time along the real line. The goal is to minimize the makespan of the algorithm. We distinguish between the open variant and the closed one, in which we additionally require the algorithm to return to the origin after serving all requests. The state of the art is a 1.64-competitive algorithm and a 2.04-competitive algorithm for the closed and open variants, respectively. In both cases, a tight lower bound is known. In both variants, our primary prediction model involves predicted positions of the requests. We introduce algorithms that (i) obtain a tight 1.5 competitive ratio for the closed variant and a 1.66 competitive ratio for the open variant in the case of perfect predictions, (ii) are robust against unbounded prediction error, and (iii) are smooth, i.e., their performance degrades gracefully as the prediction error increases. Moreover, we further investigate the learning-augmented setting in the open variant by additionally considering a prediction for the last request served by the optimal offline algorithm. 
Our algorithm for this enhanced setting obtains a 1.33 competitive ratio with perfect predictions while also being smooth and robust, beating the lower bound of 1.44 we show for our original prediction setting for the open variant. Also, we provide a lower bound of 1.25 for this enhanced setting.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gouleakis, Themistoklis and Lakis, Konstantinos and Shahkarami, Golnoosh}, year={2023}, month={Jun.}, pages={11989-11996} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26414/26186", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26414", + "pdf_size": 149391, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8083758276170853706&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "nus.edu.sg;student.ethz.ch;mpi-inf.mpg.de", + "email": "nus.edu.sg;student.ethz.ch;mpi-inf.mpg.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+3", + "aff_unique_norm": "National University of Singapore;ETH Z\u00fcrich;Max Planck Institute for Informatics;Universit\u00e4t des Saarlandes", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.nus.edu.sg;https://www.ethz.ch;https://mpi-inf.mpg.de;https://www.uni-saarland.de", + "aff_unique_abbr": "NUS;ETHZ;MPII;UDS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2+2", + "aff_country_unique": "Singapore;Switzerland;Germany" + }, + { + "id": "article-25397", + "title": "Less Is More Important: An Attention Module Guided by Probability Density Function for Convolutional Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Attention modules, which adaptively weight and refine features according to the importance of the input, have become a critical technique to boost the capability of convolutional neural networks. 
However, most existing attention modules are heuristic without a sound interpretation, and thus, require empirical engineering to design structure and operators within the modules. To handle the above issue, based on our 'less is more important' observation, we propose an Attention Module guided by Probability Density Function (PDF), dubbed PdfAM, which enjoys a rational motivation and requires few empirical structure designs. Concretely, we observe that pixels with less occurrence are prone to be textural details or foreground objects with much importance to aid vision tasks. Thus, with PDF values adopted as a smooth and anti-noise alternative to the pixel occurrence frequency, we design our PdfAM by first estimating the PDF based on some distribution assumption, and then predicting a 3D attention map via applying a negative correlation between the attention weights and the estimated PDF values. Furthermore, we develop learnable PDF-rescale parameters so as to adaptively transform the estimated PDF and predict a customized negative correlation. Experiments show that our PdfAM consistently boosts various networks under both high- and low-level vision tasks, and also performs favorably against other attention modules in terms of accuracy and convergence.", + "primary_area": "computer vision iii", + "author": "Jingfen Xie; Jian Zhang", + "authorids": "", + "aff": "Peking University Shenzhen Graduate School, Shenzhen, China; Peking University Shenzhen Graduate School, Shenzhen, China", + "bibtex": "@article{Xie_Zhang_2023, title={Less Is More Important: An Attention Module Guided by Probability Density Function for Convolutional Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25397}, DOI={10.1609/aaai.v37i3.25397}, abstractNote={Attention modules, which adaptively weight and refine features according to the importance of the input, have become a critical technique to boost the capability of convolutional neural networks. 
However, most existing attention modules are heuristic without a sound interpretation, and thus, require empirical engineering to design structure and operators within the modules. To handle the above issue, based on our \u2019less is more important\u2019 observation, we propose an Attention Module guided by Probability Density Function (PDF), dubbed PdfAM, which enjoys a rational motivation and requires few empirical structure designs. Concretely, we observe that pixels with less occurrence are prone to be textural details or foreground objects with much importance to aid vision tasks. Thus, with PDF values adopted as a smooth and anti-noise alternative to the pixel occurrence frequency, we design our PdfAM by first estimating the PDF based on some distribution assumption, and then predicting a 3D attention map via applying a negative correlation between the attention weights and the estimated PDF values. Furthermore, we develop learnable PDF-rescale parameters so as to adaptively transform the estimated PDF and predict a customized negative correlation. 
Experiments show that our PdfAM consistently boosts various networks under both high- and low-level vision tasks, and also performs favorably against other attention modules in terms of accuracy and convergence.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Jingfen and Zhang, Jian}, year={2023}, month={Jun.}, pages={2947-2955} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25397/25169", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25397", + "pdf_size": 10121184, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1779771625228691276&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26977", + "title": "Less Is More: Volatility Forecasting with Contrastive Representation Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Earnings conference calls are indicative information events for volatility forecasting, which is essential for financial risk management and asset pricing. Although recent volatility forecasting models have explored the textual content of conference calls for prediction, they suffer from modeling the long-text and representing the risk-relevant information. This work proposes to identify key sentences for robust and interpretable transcript representation learning based on the cognitive theory. 
Specifically, we introduce TextRank to find key sentences and leverage attention mechanism to screen out the candidates by modeling the semantic correlations. Upon on the structural information of earning conference calls, we propose a structure-based contrastive learning method to facilitate the effective transcript representation. Empirical results on the benchmark dataset demonstrate the superiority of our model over competitive baselines in volatility forecasting.", + "primary_area": "", + "author": "Yanlong Huang; Wenxin Tai; Ting Zhong; Kunpeng Zhang", + "authorids": "", + "aff": "University of Electronic Science and Technology; University of Electronic Science and Technology; University of Electronic Science and Technology+University of Maryland, College Park; University of Maryland, College Park", + "bibtex": "@article{Huang_Tai_Zhong_Zhang_2024, title={Less Is More: Volatility Forecasting with Contrastive Representation Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26977}, DOI={10.1609/aaai.v37i13.26977}, abstractNote={Earnings conference calls are indicative information events for volatility forecasting, which is essential for financial risk management and asset pricing. Although recent volatility forecasting models have explored the textual content of conference calls for prediction, they suffer from modeling the long-text and representing the risk-relevant information. This work proposes to identify key sentences for robust and interpretable transcript representation learning based on the cognitive theory. Specifically, we introduce TextRank to find key sentences and leverage attention mechanism to screen out the candidates by modeling the semantic correlations. Upon on the structural information of earning conference calls, we propose a structure-based contrastive learning method to facilitate the effective transcript representation. 
Empirical results on the benchmark dataset demonstrate the superiority of our model over competitive baselines in volatility forecasting.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Yanlong and Tai, Wenxin and Zhong, Ting and Zhang, Kunpeng}, year={2024}, month={Jul.}, pages={16234-16235} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26977/26749", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26977", + "pdf_size": 177443, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10640777219305892402&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com;uestc.edu.cn;umd.edu", + "email": "gmail.com;gmail.com;uestc.edu.cn;umd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;1", + "aff_unique_norm": "University of Electronic Science and Technology of China;University of Maryland", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.umd.edu", + "aff_unique_abbr": "UESTC;UMD", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";College Park", + "aff_country_unique_index": "0;0;0+1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25558", + "title": "Let Graph Be the Go Board: Gradient-Free Node Injection Attack for Graph Neural Networks via Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Graph Neural Networks (GNNs) have drawn significant attentions over the years and been broadly applied to essential applications requiring solid robustness or vigorous security standards, such as product recommendation and user behavior modeling. Under these scenarios, exploiting GNN's vulnerabilities and further downgrading its performance become extremely incentive for adversaries. 
Previous attackers mainly focus on structural perturbations or node injections to the existing graphs, guided by gradients from the surrogate models. Although they deliver promising results, several limitations still exist. For the structural perturbation attack, to launch a proposed attack, adversaries need to manipulate the existing graph topology, which is impractical in most circumstances. Whereas for the node injection attack, though being more practical, current approaches require training surrogate models to simulate a white-box setting, which results in significant performance downgrade when the surrogate architecture diverges from the actual victim model. To bridge these gaps, in this paper, we study the problem of black-box node injection attack, without training a potentially misleading surrogate model. Specifically, we model the node injection attack as a Markov decision process and propose Gradient-free Graph Advantage Actor Critic, namely G2A2C, a reinforcement learning framework in the fashion of advantage actor critic. By directly querying the victim model, G2A2C learns to inject highly malicious nodes with extremely limited attacking budgets, while maintaining a similar node feature distribution. Through our comprehensive experiments over eight acknowledged benchmark datasets with different characteristics, we demonstrate the superior performance of our proposed G2A2C over the existing state-of-the-art attackers. 
Source code is publicly available at: https://github.com/jumxglhf/G2A2C.", + "primary_area": "data mining and knowledge management", + "author": "Mingxuan Ju; Yujie Fan; Chuxu Zhang; Yanfang Ye", + "authorids": "", + "aff": "University of Notre Dame; Case Western Reserve University; Brandeis University; University of Notre Dame", + "bibtex": "@article{Ju_Fan_Zhang_Ye_2023, title={Let Graph Be the Go Board: Gradient-Free Node Injection Attack for Graph Neural Networks via Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25558}, DOI={10.1609/aaai.v37i4.25558}, abstractNote={Graph Neural Networks (GNNs) have drawn significant attentions over the years and been broadly applied to essential applications requiring solid robustness or vigorous security standards, such as product recommendation and user behavior modeling. Under these scenarios, exploiting GNN\u2019s vulnerabilities and further downgrading its performance become extremely incentive for adversaries. Previous attackers mainly focus on structural perturbations or node injections to the existing graphs, guided by gradients from the surrogate models. Although they deliver promising results, several limitations still exist. For the structural perturbation attack, to launch a proposed attack, adversaries need to manipulate the existing graph topology, which is impractical in most circumstances. Whereas for the node injection attack, though being more practical, current approaches require training surrogate models to simulate a white-box setting, which results in significant performance downgrade when the surrogate architecture diverges from the actual victim model. To bridge these gaps, in this paper, we study the problem of black-box node injection attack, without training a potentially misleading surrogate model. 
Specifically, we model the node injection attack as a Markov decision process and propose Gradient-free Graph Advantage Actor Critic, namely G2A2C, a reinforcement learning framework in the fashion of advantage actor critic. By directly querying the victim model, G2A2C learns to inject highly malicious nodes with extremely limited attacking budgets, while maintaining a similar node feature distribution. Through our comprehensive experiments over eight acknowledged benchmark datasets with different characteristics, we demonstrate the superior performance of our proposed G2A2C over the existing state-of-the-art attackers. Source code is publicly available at: https://github.com/jumxglhf/G2A2C.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ju, Mingxuan and Fan, Yujie and Zhang, Chuxu and Ye, Yanfang}, year={2023}, month={Jun.}, pages={4383-4390} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25558/25330", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25558", + "pdf_size": 2618000, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6377861388152259813&as_sdt=20005&sciodt=0,9&hl=en", + "gs_version_total": 8, + "aff_domain": "nd.edu;case.edu;brandeis.edu;nd.edu", + "email": "nd.edu;case.edu;brandeis.edu;nd.edu", + "github": "https://github.com/jumxglhf/G2A2C", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Notre Dame;Case Western Reserve University;Brandeis University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nd.edu;https://www.case.edu;https://www.brandeis.edu", + "aff_unique_abbr": "Notre Dame;CWRU;Brandeis", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26333", + "title": "Let the Data Choose: Flexible and Diverse Anchor Graph Fusion for Scalable Multi-View 
Clustering", + "track": "main", + "status": "Technical", + "abstract": "In the past few years, numerous multi-view graph clustering algorithms have been proposed to enhance the clustering performance by exploring information from multiple views. Despite the superior performance, the high time and space expenditures limit their scalability. Accordingly, anchor graph learning has been introduced to alleviate the computational complexity. However, existing approaches can be further improved by the following considerations: (i) Existing anchor-based methods share the same number of anchors across views. This strategy violates the diversity and flexibility of multi-view data distribution. (ii) Searching for the optimal anchor number within hyper-parameters takes much extra tuning time, which makes existing methods impractical. (iii) How to flexibly fuse multi-view anchor graphs of diverse sizes has not been well explored in existing literature. To address the above issues, we propose a novel anchor-based method termed Flexible and Diverse Anchor Graph Fusion for Scalable Multi-view Clustering (FDAGF) in this paper. Instead of manually tuning optimal anchor with massive hyper-parameters, we propose to optimize the contribution weights of a group of pre-defined anchor numbers to avoid extra time expenditure among views. Most importantly, we propose a novel hybrid fusion strategy for multi-size anchor graphs with theoretical proof, which allows flexible and diverse anchor graph fusion. Then, an efficient linear optimization algorithm is proposed to solve the resultant problem. Comprehensive experimental results demonstrate the effectiveness and efficiency of our proposed framework. 
The source code is available at https://github.com/Jeaninezpp/FDAGF.", + "primary_area": "machine learning iv", + "author": "Pei Zhang; Siwei Wang; Liang Li; Changwang Zhang; Xinwang Liu; En Zhu; Zhe Liu; Lu Zhou; Lei Luo", + "authorids": "", + "aff": "School of Computer, National University of Defense Technology, Changsha, China, 410073; School of Computer, National University of Defense Technology, Changsha, China, 410073; School of Computer, National University of Defense Technology, Changsha, China, 410073; Huawei Poisson Lab, Shenzhen, China, 518129; School of Computer, National University of Defense Technology, Changsha, China, 410073; School of Computer, National University of Defense Technology, Changsha, China, 410073; Nanjing University of Aeronautics and Astronautics, Nanjing, China, 210000; Nanjing University of Aeronautics and Astronautics, Nanjing, China, 210000; School of Computer, National University of Defense Technology, Changsha, China, 410073", + "bibtex": "@article{Zhang_Wang_Li_Zhang_Liu_Zhu_Liu_Zhou_Luo_2023, title={Let the Data Choose: Flexible and Diverse Anchor Graph Fusion for Scalable Multi-View Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26333}, DOI={10.1609/aaai.v37i9.26333}, abstractNote={In the past few years, numerous multi-view graph clustering algorithms have been proposed to enhance the clustering performance by exploring information from multiple views. Despite the superior performance, the high time and space expenditures limit their scalability. Accordingly, anchor graph learning has been introduced to alleviate the computational complexity. However, existing approaches can be further improved by the following considerations: (i) Existing anchor-based methods share the same number of anchors across views. This strategy violates the diversity and flexibility of multi-view data distribution. 
(ii) Searching for the optimal anchor number within hyper-parameters takes much extra tuning time, which makes existing methods impractical. (iii) How to flexibly fuse multi-view anchor graphs of diverse sizes has not been well explored in existing literature. To address the above issues, we propose a novel anchor-based method termed Flexible and Diverse Anchor Graph Fusion for Scalable Multi-view Clustering (FDAGF) in this paper. Instead of manually tuning optimal anchor with massive hyper-parameters, we propose to optimize the contribution weights of a group of pre-defined anchor numbers to avoid extra time expenditure among views. Most importantly, we propose a novel hybrid fusion strategy for multi-size anchor graphs with theoretical proof, which allows flexible and diverse anchor graph fusion. Then, an efficient linear optimization algorithm is proposed to solve the resultant problem. Comprehensive experimental results demonstrate the effectiveness and efficiency of our proposed framework. 
The source code is available at https://github.com/Jeaninezpp/FDAGF.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Pei and Wang, Siwei and Li, Liang and Zhang, Changwang and Liu, Xinwang and Zhu, En and Liu, Zhe and Zhou, Lu and Luo, Lei}, year={2023}, month={Jun.}, pages={11262-11269} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26333/26105", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26333", + "pdf_size": 1877150, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15202990944525110037&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "nudt.edu.cn;nudt.edu.cn;alumni.hust.edu.cn;foxmail.com;nudt.edu.cn;nudt.edu.cn;nuaa.edu.cn;nuaa.edu.cn;nudt.edu.cn", + "email": "nudt.edu.cn;nudt.edu.cn;alumni.hust.edu.cn;foxmail.com;nudt.edu.cn;nudt.edu.cn;nuaa.edu.cn;nuaa.edu.cn;nudt.edu.cn", + "github": "https://github.com/Jeaninezpp/FDAGF", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;1;0;0;2;2;0", + "aff_unique_norm": "National University of Defense Technology;Huawei;Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "School of Computer;Poisson Lab;", + "aff_unique_url": ";https://www.huawei.com;http://www.nuaa.edu.cn", + "aff_unique_abbr": ";Huawei;NUAA", + "aff_campus_unique_index": "0;0;0;1;0;0;2;2;0", + "aff_campus_unique": "Changsha;Shenzhen;Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26191", + "title": "Leveraging Contaminated Datasets to Learn Clean-Data Distribution with Purified Generative Adversarial Networks", + "track": "main", + "status": "Technical", + "abstract": "Generative adversarial networks (GANs) are known for their strong abilities on capturing the underlying distribution of training instances. Since the seminal work of GAN, many variants of GAN have been proposed. 
However, existing GANs are almost established on the assumption that the training dataset is clean. But in many real-world applications, this may not hold, that is, the training dataset may be contaminated by a proportion of undesired instances. When training on such datasets, existing GANs will learn a mixture distribution of desired and contaminated instances, rather than the desired distribution of desired data only (target distribution). To learn the target distribution from contaminated datasets, two purified generative adversarial networks (PuriGAN) are developed, in which the discriminators are augmented with the capability to distinguish between target and contaminated instances by leveraging an extra dataset solely composed of contamination instances. We prove that under some mild conditions, the proposed PuriGANs are guaranteed to converge to the distribution of desired instances. Experimental results on several datasets demonstrate that the proposed PuriGANs are able to generate much better images from the desired distribution than comparable baselines when trained on contaminated datasets. In addition, we also demonstrate the usefulness of PuriGAN on downstream applications by applying it to the tasks of semi-supervised anomaly detection on contaminated datasets and PU-learning. 
Experimental results show that PuriGAN is able to deliver the best performance over comparable baselines on both tasks.", + "primary_area": "machine learning iii", + "author": "Bowen Tian; Qinliang Su; Jianxing Yu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Guangdong Key Laboratory of Big Data Analysis and Processing, Guangzhou, China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Guangdong Key Laboratory of Big Data Analysis and Processing, Guangzhou, China; School of Artificial Intelligence, Sun Yat-sen University, Guangdong, China", + "bibtex": "@article{Tian_Su_Yu_2023, title={Leveraging Contaminated Datasets to Learn Clean-Data Distribution with Purified Generative Adversarial Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26191}, DOI={10.1609/aaai.v37i8.26191}, abstractNote={Generative adversarial networks (GANs) are known for their strong abilities on capturing the underlying distribution of training instances. Since the seminal work of GAN, many variants of GAN have been proposed. However, existing GANs are almost established on the assumption that the training dataset is clean. But in many real-world applications, this may not hold, that is, the training dataset may be contaminated by a proportion of undesired instances. When training on such datasets, existing GANs will learn a mixture distribution of desired and contaminated instances, rather than the desired distribution of desired data only (target distribution). To learn the target distribution from contaminated datasets, two purified generative adversarial networks (PuriGAN) are developed, in which the discriminators are augmented with the capability to distinguish between target and contaminated instances by leveraging an extra dataset solely composed of contamination instances. 
We prove that under some mild conditions, the proposed PuriGANs are guaranteed to converge to the distribution of desired instances. Experimental results on several datasets demonstrate that the proposed PuriGANs are able to generate much better images from the desired distribution than comparable baselines when trained on contaminated datasets. In addition, we also demonstrate the usefulness of PuriGAN on downstream applications by applying it to the tasks of semi-supervised anomaly detection on contaminated datasets and PU-learning. Experimental results show that PuriGAN is able to deliver the best performance over comparable baselines on both tasks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Bowen and Su, Qinliang and Yu, Jianxing}, year={2023}, month={Jun.}, pages={9989-9996} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26191/25963", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26191", + "pdf_size": 1972997, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17072011197043600735&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0", + "aff_unique_norm": "Sun Yat-sen University;Guangdong Key Laboratory of Big Data Analysis and Processing", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.sysu.edu.cn;", + "aff_unique_abbr": "SYSU;", + "aff_campus_unique_index": "0+0;0+0;1", + "aff_campus_unique": "Guangzhou;Guangdong", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26484", + "title": "Leveraging Modality-Specific Representations for Audio-Visual Speech Recognition via Reinforcement Learning", + "track": "main", + 
"status": "Technical", + "abstract": "Audio-visual speech recognition (AVSR) has gained remarkable success for ameliorating the noise-robustness of speech recognition. Mainstream methods focus on fusing audio and visual inputs to obtain modality-invariant representations. However, such representations are prone to over-reliance on audio modality as it is much easier to recognize than video modality in clean conditions. As a result, the AVSR model underestimates the importance of visual stream in face of noise corruption. To this end, we leverage visual modality-specific representations to provide stable complementary information for the AVSR task. Specifically, we propose a reinforcement learning (RL) based framework called MSRL, where the agent dynamically harmonizes modality-invariant and modality-specific representations in the auto-regressive decoding process. We customize a reward function directly related to task-specific metrics (i.e., word error rate), which encourages the MSRL to effectively explore the optimal integration strategy. Experimental results on the LRS3 dataset show that the proposed method achieves state-of-the-art in both clean and various noisy conditions. 
Furthermore, we demonstrate the better generality of MSRL system than other baselines when test set contains unseen noises.", + "primary_area": "speech natural language processing", + "author": "Chen Chen; Yuchen Hu; Qiang Zhang; Heqing Zou; Beier Zhu; Eng Siong Chng", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanyang Technological University; School of Computer Science and Engineering, Nanyang Technological University; ZJU-Hangzhou Global Scientific and Technological Innovation Center+College of Computer Science and Technology, Zhejiang University; School of Computer Science and Engineering, Nanyang Technological University; School of Computer Science and Engineering, Nanyang Technological University; School of Computer Science and Engineering, Nanyang Technological University", + "bibtex": "@article{Chen_Hu_Zhang_Zou_Zhu_Chng_2023, title={Leveraging Modality-Specific Representations for Audio-Visual Speech Recognition via Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26484}, DOI={10.1609/aaai.v37i11.26484}, abstractNote={Audio-visual speech recognition (AVSR) has gained remarkable success for ameliorating the noise-robustness of speech recognition. Mainstream methods focus on fusing audio and visual inputs to obtain modality-invariant representations. However, such representations are prone to over-reliance on audio modality as it is much easier to recognize than video modality in clean conditions. As a result, the AVSR model underestimates the importance of visual stream in face of noise corruption. To this end, we leverage visual modality-specific representations to provide stable complementary information for the AVSR task. Specifically, we propose a reinforcement learning (RL) based framework called MSRL, where the agent dynamically harmonizes modality-invariant and modality-specific representations in the auto-regressive decoding process. 
We customize a reward function directly related to task-specific metrics (i.e., word error rate), which encourages the MSRL to effectively explore the optimal integration strategy. Experimental results on the LRS3 dataset show that the proposed method achieves state-of-the-art in both clean and various noisy conditions. Furthermore, we demonstrate the better generality of MSRL system than other baselines when test set contains unseen noises.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Chen and Hu, Yuchen and Zhang, Qiang and Zou, Heqing and Zhu, Beier and Chng, Eng Siong}, year={2023}, month={Jun.}, pages={12607-12615} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26484/26256", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26484", + "pdf_size": 1043695, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9170839330193410026&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "; ; ; ; ; ", + "email": "; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1+1;0;0;0", + "aff_unique_norm": "Nanyang Technological University;Zhejiang University", + "aff_unique_dep": "School of Computer Science and Engineering;Global Scientific and Technological Innovation Center", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.zju.edu.cn", + "aff_unique_abbr": "NTU;ZJU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0;0;1+1;0;0;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-26659", + "title": "Leveraging Old Knowledge to Continually Learn New Classes in Medical Images", + "track": "aaai special track", + "status": "Technical", + "abstract": "Class-incremental continual learning is a core step towards developing artificial intelligence systems that can continuously adapt to changes in the environment by learning new 
concepts without forgetting those previously learned. This is especially needed in the medical domain where continually learning from new incoming data is required to classify an expanded set of diseases. In this work, we focus on how old knowledge can be leveraged to learn new classes without catastrophic forgetting. We propose a framework that comprises of two main components: (1) a dynamic architecture with expanding representations to preserve previously learned features and accommodate new features; and (2) a training procedure alternating between two objectives to balance the learning of new features while maintaining the model\u2019s performance on old classes. Experiment results on multiple medical datasets show that our solution is able to achieve superior performance over state-of-the-art baselines in terms of class accuracy and forgetting.", + "primary_area": "ai for social impact", + "author": "Evelyn Chee; Mong Li Lee; Wynne Hsu", + "authorids": "", + "aff": "School of Computing, National University of Singapore + Institute of Data Science, National University of Singapore; School of Computing, National University of Singapore + Institute of Data Science, National University of Singapore; School of Computing, National University of Singapore + Institute of Data Science, National University of Singapore", + "bibtex": "@article{Chee_Lee_Hsu_2023, title={Leveraging Old Knowledge to Continually Learn New Classes in Medical Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26659}, DOI={10.1609/aaai.v37i12.26659}, abstractNote={Class-incremental continual learning is a core step towards developing artificial intelligence systems that can continuously adapt to changes in the environment by learning new concepts without forgetting those previously learned. This is especially needed in the medical domain where continually learning from new incoming data is required to classify an expanded set of diseases. 
In this work, we focus on how old knowledge can be leveraged to learn new classes without catastrophic forgetting. We propose a framework that comprises of two main components: (1) a dynamic architecture with expanding representations to preserve previously learned features and accommodate new features; and (2) a training procedure alternating between two objectives to balance the learning of new features while maintaining the model\u2019s performance on old classes. Experiment results on multiple medical datasets show that our solution is able to achieve superior performance over state-of-the-art baselines in terms of class accuracy and forgetting.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chee, Evelyn and Lee, Mong Li and Hsu, Wynne}, year={2023}, month={Jun.}, pages={14178-14186} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26659/26431", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26659", + "pdf_size": 13120853, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5656812921605018861&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "comp.nus.edu.sg;comp.nus.edu.sg;comp.nus.edu.sg", + "email": "comp.nus.edu.sg;comp.nus.edu.sg;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.nus.edu.sg", + "aff_unique_abbr": "NUS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26316", + "title": "Leveraging Structure for Improved Classification of Grouped Biased Data", + "track": "main", + "status": "Technical", + "abstract": "We consider semi-supervised binary classification for applications in which data points are naturally 
grouped (e.g., survey responses grouped by state) and the labeled data is biased (e.g., survey respondents are not representative of the population). The groups overlap in the feature space and consequently the input-output patterns are related across the groups. To model the inherent structure in such data, we assume the partition-projected class-conditional invariance across groups, defined in terms of the group-agnostic feature space. We demonstrate that under this assumption, the group carries additional information about the class, over the group-agnostic features, with provably improved area under the ROC curve. Further assuming invariance of partition-projected class-conditional distributions across both labeled and unlabeled data, we derive a semi-supervised algorithm that explicitly leverages the structure to learn an optimal, group-aware, probability-calibrated classifier, despite the bias in the labeled data. Experiments on synthetic and real data demonstrate the efficacy of our algorithm over suitable baselines and ablative models, spanning standard supervised and semi-supervised learning approaches, with and without incorporating the group directly as a feature.", + "primary_area": "machine learning iv", + "author": "Daniel Zeiberg; Shantanu Jain; Predrag Radivojac", + "authorids": "", + "aff": "Khoury College of Computer Sciences, Northeastern University, Boston, MA 02115, U.S.A.; Khoury College of Computer Sciences, Northeastern University, Boston, MA 02115, U.S.A.; Khoury College of Computer Sciences, Northeastern University, Boston, MA 02115, U.S.A.", + "bibtex": "@article{Zeiberg_Jain_Radivojac_2023, title={Leveraging Structure for Improved Classification of Grouped Biased Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26316}, DOI={10.1609/aaai.v37i9.26316}, abstractNote={We consider semi-supervised binary classification for applications in which data points are naturally grouped (e.g., survey responses grouped by state) 
and the labeled data is biased (e.g., survey respondents are not representative of the population). The groups overlap in the feature space and consequently the input-output patterns are related across the groups. To model the inherent structure in such data, we assume the partition-projected class-conditional invariance across groups, defined in terms of the group-agnostic feature space. We demonstrate that under this assumption, the group carries additional information about the class, over the group-agnostic features, with provably improved area under the ROC curve. Further assuming invariance of partition-projected class-conditional distributions across both labeled and unlabeled data, we derive a semi-supervised algorithm that explicitly leverages the structure to learn an optimal, group-aware, probability-calibrated classifier, despite the bias in the labeled data. Experiments on synthetic and real data demonstrate the efficacy of our algorithm over suitable baselines and ablative models, spanning standard supervised and semi-supervised learning approaches, with and without incorporating the group directly as a feature.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeiberg, Daniel and Jain, Shantanu and Radivojac, Predrag}, year={2023}, month={Jun.}, pages={11113-11120} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26316/26088", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26316", + "pdf_size": 395750, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5791300575202923727&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "northeastern.edu;northeastern.edu;northeastern.edu", + "email": "northeastern.edu;northeastern.edu;northeastern.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Northeastern University", + "aff_unique_dep": "Khoury College of Computer Sciences", 
+ "aff_unique_url": "https://www.northeastern.edu", + "aff_unique_abbr": "NU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Boston", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25168", + "title": "Leveraging Sub-class Discimination for Compositional Zero-Shot Learning", + "track": "main", + "status": "Technical", + "abstract": "Compositional Zero-Shot Learning (CZSL) aims at identifying unseen compositions composed of previously seen attributes and objects during the test phase. In real images, the visual appearances of attributes and objects (primitive concepts) generally interact with each other. Namely, the visual appearances of an attribute may change when composed with different objects, and vice versa. But previous works overlook this important property. In this paper, we introduce a simple yet effective approach with leveraging sub-class discrimination. Specifically, we define the primitive concepts in different compositions as sub-classes, and then maintain the sub-class discrimination to address the above challenge. More specifically, inspired by the observation that the composed recognition models could account for the differences across sub-classes, we first propose to impose the embedding alignment between the composed and disentangled recognition to incorporate sub-class discrimination at the feature level. Then we develop the prototype modulator networks to adjust the class prototypes w.r.t. the composition information, which can enhance sub-class discrimination at the classifier level. We conduct extensive experiments on the challenging benchmark datasets, and the considerable performance improvement over state-of-the-art approaches is achieved, which indicates the effectiveness of our method. 
Our code is available at https://github.com/hxm97/SCD-CZSL.", + "primary_area": "computer vision i", + "author": "Xiaoming Hu; Zilei Wang", + "authorids": "", + "aff": "University of Science and Technology of China, Hefei, China; University of Science and Technology of China, Hefei, China", + "bibtex": "@article{Hu_Wang_2023, title={Leveraging Sub-class Discimination for Compositional Zero-Shot Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25168}, DOI={10.1609/aaai.v37i1.25168}, abstractNote={Compositional Zero-Shot Learning (CZSL) aims at identifying unseen compositions composed of previously seen attributes and objects during the test phase. In real images, the visual appearances of attributes and objects (primitive concepts) generally interact with each other. Namely, the visual appearances of an attribute may change when composed with different objects, and vice versa. But previous works overlook this important property. In this paper, we introduce a simple yet effective approach with leveraging sub-class discrimination. Specifically, we define the primitive concepts in different compositions as sub-classes, and then maintain the sub-class discrimination to address the above challenge. More specifically, inspired by the observation that the composed recognition models could account for the differences across sub-classes, we first propose to impose the embedding alignment between the composed and disentangled recognition to incorporate sub-class discrimination at the feature level. Then we develop the prototype modulator networks to adjust the class prototypes w.r.t. the composition information, which can enhance sub-class discrimination at the classifier level. We conduct extensive experiments on the challenging benchmark datasets, and the considerable performance improvement over state-of-the-art approaches is achieved, which indicates the effectiveness of our method. 
Our code is available at https://github.com/hxm97/SCD-CZSL.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Xiaoming and Wang, Zilei}, year={2023}, month={Jun.}, pages={890-898} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25168/24940", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25168", + "pdf_size": 953353, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12838974990813623297&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/hxm97/SCD-CZSL", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Hefei", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25343", + "title": "Leveraging Weighted Cross-Graph Attention for Visual and Semantic Enhanced Video Captioning Network", + "track": "main", + "status": "Technical", + "abstract": "Video captioning has become a broad and interesting research area. Attention-based encoder-decoder methods are extensively used for caption generation. However, these methods mostly utilize the visual attentive feature to highlight the video regions while overlooked the semantic features of the available captions. These semantic features contain significant information that helps to generate highly informative human description-like captions. Therefore, we propose a novel visual and semantic enhanced video captioning network, named as VSVCap, that efficiently utilizes multiple ground truth captions. 
We aim to generate captions that are visually and semantically enhanced by exploiting both video and text modalities. To achieve this, we propose a fine-grained cross-graph attention mechanism that captures detailed graph embedding correspondence between visual graphs and textual knowledge graphs. We have performed node-level matching and structure-level reasoning between the weighted regional graph and knowledge graph. The proposed network achieves promising results on three benchmark datasets, i.e., YouTube2Text, MSR-VTT, and VATEX. The experimental results show that our network accurately captures all key objects, relationships, and semantically enhanced events of a video to generate human annotation-like captions.", + "primary_area": "computer vision ii", + "author": "Deepali Verma; Arya Haldar; Tanima Dutta", + "authorids": "", + "aff": "Department of Computer Science and Engineering, IIT(BHU) Varanasi; Department of Computer Science and Engineering, IIT(BHU) Varanasi; Department of Computer Science and Engineering, IIT(BHU) Varanasi", + "bibtex": "@article{Verma_Haldar_Dutta_2023, title={Leveraging Weighted Cross-Graph Attention for Visual and Semantic Enhanced Video Captioning Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25343}, DOI={10.1609/aaai.v37i2.25343}, abstractNote={Video captioning has become a broad and interesting research area. Attention-based encoder-decoder methods are extensively used for caption generation. However, these methods mostly utilize the visual attentive feature to highlight the video regions while overlooked the semantic features of the available captions. These semantic features contain significant information that helps to generate highly informative human description-like captions. Therefore, we propose a novel visual and semantic enhanced video captioning network, named as VSVCap, that efficiently utilizes multiple ground truth captions. 
We aim to generate captions that are visually and semantically enhanced by exploiting both video and text modalities. To achieve this, we propose a fine-grained cross-graph attention mechanism that captures detailed graph embedding correspondence between visual graphs and textual knowledge graphs. We have performed node-level matching and structure-level reasoning between the weighted regional graph and knowledge graph. The proposed network achieves promising results on three benchmark datasets, i.e., YouTube2Text, MSR-VTT, and VATEX. The experimental results show that our network accurately captures all key objects, relationships, and semantically enhanced events of a video to generate human annotation-like captions.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Verma, Deepali and Haldar, Arya and Dutta, Tanima}, year={2023}, month={Jun.}, pages={2465-2473} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25343/25115", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25343", + "pdf_size": 402388, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8945991633724810532&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "itbhu.ac.in;itbhu.ac.in;itbhu.ac.in", + "email": "itbhu.ac.in;itbhu.ac.in;itbhu.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology (BHU)", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.iitbhu.ac.in", + "aff_unique_abbr": "IIT(BHU)", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Varanasi", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25429", + "title": "LidarMultiNet: Towards a Unified Multi-Task Network for LiDAR Perception", + "track": "main", + "status": "Technical", + "abstract": "LiDAR-based 3D object 
detection, semantic segmentation, and panoptic segmentation are usually implemented in specialized networks with distinctive architectures that are difficult to adapt to each other. This paper presents LidarMultiNet, a LiDAR-based multi-task network that unifies these three major LiDAR perception tasks. Among its many benefits, a multi-task network can reduce the overall cost by sharing weights and computation among multiple tasks. However, it typically underperforms compared to independently combined single-task models. The proposed LidarMultiNet aims to bridge the performance gap between the multi-task network and multiple single-task networks. At the core of LidarMultiNet is a strong 3D voxel-based encoder-decoder architecture with a Global Context Pooling (GCP) module extracting global contextual features from a LiDAR frame. Task-specific heads are added on top of the network to perform the three LiDAR perception tasks. More tasks can be implemented simply by adding new task-specific heads while introducing little additional cost. A second stage is also proposed to refine the first-stage segmentation and generate accurate panoptic segmentation results. LidarMultiNet is extensively tested on both Waymo Open Dataset and nuScenes dataset, demonstrating for the first time that major LiDAR perception tasks can be unified in a single strong network that is trained end-to-end and achieves state-of-the-art performance. Notably, LidarMultiNet reaches the official 1 place in the Waymo Open Dataset 3D semantic segmentation challenge 2022 with the highest mIoU and the best accuracy for most of the 22 classes on the test set, using only LiDAR points as input. 
It also sets the new state-of-the-art for a single model on the Waymo 3D object detection benchmark and three nuScenes benchmarks.", + "primary_area": "computer vision iii", + "author": "Dongqiangzi Ye; Zixiang Zhou; Weijia Chen; Yufei Xie; Yu Wang; Panqu Wang; Hassan Foroosh", + "authorids": "", + "aff": "TuSimple; TuSimple+University of Central Florida; TuSimple; TuSimple; TuSimple; TuSimple; University of Central Florida", + "bibtex": "@article{Ye_Zhou_Chen_Xie_Wang_Wang_Foroosh_2023, title={LidarMultiNet: Towards a Unified Multi-Task Network for LiDAR Perception}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25429}, DOI={10.1609/aaai.v37i3.25429}, abstractNote={LiDAR-based 3D object detection, semantic segmentation, and panoptic segmentation are usually implemented in specialized networks with distinctive architectures that are difficult to adapt to each other. This paper presents LidarMultiNet, a LiDAR-based multi-task network that unifies these three major LiDAR perception tasks. Among its many benefits, a multi-task network can reduce the overall cost by sharing weights and computation among multiple tasks. However, it typically underperforms compared to independently combined single-task models. The proposed LidarMultiNet aims to bridge the performance gap between the multi-task network and multiple single-task networks. At the core of LidarMultiNet is a strong 3D voxel-based encoder-decoder architecture with a Global Context Pooling (GCP) module extracting global contextual features from a LiDAR frame. Task-specific heads are added on top of the network to perform the three LiDAR perception tasks. More tasks can be implemented simply by adding new task-specific heads while introducing little additional cost. A second stage is also proposed to refine the first-stage segmentation and generate accurate panoptic segmentation results. 
LidarMultiNet is extensively tested on both Waymo Open Dataset and nuScenes dataset, demonstrating for the first time that major LiDAR perception tasks can be unified in a single strong network that is trained end-to-end and achieves state-of-the-art performance. Notably, LidarMultiNet reaches the official 1 place in the Waymo Open Dataset 3D semantic segmentation challenge 2022 with the highest mIoU and the best accuracy for most of the 22 classes on the test set, using only LiDAR points as input. It also sets the new state-of-the-art for a single model on the Waymo 3D object detection benchmark and three nuScenes benchmarks.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Dongqiangzi and Zhou, Zixiang and Chen, Weijia and Xie, Yufei and Wang, Yu and Wang, Panqu and Foroosh, Hassan}, year={2023}, month={Jun.}, pages={3231-3240} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25429/25201", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25429", + "pdf_size": 2085227, + "gs_citation": 93, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2159261789760932048&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0+1;0;0;0;0;1", + "aff_unique_norm": "TuSimple;University of Central Florida", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tusimple.com;https://www.ucf.edu", + "aff_unique_abbr": "TuSimple;UCF", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26292", + "title": "Lifelong Compression Mixture Model via Knowledge Relationship Graph", + "track": "main", + "status": "Technical", + "abstract": "Task-Free Continual Learning (TFCL) represents a challenging scenario for lifelong learning because the model, under 
this paradigm, does not access any task information. The Dynamic Expansion Model (DEM) has shown promising results in this scenario due to its scalability and generalisation power. However, DEM focuses only on addressing forgetting and ignores minimizing the model size, which limits its deployment in practical systems. In this work, we aim to simultaneously address network forgetting and model size optimization by developing the Lifelong Compression Mixture Model (LGMM) equipped with the Maximum Mean Discrepancy (MMD) based expansion criterion for model expansion. A diversity-aware sample selection approach is proposed to selectively store a variety of samples to promote information diversity among the components of the LGMM, which allows more knowledge to be captured with an appropriate model size. In order to avoid having multiple components with similar knowledge in the LGMM, we propose a data-free component discarding mechanism that evaluates a knowledge relation graph matrix describing the relevance between each pair of components. A greedy selection procedure is proposed to identify and remove the redundant components from the LGMM. The proposed discarding mechanism can be performed during or after the training. Experiments on different datasets show that LGMM achieves the best performance for TFCL.", + "primary_area": "machine learning iv", + "author": "Fei Ye; Adrian G. Bors", + "authorids": "", + "aff": "Department of Computer Science, University of York, York YO10 5GH, UK; Department of Computer Science, University of York, York YO10 5GH, UK", + "bibtex": "@article{Ye_Bors_2023, title={Lifelong Compression Mixture Model via Knowledge Relationship Graph}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26292}, DOI={10.1609/aaai.v37i9.26292}, abstractNote={Task-Free Continual Learning (TFCL) represents a challenging scenario for lifelong learning because the model, under this paradigm, does not access any task information. 
The Dynamic Expansion Model (DEM) has shown promising results in this scenario due to its scalability and generalisation power. However, DEM focuses only on addressing forgetting and ignores minimizing the model size, which limits its deployment in practical systems. In this work, we aim to simultaneously address network forgetting and model size optimization by developing the Lifelong Compression Mixture Model (LGMM) equipped with the Maximum Mean Discrepancy (MMD) based expansion criterion for model expansion. A diversity-aware sample selection approach is proposed to selectively store a variety of samples to promote information diversity among the components of the LGMM, which allows more knowledge to be captured with an appropriate model size. In order to avoid having multiple components with similar knowledge in the LGMM, we propose a data-free component discarding mechanism that evaluates a knowledge relation graph matrix describing the relevance between each pair of components. A greedy selection procedure is proposed to identify and remove the redundant components from the LGMM. The proposed discarding mechanism can be performed during or after the training. 
Experiments on different datasets show that LGMM achieves the best performance for TFCL.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Fei and Bors, Adrian G.}, year={2023}, month={Jun.}, pages={10900-10908} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26292/26064", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26292", + "pdf_size": 778129, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5738955392058389916&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "york.ac.uk;york.ac.uk", + "email": "york.ac.uk;york.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of York", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.york.ac.uk", + "aff_unique_abbr": "York", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "York", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25539", + "title": "Lifelong Embedding Learning and Transfer for Growing Knowledge Graphs", + "track": "main", + "status": "Technical", + "abstract": "Existing knowledge graph (KG) embedding models have primarily focused on static KGs. However, real-world KGs do not remain static, but rather evolve and grow in tandem with the development of KG applications. Consequently, new facts and previously unseen entities and relations continually emerge, necessitating an embedding model that can quickly learn and transfer new knowledge through growth. Motivated by this, we delve into an expanding field of KG embedding in this paper, i.e., lifelong KG embedding. We consider knowledge transfer and retention of the learning on growing snapshots of a KG without having to learn embeddings from scratch. 
The proposed model includes a masked KG autoencoder for embedding learning and update, with an embedding transfer strategy to inject the learned knowledge into the new entity and relation embeddings, and an embedding regularization method to avoid catastrophic forgetting. To investigate the impacts of different aspects of KG growth, we construct four datasets to evaluate the performance of lifelong KG embedding. Experimental results show that the proposed model outperforms the state-of-the-art inductive and lifelong embedding baselines.", + "primary_area": "data mining and knowledge management", + "author": "Yuanning Cui; Yuxin Wang; Zequn Sun; Wenqiang Liu; Yiqiao Jiang; Kexin Han; Wei Hu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; Interactive Entertainment Group, Tencent Inc, China; Interactive Entertainment Group, Tencent Inc, China; Interactive Entertainment Group, Tencent Inc, China; State Key Laboratory for Novel Software Technology, Nanjing University, China + National Institute of Healthcare Data Science, Nanjing University, China", + "bibtex": "@article{Cui_Wang_Sun_Liu_Jiang_Han_Hu_2023, title={Lifelong Embedding Learning and Transfer for Growing Knowledge Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25539}, DOI={10.1609/aaai.v37i4.25539}, abstractNote={Existing knowledge graph (KG) embedding models have primarily focused on static KGs. However, real-world KGs do not remain static, but rather evolve and grow in tandem with the development of KG applications. Consequently, new facts and previously unseen entities and relations continually emerge, necessitating an embedding model that can quickly learn and transfer new knowledge through growth. 
Motivated by this, we delve into an expanding field of KG embedding in this paper, i.e., lifelong KG embedding. We consider knowledge transfer and retention of the learning on growing snapshots of a KG without having to learn embeddings from scratch. The proposed model includes a masked KG autoencoder for embedding learning and update, with an embedding transfer strategy to inject the learned knowledge into the new entity and relation embeddings, and an embedding regularization method to avoid catastrophic forgetting. To investigate the impacts of different aspects of KG growth, we construct four datasets to evaluate the performance of lifelong KG embedding. Experimental results show that the proposed model outperforms the state-of-the-art inductive and lifelong embedding baselines.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Yuanning and Wang, Yuxin and Sun, Zequn and Liu, Wenqiang and Jiang, Yiqiao and Han, Kexin and Hu, Wei}, year={2023}, month={Jun.}, pages={4217-4224} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25539/25311", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25539", + "pdf_size": 1364172, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14364465081081947935&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com;gmail.com;tencent.com;tencent.com;tencent.com;nju.edu.cn", + "email": "gmail.com;gmail.com;gmail.com;tencent.com;tencent.com;tencent.com;nju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;1;1;0+0", + "aff_unique_norm": "Nanjing University;Tencent Inc", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology;Interactive Entertainment Group", + "aff_unique_url": "http://www.nju.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "Nanjing U;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25436", + "title": "Lifelong Person Re-identification via Knowledge Refreshing and Consolidation", + "track": "main", + "status": "Technical", + "abstract": "Lifelong person re-identification (LReID) is in significant demand for real-world development as a large amount of ReID data is captured from diverse locations over time and cannot be accessed at once inherently. However, a key challenge for LReID is how to incrementally preserve old knowledge and gradually add new capabilities to the system. Unlike most existing LReID methods, which mainly focus on dealing with catastrophic forgetting, our focus is on a more challenging problem, which is, not only trying to reduce the forgetting on old tasks but also aiming to improve the model performance on both new and old tasks during the lifelong learning process. Inspired by the biological process of human cognition where the somatosensory neocortex and the hippocampus work together in memory consolidation, we formulated a model called Knowledge Refreshing and Consolidation (KRC) that achieves both positive forward and backward transfer. More specifically, a knowledge refreshing scheme is incorporated with the knowledge rehearsal mechanism to enable bi-directional knowledge transfer by introducing a dynamic memory model and an adaptive working model. Moreover, a knowledge consolidation scheme operating on the dual space further improves model stability over the long-term. Extensive evaluations show KRC\u2019s superiority over the state-of-the-art LReID methods with challenging pedestrian benchmarks. 
Code is available at https://github.com/cly234/LReID-KRKC.", + "primary_area": "computer vision iii", + "author": "Chunlin Yu; Ye Shi; Zimo Liu; Shenghua Gao; Jingya Wang", + "authorids": "", + "aff": "ShanghaiTech University; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; Peng Cheng Laboratory; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "bibtex": "@article{Yu_Shi_Liu_Gao_Wang_2023, title={Lifelong Person Re-identification via Knowledge Refreshing and Consolidation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25436}, DOI={10.1609/aaai.v37i3.25436}, abstractNote={Lifelong person re-identification (LReID) is in significant demand for real-world development as a large amount of ReID data is captured from diverse locations over time and cannot be accessed at once inherently. However, a key challenge for LReID is how to incrementally preserve old knowledge and gradually add new capabilities to the system. Unlike most existing LReID methods, which mainly focus on dealing with catastrophic forgetting, our focus is on a more challenging problem, which is, not only trying to reduce the forgetting on old tasks but also aiming to improve the model performance on both new and old tasks during the lifelong learning process. Inspired by the biological process of human cognition where the somatosensory neocortex and the hippocampus work together in memory consolidation, we formulated a model called Knowledge Refreshing and Consolidation (KRC) that achieves both positive forward and backward transfer. More specifically, a knowledge refreshing scheme is incorporated with the knowledge rehearsal mechanism to enable bi-directional knowledge transfer by introducing a dynamic memory model and an adaptive working model. 
Moreover, a knowledge consolidation scheme operating on the dual space further improves model stability over the long-term. Extensive evaluations show KRC\u2019s superiority over the state-of-the-art LReID methods with challenging pedestrian benchmarks. Code is available at https://github.com/cly234/LReID-KRKC.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Chunlin and Shi, Ye and Liu, Zimo and Gao, Shenghua and Wang, Jingya}, year={2023}, month={Jun.}, pages={3295-3303} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25436/25208", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25436", + "pdf_size": 261913, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12295986642706346557&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn;pcl.ac.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn;shanghaitech.edu.cn;pcl.ac.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", + "github": "https://github.com/cly234/LReID-KRKC", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;0+1;0+1", + "aff_unique_norm": "ShanghaiTech University;Shanghai Engineering Research Center of Intelligent Vision and Imaging;Peng Cheng Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.shanghaitech.edu.cn;;http://www.pcl.ac.cn", + "aff_unique_abbr": "ShanghaiTech;;PCL", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26293", + "title": "Lifelong Variational Autoencoder via Online Adversarial Expansion Strategy", + "track": "main", + "status": "Technical", + "abstract": "The Variational Autoencoder (VAE) suffers from a significant loss of information when trained on a non-stationary data distribution. 
This loss in VAE models, called catastrophic forgetting, has not been studied theoretically before. We analyse the forgetting behaviour of a VAE in continual generative modelling by developing a new lower bound on the data likelihood, which interprets the forgetting process as an increase in the probability distance between the generator's distribution and the evolved data distribution. The proposed bound shows that a VAE-based dynamic expansion model can achieve better performance if its capacity increases appropriately considering the shift in the data distribution. Based on this analysis, we propose a novel expansion criterion that aims to preserve the information diversity among the VAE components, while ensuring that it acquires more knowledge with fewer parameters. Specifically, we implement this expansion criterion from the perspective of a multi-player game and propose the Online Adversarial Expansion Strategy (OAES), which considers all previously learned components as well as the currently updated component as multiple players in a game, while an adversary model evaluates their performance. The proposed OAES can dynamically estimate the discrepancy between each player and the adversary without accessing task information. This leads to the gradual addition of new components while ensuring the knowledge diversity among all of them. We show theoretically and empirically that the proposed extension strategy can enable a VAE model to achieve the best performance given an appropriate model size.", + "primary_area": "machine learning iv", + "author": "Fei Ye; Adrian G. 
Bors", + "authorids": "", + "aff": "Department of Computer Science, University of York, York YO10 5GH, UK; Department of Computer Science, University of York, York YO10 5GH, UK", + "bibtex": "@article{Ye_Bors_2023, title={Lifelong Variational Autoencoder via Online Adversarial Expansion Strategy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26293}, DOI={10.1609/aaai.v37i9.26293}, abstractNote={The Variational Autoencoder (VAE) suffers from a significant loss of information when trained on a non-stationary data distribution. This loss in VAE models, called catastrophic forgetting, has not been studied theoretically before. We analyse the forgetting behaviour of a VAE in continual generative modelling by developing a new lower bound on the data likelihood, which interprets the forgetting process as an increase in the probability distance between the generator\u2019s distribution and the evolved data distribution. The proposed bound shows that a VAE-based dynamic expansion model can achieve better performance if its capacity increases appropriately considering the shift in the data distribution. Based on this analysis, we propose a novel expansion criterion that aims to preserve the information diversity among the VAE components, while ensuring that it acquires more knowledge with fewer parameters. Specifically, we implement this expansion criterion from the perspective of a multi-player game and propose the Online Adversarial Expansion Strategy (OAES), which considers all previously learned components as well as the currently updated component as multiple players in a game, while an adversary model evaluates their performance. The proposed OAES can dynamically estimate the discrepancy between each player and the adversary without accessing task information. This leads to the gradual addition of new components while ensuring the knowledge diversity among all of them. 
We show theoretically and empirically that the proposed extension strategy can enable a VAE model to achieve the best performance given an appropriate model size.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Fei and Bors, Adrian G.}, year={2023}, month={Jun.}, pages={10909-10917} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26293/26065", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26293", + "pdf_size": 488363, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14365150414328593901&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 7, + "aff_domain": "york.ac.uk;york.ac.uk", + "email": "york.ac.uk;york.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of York", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.york.ac.uk", + "aff_unique_abbr": "York", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "York", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26449", + "title": "Lifted Inference with Linear Order Axiom", + "track": "main", + "status": "Technical", + "abstract": "We consider the task of weighted first-order model counting (WFOMC) used for probabilistic inference in the area of statistical relational learning. Given a formula \u03c6, domain size n and a pair of weight functions, what is the weighted sum of all models of \u03c6 over a domain of size n? It was shown that computing WFOMC of any logical sentence with at most two logical variables can be done in time polynomial in n. However, it was also shown that the task is #P1-complete once we add the third variable, which inspired the search for extensions of the two-variable fragment that would still permit a running time polynomial in n. 
One of such extension is the two-variable fragment with counting quantifiers. In this paper, we prove that adding a linear order axiom (which forces one of the predicates in \u03c6 to introduce a linear ordering of the domain elements in each model of \u03c6) on top of the counting quantifiers still permits a computation time polynomial in the domain size. We present a new dynamic programming-based algorithm which can compute WFOMC with linear order in time polynomial in n, thus proving our primary claim.", + "primary_area": "reasoning under uncertainty", + "author": "Jan T\u00f3th; Ond\u0159ej Ku\u017eelka", + "authorids": "", + "aff": "Faculty of Electrical Engineering, Czech Technical University in Prague, Prague, Czech Republic; Faculty of Electrical Engineering, Czech Technical University in Prague, Prague, Czech Republic", + "bibtex": "@article{T\u00f3th_Ku\u017eelka_2023, title={Lifted Inference with Linear Order Axiom}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26449}, DOI={10.1609/aaai.v37i10.26449}, abstractNote={We consider the task of weighted first-order model counting (WFOMC) used for probabilistic inference in the area of statistical relational learning. Given a formula \u03c6, domain size n and a pair of weight functions, what is the weighted sum of all models of \u03c6 over a domain of size n? It was shown that computing WFOMC of any logical sentence with at most two logical variables can be done in time polynomial in n. However, it was also shown that the task is #P1-complete once we add the third variable, which inspired the search for extensions of the two-variable fragment that would still permit a running time polynomial in n. One of such extension is the two-variable fragment with counting quantifiers. 
In this paper, we prove that adding a linear order axiom (which forces one of the predicates in \u03c6 to introduce a linear ordering of the domain elements in each model of \u03c6) on top of the counting quantifiers still permits a computation time polynomial in the domain size. We present a new dynamic programming-based algorithm which can compute WFOMC with linear order in time polynomial in n, thus proving our primary claim.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={T\u00f3th, Jan and Ku\u017eelka, Ond\u0159ej}, year={2023}, month={Jun.}, pages={12295-12304} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26449/26221", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26449", + "pdf_size": 215661, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17643040629591739949&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "fel.cvut.cz;fel.cvut.cz", + "email": "fel.cvut.cz;fel.cvut.cz", + "github": "", + "project": "https://arxiv.org/abs/2211.01164", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Czech Technical University in Prague", + "aff_unique_dep": "Faculty of Electrical Engineering", + "aff_unique_url": "https://www.cvut.cz", + "aff_unique_abbr": "CTU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Prague", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Czech Republic" + }, + { + "id": "article-25504", + "title": "Lifting (D)QBF Preprocessing and Solving Techniques to (D)SSAT", + "track": "main", + "status": "Technical", + "abstract": "Dependency stochastic Boolean satisfiability (DSSAT) generalizes stochastic Boolean satisfiability (SSAT) in existential variables being Henkinized allowing their dependencies on randomized variables to be explicitly specified. It allows NEXPTIME problems of reasoning under uncertainty and partial information to be compactly encoded. 
To date, no decision procedure has been implemented for solving DSSAT formulas. This work provides the first such tool by converting DSSAT into SSAT with dependency elimination, similar to converting dependency quantified Boolean formula (DQBF) to quantified Boolean formula (QBF). Moreover, we extend (D)QBF preprocessing techniques and implement the first standalone (D)SSAT preprocessor. Experimental results show that solving DSSAT via dependency elimination is highly applicable and that existing SSAT solvers may benefit from preprocessing.", + "primary_area": "constraint satisfaction and optimization", + "author": "Che Cheng; Jie-Hong R. Jiang", + "authorids": "", + "aff": "Graduate Institute of Electronics Engineering, National Taiwan University, Taipei, Taiwan + Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan; Graduate Institute of Electronics Engineering, National Taiwan University, Taipei, Taiwan + Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan", + "bibtex": "@article{Cheng_Jiang_2023, title={Lifting (D)QBF Preprocessing and Solving Techniques to (D)SSAT}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25504}, DOI={10.1609/aaai.v37i4.25504}, abstractNote={Dependency stochastic Boolean satisfiability (DSSAT) generalizes stochastic Boolean satisfiability (SSAT) in existential variables being Henkinized allowing their dependencies on randomized variables to be explicitly specified. It allows NEXPTIME problems of reasoning under uncertainty and partial information to be compactly encoded. To date, no decision procedure has been implemented for solving DSSAT formulas. This work provides the first such tool by converting DSSAT into SSAT with dependency elimination, similar to converting dependency quantified Boolean formula (DQBF) to quantified Boolean formula (QBF). Moreover, we extend (D)QBF preprocessing techniques and implement the first standalone (D)SSAT preprocessor. 
Experimental results show that solving DSSAT via dependency elimination is highly applicable and that existing SSAT solvers may benefit from preprocessing.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Che and Jiang, Jie-Hong R.}, year={2023}, month={Jun.}, pages={3906-3914} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25504/25276", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25504", + "pdf_size": 175146, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16474104342833814752&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ntu.edu.tw;ntu.edu.tw", + "email": "ntu.edu.tw;ntu.edu.tw", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "National Taiwan University", + "aff_unique_dep": "Graduate Institute of Electronics Engineering", + "aff_unique_url": "https://www.ntu.edu.tw", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Taipei", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-26946", + "title": "Lightweight Transformer for Multi-Modal Object Detection (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "It has become a common practice for many perceptual systems to integrate information from multiple sensors to improve the accuracy of object detection. For example, autonomous vehicles use visible light, and infrared (IR) information to ensure that the car can cope with complex weather conditions. However, the accuracy of the algorithm is usually a trade-off between the computational complexity and memory consumption. In this study, we evaluate the performance and complexity of different fusion operators in multi-modal object detection tasks. 
On top of that, a Poolformer-based fusion operator (PoolFuser) is proposed to enhance the accuracy of detecting targets without compromising the efficiency of the detection framework.", + "primary_area": "", + "author": "Yue Cao; Yanshuo Fan; Junchi Bin; Zheng Liu", + "authorids": "", + "aff": "School of Engineering, The University of British Columbia, Kelowna, BC, Canada; School of Engineering, The University of British Columbia, Kelowna, BC, Canada; School of Engineering, The University of British Columbia, Kelowna, BC, Canada; School of Engineering, The University of British Columbia, Kelowna, BC, Canada", + "bibtex": "@article{Cao_Fan_Bin_Liu_2024, title={Lightweight Transformer for Multi-Modal Object Detection (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26946}, DOI={10.1609/aaai.v37i13.26946}, abstractNote={It has become a common practice for many perceptual systems to integrate information from multiple sensors to improve the accuracy of object detection. For example, autonomous vehicles use visible light, and infrared (IR) information to ensure that the car can cope with complex weather conditions. However, the accuracy of the algorithm is usually a trade-off between the computational complexity and memory consumption. In this study, we evaluate the performance and complexity of different fusion operators in multi-modal object detection tasks. 
On top of that, a Poolformer-based fusion operator (PoolFuser) is proposed to enhance the accuracy of detecting targets without compromising the efficiency of the detection framework.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Yue and Fan, Yanshuo and Bin, Junchi and Liu, Zheng}, year={2024}, month={Jul.}, pages={16172-16173} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26946/26718", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26946", + "pdf_size": 1085656, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=923218022841364302&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ubc.ca; ; ;ubc.ca", + "email": "mail.ubc.ca; ; ;ubc.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The University of British Columbia", + "aff_unique_dep": "School of Engineering", + "aff_unique_url": "https://www.ubc.ca", + "aff_unique_abbr": "UBC", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Kelowna", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26194", + "title": "Linear Regularizers Enforce the Strict Saddle Property", + "track": "main", + "status": "Technical", + "abstract": "Satisfaction of the strict saddle property has become a standard assumption in non-convex optimization, and it ensures that many first-order optimization\nalgorithms will almost always escape saddle points. However, functions exist in machine learning that do not satisfy this property, such as the loss function of a neural network with at least two hidden layers. First-order methods such as gradient descent may converge to non-strict saddle points of such functions, and there do not currently exist any first-order methods that reliably escape non-strict saddle points. 
To address this need, we demonstrate that regularizing a function with a linear term enforces the strict saddle property, and we provide justification for only regularizing locally, i.e., when the norm of the gradient falls below a certain threshold. We analyze bifurcations that may result from this form of regularization, and then we provide a selection rule for regularizers that depends only on the gradient of an objective function. This rule is shown to guarantee that gradient descent will escape the neighborhoods around a broad class of non-strict saddle points, and this behavior is demonstrated on numerical examples of non-strict saddle points common in the optimization literature.", + "primary_area": "machine learning iii", + "author": "Matthew Ubl; Matthew Hale; Kasra Yazdani", + "authorids": "", + "aff": "Department of Mechanical and Aerospace Engineering, University of Florida, Gainesville, FL, 32611, USA; Department of Mechanical and Aerospace Engineering, University of Florida, Gainesville, FL, 32611, USA; Department of Mechanical and Aerospace Engineering, University of Florida, Gainesville, FL, 32611, USA", + "bibtex": "@article{Ubl_Hale_Yazdani_2023, title={Linear Regularizers Enforce the Strict Saddle Property}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26194}, DOI={10.1609/aaai.v37i8.26194}, abstractNote={Satisfaction of the strict saddle property has become a standard assumption in non-convex optimization, and it ensures that many first-order optimization\nalgorithms will almost always escape saddle points. However, functions exist in machine learning that do not satisfy this property, such as the loss function of a neural network with at least two hidden layers. First-order methods such as gradient descent may converge to non-strict saddle points of such functions, and there do not currently exist any first-order methods that reliably escape non-strict saddle points. 
To address this need, we demonstrate that regularizing a function with a linear term enforces the strict saddle property, and we provide justification for only regularizing locally, i.e., when the norm of the gradient falls below a certain threshold. We analyze bifurcations that may result from this form of regularization, and then we provide a selection rule for regularizers that depends only on the gradient of an objective function. This rule is shown to guarantee that gradient descent will escape the neighborhoods around a broad class of non-strict saddle points, and this behavior is demonstrated on numerical examples of non-strict saddle points common in the optimization literature.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ubl, Matthew and Hale, Matthew and Yazdani, Kasra}, year={2023}, month={Jun.}, pages={10017-10024} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26194/25966", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26194", + "pdf_size": 488965, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1811485659560190742&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ufl.edu;ufl.edu;ufl.edu", + "email": "ufl.edu;ufl.edu;ufl.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Florida", + "aff_unique_dep": "Department of Mechanical and Aerospace Engineering", + "aff_unique_url": "https://www.ufl.edu", + "aff_unique_abbr": "UF", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Gainesville", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25209", + "title": "Linking People across Text and Images Based on Social Relation Reasoning", + "track": "main", + "status": "Technical", + "abstract": "As a sub-task of visual grounding, linking people across text and images aims to localize 
target people in images with corresponding sentences. Existing approaches tend to capture superficial features of people (e.g., dress and location) that suffer from the incompleteness information across text and images. We observe that humans are adept at exploring social relations to assist identifying people. Therefore, we propose a Social Relation Reasoning (SRR) model to address the aforementioned issues. Firstly, we design a Social Relation Extraction (SRE) module to extract social relations between people in the input sentence. Specially, the SRE module based on zero-shot learning is able to extract social relations even though they are not defined in the existing datasets. A Reasoning based Cross-modal Matching (RCM) module is further used to generate matching matrices by reasoning on the social relations and visual features. Experimental results show that the accuracy of our proposed SRR model outperforms the state-of-the-art models on the challenging datasets Who's Waldo and FL: MSRE, by more than 5\\% and 7\\%, respectively. 
Our source code is available at https://github.com/VILAN-Lab/SRR.", + "primary_area": "computer vision i", + "author": "Yang Lei; Peizhi Zhao; Pijian Li; Yi Cai; Qingbao Huang", + "authorids": "", + "aff": "School of Electrical Engineering, Guangxi University, Nanning, China; School of Electrical Engineering, Guangxi University, Nanning, China; School of Electrical Engineering, Guangxi University, Nanning, China; School of Software Engineering, South China University of Technology, Guangzhou, China + Key Laboratory of Big Data and Intelligent Robot (SCUT), MOE of China + Peng Cheng Laboratory, Shenzhen, China; School of Electrical Engineering, Guangxi University, Nanning, China + Guangxi Key Laboratory of Multimedia Communications and Network Technology", + "bibtex": "@article{Lei_Zhao_Li_Cai_Huang_2023, title={Linking People across Text and Images Based on Social Relation Reasoning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25209}, DOI={10.1609/aaai.v37i1.25209}, abstractNote={As a sub-task of visual grounding, linking people across text and images aims to localize target people in images with corresponding sentences. Existing approaches tend to capture superficial features of people (e.g., dress and location) that suffer from the incompleteness information across text and images. We observe that humans are adept at exploring social relations to assist identifying people. Therefore, we propose a Social Relation Reasoning (SRR) model to address the aforementioned issues. Firstly, we design a Social Relation Extraction (SRE) module to extract social relations between people in the input sentence. Specially, the SRE module based on zero-shot learning is able to extract social relations even though they are not defined in the existing datasets. A Reasoning based Cross-modal Matching (RCM) module is further used to generate matching matrices by reasoning on the social relations and visual features. 
Experimental results show that the accuracy of our proposed SRR model outperforms the state-of-the-art models on the challenging datasets Who\u2019s Waldo and FL: MSRE, by more than 5\\% and 7\\%, respectively. Our source code is available at https://github.com/VILAN-Lab/SRR.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lei, Yang and Zhao, Peizhi and Li, Pijian and Cai, Yi and Huang, Qingbao}, year={2023}, month={Jun.}, pages={1260-1268} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25209/24981", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25209", + "pdf_size": 2464107, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:BlKn13RojJAJ:scholar.google.com/&scioq=Linking+People+across+Text+and+Images+Based+on+Social+Relation+Reasoning&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "st.gxu.edu.cn;st.gxu.edu.cn;st.gxu.edu.cn;scut.edu.cn;gxu.edu.cn", + "email": "st.gxu.edu.cn;st.gxu.edu.cn;st.gxu.edu.cn;scut.edu.cn;gxu.edu.cn", + "github": "https://github.com/VILAN-Lab/SRR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+1+2;0+0", + "aff_unique_norm": "Guangxi University;South China University of Technology;Peng Cheng Laboratory", + "aff_unique_dep": "School of Electrical Engineering;School of Software Engineering;", + "aff_unique_url": "http://www.gxu.edu.cn;https://www.scut.edu.cn;", + "aff_unique_abbr": ";SCUT;", + "aff_campus_unique_index": "0;0;0;1+3;0", + "aff_campus_unique": "Nanning;Guangzhou;;Shenzhen", + "aff_country_unique_index": "0;0;0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26314", + "title": "Linking Sketch Patches by Learning Synonymous Proximity for Graphic Sketch Representation", + "track": "main", + "status": "Technical", + "abstract": "Graphic sketch representations are effective for representing sketches. 
Existing methods take the patches cropped from sketches as the graph nodes, and construct the edges based on sketch's drawing order or Euclidean distances on the canvas. However, the drawing order of a sketch may not be unique, while the patches from semantically related parts of a sketch may be far away from each other on the canvas. In this paper, we propose an order-invariant, semantics-aware method for graphic sketch representations. The cropped sketch patches are linked according to their global semantics or local geometric shapes, namely the synonymous proximity, by computing the cosine similarity between the captured patch embeddings. Such constructed edges are learnable to adapt to the variation of sketch drawings, which enable the message passing among synonymous patches. Aggregating the messages from synonymous patches by graph convolutional networks plays a role of denoising, which is beneficial to produce robust patch embeddings and accurate sketch representations. Furthermore, we enforce a clustering constraint over the embeddings jointly with the network learning. The synonymous patches are self-organized as compact clusters, and their embeddings are guided to move towards their assigned cluster centroids. It raises the accuracy of the computed synonymous proximity. 
Experimental results show that our method significantly improves the performance on both controllable sketch synthesis and sketch healing.", + "primary_area": "machine learning iv", + "author": "Sicong Zang; Shikui Tu; Lei Xu", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China", + "bibtex": "@article{Zang_Tu_Xu_2023, title={Linking Sketch Patches by Learning Synonymous Proximity for Graphic Sketch Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26314}, DOI={10.1609/aaai.v37i9.26314}, abstractNote={Graphic sketch representations are effective for representing sketches. Existing methods take the patches cropped from sketches as the graph nodes, and construct the edges based on sketch\u2019s drawing order or Euclidean distances on the canvas. However, the drawing order of a sketch may not be unique, while the patches from semantically related parts of a sketch may be far away from each other on the canvas. In this paper, we propose an order-invariant, semantics-aware method for graphic sketch representations. The cropped sketch patches are linked according to their global semantics or local geometric shapes, namely the synonymous proximity, by computing the cosine similarity between the captured patch embeddings. Such constructed edges are learnable to adapt to the variation of sketch drawings, which enable the message passing among synonymous patches. Aggregating the messages from synonymous patches by graph convolutional networks plays a role of denoising, which is beneficial to produce robust patch embeddings and accurate sketch representations. Furthermore, we enforce a clustering constraint over the embeddings jointly with the network learning. 
The synonymous patches are self-organized as compact clusters, and their embeddings are guided to move towards their assigned cluster centroids. It raises the accuracy of the computed synonymous proximity. Experimental results show that our method significantly improves the performance on both controllable sketch synthesis and sketch healing.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zang, Sicong and Tu, Shikui and Xu, Lei}, year={2023}, month={Jun.}, pages={11096-11103} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26314/26086", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26314", + "pdf_size": 932453, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14507140114867186469&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26906", + "title": "Literacy and STEM Teachers Adapt AI Ethics Curriculum", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "This article examines the ways secondary computer science and English Language Arts teachers in urban, suburban, and semi-rural schools adapted a project-based AI ethics curriculum to make it better fit their local contexts. AI ethics is an urgent topic with tangible consequences for youths\u2019 current and future lives, but one that is rarely taught in schools. 
Few teachers have formal training in this area as it is an emerging field even at the university level. Exploring AI ethics involves examining biases related to race, gender, and social class, a challenging task for all teachers, and an unfamiliar one for most computer science teachers. It also requires teaching technical content which falls outside the comfort zone of most humanities teachers. Although none of our partner teachers had previously taught an AI ethics project, this study demonstrates that their expertise and experience in other domains played an essential role in providing high quality instruction. Teachers designed and redesigned tasks and incorporated texts and apps to ensure the AI ethics project would adhere to district and department level requirements; they led equity-focused inquiry in a way that both protected vulnerable students and accounted for local cultures and politics; and they adjusted technical content and developed hands-on computer science experiences to better challenge and engage their students. 
We use Mishra and Kohler\u2019s TPACK framework to highlight the ways teachers leveraged their own expertise in some areas, while relying on materials and support from our research team in others, to create stronger learning experiences.", + "primary_area": "", + "author": "Benjamin Walsh; Bridget Dalton; Stacey Forsyth; Tom Yeh", + "authorids": "", + "aff": "Department of Computer Science, University of Colorado Boulder; School of Education, University of Colorado Boulder; Science Discovery, University of Colorado Boulder; Department of Computer Science, University of Colorado Boulder", + "bibtex": "@article{Walsh_Dalton_Forsyth_Yeh_2024, title={Literacy and STEM Teachers Adapt AI Ethics Curriculum}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26906}, DOI={10.1609/aaai.v37i13.26906}, abstractNote={This article examines the ways secondary computer science and English Language Arts teachers in urban, suburban, and semi-rural schools adapted a project-based AI ethics curriculum to make it better fit their local contexts. AI ethics is an urgent topic with tangible consequences for youths\u2019 current and future lives, but one that is rarely taught in schools. Few teachers have formal training in this area as it is an emerging field even at the university level. Exploring AI ethics involves examining biases related to race, gender, and social class, a challenging task for all teachers, and an unfamiliar one for most computer science teachers. It also requires teaching technical content which falls outside the comfort zone of most humanities teachers. Although none of our partner teachers had previously taught an AI ethics project, this study demonstrates that their expertise and experience in other domains played an essential role in providing high quality instruction. 
Teachers designed and redesigned tasks and incorporated texts and apps to ensure the AI ethics project would adhere to district and department level requirements; they led equity-focused inquiry in a way that both protected vulnerable students and accounted for local cultures and politics; and they adjusted technical content and developed hands-on computer science experiences to better challenge and engage their students. We use Mishra and Kohler\u2019s TPACK framework to highlight the ways teachers leveraged their own expertise in some areas, while relying on materials and support from our research team in others, to create stronger learning experiences.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Walsh, Benjamin and Dalton, Bridget and Forsyth, Stacey and Yeh, Tom}, year={2024}, month={Jul.}, pages={16048-16055} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26906/26678", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26906", + "pdf_size": 619407, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7123380006440368803&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "colorado.edu;colorado.edu;colorado.edu;colorado.edu", + "email": "colorado.edu;colorado.edu;colorado.edu;colorado.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Colorado Boulder", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.colorado.edu", + "aff_unique_abbr": "CU Boulder", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Boulder", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26014", + "title": "LoNe Sampler: Graph Node Embeddings by Coordinated Local Neighborhood Sampling", + "track": "main", + "status": "Technical", + "abstract": "Local graph neighborhood sampling is 
a fundamental computational problem that is at the heart of algorithms for node representation learning. \nSeveral works have presented algorithms for learning discrete node embeddings where graph nodes are represented by discrete features such as attributes of neighborhood nodes. Discrete embeddings offer several advantages compared to continuous word2vec-like node embeddings: ease of computation, scalability, and interpretability. We present LoNe Sampler, a suite of algorithms for generating discrete node embeddings by Local Neighborhood Sampling, and address two shortcomings of previous work. First, our algorithms have rigorously understood theoretical properties. Second, we show how to generate approximate explicit vector maps that avoid the expensive computation of a Gram matrix for the training of a kernel model. Experiments on benchmark datasets confirm the theoretical findings and demonstrate the advantages of the proposed methods.", + "primary_area": "machine learning ii", + "author": "Konstantin Kutzkov", + "authorids": "", + "aff": "Teva Pharmaceuticals", + "bibtex": "@article{Kutzkov_2023, title={LoNe Sampler: Graph Node Embeddings by Coordinated Local Neighborhood Sampling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26014}, DOI={10.1609/aaai.v37i7.26014}, abstractNote={Local graph neighborhood sampling is a fundamental computational problem that is at the heart of algorithms for node representation learning. Several works have presented algorithms for learning discrete node embeddings where graph nodes are represented by discrete features such as attributes of neighborhood nodes. Discrete embeddings offer several advantages compared to continuous word2vec-like node embeddings: ease of computation, scalability, and interpretability. We present LoNe Sampler, a suite of algorithms for generating discrete node embeddings by Local Neighborhood Sampling, and address two shortcomings of previous work. 
First, our algorithms have rigorously understood theoretical properties. Second, we show how to generate approximate explicit vector maps that avoid the expensive computation of a Gram matrix for the training of a kernel model. Experiments on benchmark datasets confirm the theoretical findings and demonstrate the advantages of the proposed methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kutzkov, Konstantin}, year={2023}, month={Jun.}, pages={8413-8420} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26014/25786", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26014", + "pdf_size": 229553, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5386107479980648453&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com", + "email": "gmail.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Teva Pharmaceuticals", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tevapharm.com", + "aff_unique_abbr": "Teva", + "aff_country_unique_index": "0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25675", + "title": "Loan Fraud Users Detection in Online Lending Leveraging Multiple Data Views", + "track": "main", + "status": "Technical", + "abstract": "In recent years, online lending platforms have been becoming attractive for micro-financing and popular in financial industries. However, such online lending platforms face a high risk of failure due to the lack of expertise on borrowers' creditworthness. Thus, risk forecasting is important to avoid economic loss. Detecting loan fraud users in advance is at the heart of risk forecasting. The purpose of fraud user (borrower) detection is to predict whether one user will fail to make required payments in the future. Detecting fraud users depend on historical loan records. 
However, a large proportion of users lack such information, especially for new users. In this paper, we attempt to detect loan fraud users from cross domain heterogeneous data views, including user attributes, installed app lists, app installation behaviors, and app-in logs, which compensate for the lack of historical loan records. However, it is difficult to effectively fuse the multiple heterogeneous data views. Moreover, some samples miss one or even more data views, increasing the difficulty in fusion. To address the challenges, we propose a novel end-to-end deep multiview learning approach, which encodes heterogeneous data views into homogeneous ones, generates the missing views based on the learned relationship among all the views, and then fuses all the views together to a comprehensive view for identifying fraud users. Our model is evaluated on a real-world large-scale dataset consisting of 401,978 loan records of 228,117 users from January 1, 2019, to September 30, 2019, achieving the state-of-the-art performance.", + "primary_area": "domain s of application", + "author": "Sha Zhao; Yongrui Huang; Ling Chen; Chunping Wang; Shijian Li; Lei Chen; Gang Pan", + "authorids": "", + "aff": "Department of Computer Science, Zhejiang University, Hangzhou 310027, China+State Key Laboratory of Brain Machine Intelligence, Zhejiang University, Hangzhou 311121, China; Department of Computer Science, Zhejiang University, Hangzhou 310027, China+State Key Laboratory of Brain Machine Intelligence, Zhejiang University, Hangzhou 311121, China; FinVolution Group (FINV), Shanghai 201203, China; FinVolution Group (FINV), Shanghai 201203, China; Department of Computer Science, Zhejiang University, Hangzhou 310027, China+State Key Laboratory of Brain Machine Intelligence, Zhejiang University, Hangzhou 311121, China; FinVolution Group (FINV), Shanghai 201203, China; Department of Computer Science, Zhejiang University, Hangzhou 310027, China+State Key Laboratory of Brain Machine 
Intelligence, Zhejiang University, Hangzhou 311121, China", + "bibtex": "@article{Zhao_Huang_Chen_Wang_Li_Chen_Pan_2023, title={Loan Fraud Users Detection in Online Lending Leveraging Multiple Data Views}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25675}, DOI={10.1609/aaai.v37i4.25675}, abstractNote={In recent years, online lending platforms have been becoming attractive for micro-financing and popular in financial industries. However, such online lending platforms face a high risk of failure due to the lack of expertise on borrowers\u2019 creditworthness. Thus, risk forecasting is important to avoid economic loss. Detecting loan fraud users in advance is at the heart of risk forecasting. The purpose of fraud user (borrower) detection is to predict whether one user will fail to make required payments in the future. Detecting fraud users depend on historical loan records. However, a large proportion of users lack such information, especially for new users. In this paper, we attempt to detect loan fraud users from cross domain heterogeneous data views, including user attributes, installed app lists, app installation behaviors, and app-in logs, which compensate for the lack of historical loan records. However, it is difficult to effectively fuse the multiple heterogeneous data views. Moreover, some samples miss one or even more data views, increasing the difficulty in fusion. To address the challenges, we propose a novel end-to-end deep multiview learning approach, which encodes heterogeneous data views into homogeneous ones, generates the missing views based on the learned relationship among all the views, and then fuses all the views together to a comprehensive view for identifying fraud users. 
Our model is evaluated on a real-world large-scale dataset consisting of 401,978 loan records of 228,117 users from January 1, 2019, to September 30, 2019, achieving the state-of-the-art performance.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Sha and Huang, Yongrui and Chen, Ling and Wang, Chunping and Li, Shijian and Chen, Lei and Pan, Gang}, year={2023}, month={Jun.}, pages={5428-5436} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25675/25447", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25675", + "pdf_size": 294760, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13228457278828353313&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "zju.edu.cn;zju.edu.cn;xinye.com;xinye.com;zju.edu.cn;xinye.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;xinye.com;xinye.com;zju.edu.cn;xinye.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;0+0;1;1;0+0;1;0+0", + "aff_unique_norm": "Zhejiang University;FinVolution Group", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "http://www.zju.edu.cn;", + "aff_unique_abbr": "ZJU;FINV", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Hangzhou;", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26081", + "title": "Local Explanations for Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Many works in explainable AI have focused on explaining black-box classification models. Explaining deep reinforcement learning (RL) policies in a manner that could be understood by domain users has received much less attention. In this paper, we propose a novel perspective to understanding RL policies based on identifying important states from automatically learned meta-states. 
The key conceptual difference between our approach and many previous ones is that we form meta-states based on locality governed by the expert policy dynamics rather than based on similarity of actions, and that we do not assume any particular knowledge of the underlying topology of the state space. Theoretically, we show that our algorithm to find meta-states converges and the objective that selects important states from each meta-state is submodular leading to efficient high quality greedy selection. Experiments on four domains (four rooms, door-key, minipacman, and pong) and a carefully conducted user study illustrate that our perspective leads to better understanding of the policy. We conjecture that this is a result of our meta-states being more intuitive in that the corresponding important states are strong indicators of tractable intermediate goals that are easier for humans to interpret and follow.", + "primary_area": "machine learning ii", + "author": "Ronny Luss; Amit Dhurandhar; Miao Liu", + "authorids": "", + "aff": "IBM Research, Yorktown Heights, NY; IBM Research, Yorktown Heights, NY; IBM Research, Yorktown Heights, NY", + "bibtex": "@article{Luss_Dhurandhar_Liu_2023, title={Local Explanations for Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26081}, DOI={10.1609/aaai.v37i7.26081}, abstractNote={Many works in explainable AI have focused on explaining black-box classification models. Explaining deep reinforcement learning (RL) policies in a manner that could be understood by domain users has received much less attention. In this paper, we propose a novel perspective to understanding RL policies based on identifying important states from automatically learned meta-states. 
The key conceptual difference between our approach and many previous ones is that we form meta-states based on locality governed by the expert policy dynamics rather than based on similarity of actions, and that we do not assume any particular knowledge of the underlying topology of the state space. Theoretically, we show that our algorithm to find meta-states converges and the objective that selects important states from each meta-state is submodular leading to efficient high quality greedy selection. Experiments on four domains (four rooms, door-key, minipacman, and pong) and a carefully conducted user study illustrate that our perspective leads to better understanding of the policy. We conjecture that this is a result of our meta-states being more intuitive in that the corresponding important states are strong indicators of tractable intermediate goals that are easier for humans to interpret and follow.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luss, Ronny and Dhurandhar, Amit and Liu, Miao}, year={2023}, month={Jun.}, pages={9002-9010} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26081/25853", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26081", + "pdf_size": 1028696, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9401960144054037985&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "us.ibm.com;us.ibm.com;ibm.com", + "email": "us.ibm.com;us.ibm.com;ibm.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Yorktown Heights", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25935", + "title": "Local Intrinsic Dimensional 
Entropy", + "track": "main", + "status": "Technical", + "abstract": "Most entropy measures depend on the spread of the probability distribution over the sample space |X|, and the maximum entropy achievable scales proportionately with the sample space cardinality |X|. For a finite |X|, this yields robust entropy measures which satisfy many important properties, such as invariance to bijections, while the same is not true for continuous spaces (where |X|=infinity). Furthermore, since R and R^d (d in Z+) have the same cardinality (from Cantor's correspondence argument), cardinality-dependent entropy measures cannot encode the data dimensionality. In this work, we question the role of cardinality and distribution spread in defining entropy measures for continuous spaces, which can undergo multiple rounds of transformations and distortions, e.g., in neural networks. We find that the average value of the local intrinsic dimension of a distribution, denoted as ID-Entropy, can serve as a robust entropy measure for continuous spaces, while capturing the data dimensionality. We find that ID-Entropy satisfies many desirable properties and can be extended to conditional entropy, joint entropy and mutual-information variants. ID-Entropy also yields new information bottleneck principles and also links to causality. In the context of deep learning, for feedforward architectures, we show, theoretically and empirically, that the ID-Entropy of a hidden layer directly controls the generalization gap for both classifiers and auto-encoders, when the target function is Lipschitz continuous. 
Our work primarily shows that, for continuous spaces, taking a structural rather than a statistical approach yields entropy measures which preserve intrinsic data dimensionality, while being relevant for studying various architectures.", + "primary_area": "machine learning i", + "author": "Rohan Ghosh; Mehul Motani", + "authorids": "", + "aff": "College of Design and Engineering, Department of Electrical and Computer Engineering, National University of Singapore; N.1 Institute for Health, Institute for Digital Medicine (WisDM), Institute of Data Science, National University of Singapore", + "bibtex": "@article{Ghosh_Motani_2023, title={Local Intrinsic Dimensional Entropy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25935}, DOI={10.1609/aaai.v37i6.25935}, abstractNote={Most entropy measures depend on the spread of the probability distribution over the sample space |X|, and the maximum entropy achievable scales proportionately with the sample space cardinality |X|. For a finite |X|, this yields robust entropy measures which satisfy many important properties, such as invariance to bijections, while the same is not true for continuous spaces (where |X|=infinity). Furthermore, since R and R^d (d in Z+) have the same cardinality (from Cantor\u2019s correspondence argument), cardinality-dependent entropy measures cannot encode the data dimensionality. In this work, we question the role of cardinality and distribution spread in defining entropy measures for continuous spaces, which can undergo multiple rounds of transformations and distortions, e.g., in neural networks. We find that the average value of the local intrinsic dimension of a distribution, denoted as ID-Entropy, can serve as a robust entropy measure for continuous spaces, while capturing the data dimensionality. We find that ID-Entropy satisfies many desirable properties and can be extended to conditional entropy, joint entropy and mutual-information variants. 
ID-Entropy also yields new information bottleneck principles and also links to causality. In the context of deep learning, for feedforward architectures, we show, theoretically and empirically, that the ID-Entropy of a hidden layer directly controls the generalization gap for both classifiers and auto-encoders, when the target function is Lipschitz continuous. Our work primarily shows that, for continuous spaces, taking a structural rather than a statistical approach yields entropy measures which preserve intrinsic data dimensionality, while being relevant for studying various architectures.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ghosh, Rohan and Motani, Mehul}, year={2023}, month={Jun.}, pages={7714-7721} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25935/25707", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25935", + "pdf_size": 747284, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11555152894031881167&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;nus.edu.sg", + "email": "gmail.com;nus.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.nus.edu.sg", + "aff_unique_abbr": "NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25737", + "title": "Local Justice and Machine Learning: Modeling and Inferring Dynamic Ethical Preferences toward Allocations", + "track": "main", + "status": "Technical", + "abstract": "We consider a setting in which a social planner has to make a sequence of decisions to allocate scarce resources in a high-stakes domain. 
Our goal is to understand stakeholders' dynamic moral preferences toward such allocational policies. In particular, we evaluate the sensitivity of moral preferences to the history of allocations and their perceived future impact on various socially salient groups. We propose a mathematical model to capture and infer such dynamic moral preferences. We illustrate our model through small-scale human-subject experiments focused on the allocation of scarce medical resource distributions during a hypothetical viral epidemic. We observe that participants' preferences are indeed history- and impact-dependent. Additionally, our preliminary experimental results reveal intriguing patterns specific to medical resources---a topic that is particularly salient against the backdrop of the global covid-19 pandemic.", + "primary_area": "humans and ai", + "author": "Violet (Xinying) Chen; Joshua Williams; Derek Leben; Hoda Heidari", + "authorids": "", + "aff": "Stevens Institute of Technology; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", + "bibtex": "@article{Chen_Williams_Leben_Heidari_2023, title={Local Justice and Machine Learning: Modeling and Inferring Dynamic Ethical Preferences toward Allocations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25737}, DOI={10.1609/aaai.v37i5.25737}, abstractNote={We consider a setting in which a social planner has to make a sequence of decisions to allocate scarce resources in a high-stakes domain. Our goal is to understand stakeholders\u2019 dynamic moral preferences toward such allocational policies. In particular, we evaluate the sensitivity of moral preferences to the history of allocations and their perceived future impact on various socially salient groups. We propose a mathematical model to capture and infer such dynamic moral preferences. 
We illustrate our model through small-scale human-subject experiments focused on the allocation of scarce medical resource distributions during a hypothetical viral epidemic. We observe that participants\u2019 preferences are indeed history- and impact-dependent. Additionally, our preliminary experimental results reveal intriguing patterns specific to medical resources---a topic that is particularly salient against the backdrop of the global covid-19 pandemic.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Violet (Xinying) and Williams, Joshua and Leben, Derek and Heidari, Hoda}, year={2023}, month={Jun.}, pages={5956-5964} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25737/25509", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25737", + "pdf_size": 825156, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3465615947143103501&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stevens.edu;andrew.cmu.edu;andrew.cmu.edu;cmu.edu", + "email": "stevens.edu;andrew.cmu.edu;andrew.cmu.edu;cmu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Stevens Institute of Technology;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.stevens.edu;https://www.cmu.edu", + "aff_unique_abbr": "SIT;CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25422", + "title": "Local Path Integration for Attribution", + "track": "main", + "status": "Technical", + "abstract": "Path attribution methods are a popular tool to interpret a visual model's prediction on an input. They integrate model gradients for the input features over a path defined between the input and a reference, thereby satisfying certain desirable theoretical properties. 
However, their reliability hinges on the choice of the reference. Moreover, they do not exhibit weak dependence on the input, which leads to counter-intuitive feature attribution mapping. We show that path-based attribution can account for the weak dependence property by choosing the reference from the local distribution of the input. We devise a method to identify the local input distribution and propose a technique to stochastically integrate the model gradients over the paths defined by the references sampled from that distribution. Our local path integration (LPI) method is found to consistently outperform existing path attribution techniques when evaluated on deep visual models. Contributing to the ongoing search of reliable evaluation metrics for the interpretation methods, we also introduce DiffID metric that uses the relative difference between insertion and deletion games to alleviate the distribution shift problem faced by existing metrics. Our code is available at https://github.com/ypeiyu/LPI.", + "primary_area": "computer vision iii", + "author": "Peiyu Yang; Naveed Akhtar; Zeyi Wen; Ajmal Mian", + "authorids": "", + "aff": "The University of Western Australia; The University of Western Australia; Hong Kong University of Science and Technology (Guangzhou)+Hong Kong University of Science and Technology; The University of Western Australia", + "bibtex": "@article{Yang_Akhtar_Wen_Mian_2023, title={Local Path Integration for Attribution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25422}, DOI={10.1609/aaai.v37i3.25422}, abstractNote={Path attribution methods are a popular tool to interpret a visual model\u2019s prediction on an input. They integrate model gradients for the input features over a path defined between the input and a reference, thereby satisfying certain desirable theoretical properties. However, their reliability hinges on the choice of the reference. 
Moreover, they do not exhibit weak dependence on the input, which leads to counter-intuitive feature attribution mapping. We show that path-based attribution can account for the weak dependence property by choosing the reference from the local distribution of the input. We devise a method to identify the local input distribution and propose a technique to stochastically integrate the model gradients over the paths defined by the references sampled from that distribution. Our local path integration (LPI) method is found to consistently outperform existing path attribution techniques when evaluated on deep visual models. Contributing to the ongoing search of reliable evaluation metrics for the interpretation methods, we also introduce DiffID metric that uses the relative difference between insertion and deletion games to alleviate the distribution shift problem faced by existing metrics. Our code is available at https://github.com/ypeiyu/LPI.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Peiyu and Akhtar, Naveed and Wen, Zeyi and Mian, Ajmal}, year={2023}, month={Jun.}, pages={3173-3180} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25422/25194", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25422", + "pdf_size": 1157549, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4845895326140495709&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "research.uwa.edu.au;uwa.edu.au;ust.hk;uwa.edu.au", + "email": "research.uwa.edu.au;uwa.edu.au;ust.hk;uwa.edu.au", + "github": "https://github.com/ypeiyu/LPI", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+1;0", + "aff_unique_norm": "University of Western Australia;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uwa.edu.au;https://www.ust.hk", + "aff_unique_abbr": "UWA;HKUST", + "aff_campus_unique_index": "1", + 
"aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0;0;1+1;0", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-25979", + "title": "Local-Global Defense against Unsupervised Adversarial Attacks on Graphs", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised pre-training algorithms for graph representation learning are vulnerable to adversarial attacks, such as first-order perturbations on graphs, which will have an impact on particular downstream applications. Designing an effective representation learning strategy against white-box attacks remains a crucial open topic. Prior research attempts to improve representation robustness by maximizing mutual information between the representation and the perturbed graph, which is sub-optimal because it does not adapt its defense techniques to the severity of the attack. To address this issue, we propose an unsupervised defense method that combines local and global defense to improve the robustness of representation. Note that we put forward the Perturbed Edges Harmfulness (PEH) metric to determine the riskiness of the attack. Thus, when the edges are attacked, the model can automatically identify the risk of attack. We present a method of attention-based protection against high-risk attacks that penalizes attention coefficients of perturbed edges to encoders. 
Extensive experiments demonstrate that our strategies can enhance the robustness of representation against various adversarial attacks on three benchmark graphs.", + "primary_area": "machine learning ii", + "author": "Di Jin; Bingdao Feng; Siqi Guo; Xiaobao Wang; Jianguo Wei; Zhen Wang", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; School of Cybersecurity, Northwestern Polytechnical University, Xi\u2019an, Shaanxi, China", + "bibtex": "@article{Jin_Feng_Guo_Wang_Wei_Wang_2023, title={Local-Global Defense against Unsupervised Adversarial Attacks on Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25979}, DOI={10.1609/aaai.v37i7.25979}, abstractNote={Unsupervised pre-training algorithms for graph representation learning are vulnerable to adversarial attacks, such as first-order perturbations on graphs, which will have an impact on particular downstream applications. Designing an effective representation learning strategy against white-box attacks remains a crucial open topic. Prior research attempts to improve representation robustness by maximizing mutual information between the representation and the perturbed graph, which is sub-optimal because it does not adapt its defense techniques to the severity of the attack. To address this issue, we propose an unsupervised defense method that combines local and global defense to improve the robustness of representation. Note that we put forward the Perturbed Edges Harmfulness (PEH) metric to determine the riskiness of the attack. Thus, when the edges are attacked, the model can automatically identify the risk of attack. 
We present a method of attention-based protection against high-risk attacks that penalizes attention coefficients of perturbed edges to encoders. Extensive experiments demonstrate that our strategies can enhance the robustness of representation against various adversarial attacks on three benchmark graphs.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Di and Feng, Bingdao and Guo, Siqi and Wang, Xiaobao and Wei, Jianguo and Wang, Zhen}, year={2023}, month={Jun.}, pages={8105-8113} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25979/25751", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25979", + "pdf_size": 1131871, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10713806818126961161&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;nwpu.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;nwpu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "Tianjin University;Northwestern Polytechnical University", + "aff_unique_dep": "College of Intelligence and Computing;School of Cybersecurity", + "aff_unique_url": "http://www.tju.edu.cn;https://www.nwpu.edu.cn", + "aff_unique_abbr": "Tianjin University;NPU", + "aff_campus_unique_index": "0;0;0;0;0;1", + "aff_campus_unique": "Tianjin;Xi'an", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26357", + "title": "Locate Then Generate: Bridging Vision and Language with Bounding Box for Scene-Text VQA", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we propose a novel multi-modal framework for Scene Text Visual Question Answering (STVQA), which requires models to read scene text in images for question answering. 
Apart from text or visual objects, which could exist independently, scene text naturally links text and visual modalities together by conveying linguistic semantics while being a visual object in an image simultaneously. Different to conventional STVQA models which take the linguistic semantics and visual semantics in scene text as two separate features, in this paper, we propose a paradigm of \"Locate Then Generate\" (LTG), which explicitly unifies this two semantics with the spatial bounding box as a bridge connecting them. Specifically, at first, LTG locates the region in an image that may contain the answer words with an answer location module (ALM) consisting of a region proposal network and a language refinement network, both of which can transform to each other with one-to-one mapping via the scene text bounding box. Next, given the answer words selected by ALM, LTG generates a readable answer sequence with an answer generation module (AGM) based on a pre-trained language model. As a benefit of the explicit alignment of the visual and linguistic semantics, even without any scene text based pre-training tasks, LTG can boost the absolute accuracy by +6.06% and +6.92% on the TextVQA dataset and the ST-VQA dataset respectively, compared with a non-pre-training baseline. 
We further demonstrate that LTG effectively unifies visual and text modalities through the spatial bounding box connection, which is underappreciated in previous methods.", + "primary_area": "machine learning iv", + "author": "Yongxin Zhu; Zhen Liu; Yukang Liang; Xin Li; Hao Liu; Changcun Bao; Linli Xu", + "authorids": "", + "aff": "School of Data Science, University of Science and Technology of China; School of Computer Science and Technology, University of Science and Technology of China; State Key Laboratory of Cognitive Intelligence; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; School of Computer Science and Technology, University of Science and Technology of China", + "bibtex": "@article{Zhu_Liu_Liang_Li_Liu_Bao_Xu_2023, title={Locate Then Generate: Bridging Vision and Language with Bounding Box for Scene-Text VQA}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26357}, DOI={10.1609/aaai.v37i9.26357}, abstractNote={In this paper, we propose a novel multi-modal framework for Scene Text Visual Question Answering (STVQA), which requires models to read scene text in images for question answering. Apart from text or visual objects, which could exist independently, scene text naturally links text and visual modalities together by conveying linguistic semantics while being a visual object in an image simultaneously. Different to conventional STVQA models which take the linguistic semantics and visual semantics in scene text as two separate features, in this paper, we propose a paradigm of "Locate Then Generate" (LTG), which explicitly unifies this two semantics with the spatial bounding box as a bridge connecting them. Specifically, at first, LTG locates the region in an image that may contain the answer words with an answer location module (ALM) consisting of a region proposal network and a language refinement network, both of which can transform to each other with one-to-one mapping via the scene text bounding box. 
Next, given the answer words selected by ALM, LTG generates a readable answer sequence with an answer generation module (AGM) based on a pre-trained language model. As a benefit of the explicit alignment of the visual and linguistic semantics, even without any scene text based pre-training tasks, LTG can boost the absolute accuracy by +6.06% and +6.92% on the TextVQA dataset and the ST-VQA dataset respectively, compared with a non-pre-training baseline. We further demonstrate that LTG effectively unifies visual and text modalities through the spatial bounding box connection, which is underappreciated in previous methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Yongxin and Liu, Zhen and Liang, Yukang and Li, Xin and Liu, Hao and Bao, Changcun and Xu, Linli}, year={2023}, month={Jun.}, pages={11479-11487} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26357/26129", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26357", + "pdf_size": 4500524, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9553017514750508352&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;tencent.com;tencent.com;tencent.com;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;tencent.com;tencent.com;tencent.com;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;2;2;0", + "aff_unique_norm": "University of Science and Technology of China;State Key Laboratory of Cognitive Intelligence;Tencent", + "aff_unique_dep": "School of Data Science;;YouTu Lab", + "aff_unique_url": "http://www.ustc.edu.cn;;https://www.tencent.com", + "aff_unique_abbr": "USTC;;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27046", + 
"title": "Logic Error Localization and Correction with Machine Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "We aim to propose a system repairing programs with logic errors to be functionally correct among different programming languages. Logic error program repair has always been a thorny problem: First, a logic error is usually harder to repair than a syntax error in a program because it has no diagnostic feedback from compilers. Second, it requires inferring in different ranges (i.e., the distance of related code lines) and tracking symbols across its pseudocode, source code, and test cases. Third, the logic error datasets are scarce, since an ideal logic error dataset should contain lots of components during the development procedure of a program, including a program specification, pseudocode, source code, test cases, and test reports (i.e., test case failure report). In our work, we propose novel solutions to these challenges. First, we introduce pseudocode information to assist logic error localization and correction. We construct a code-pseudocode graph to connect symbols across a source code and its pseudocode and then apply a graph neural network to localize and correct logic errors. Second, we collect logic errors generated in the process of syntax error repairing via DrRepair from 500 programs in the SPoC dataset and reconstruct them to our single logic error dataset, which we leverage to train and evaluate our models. Our experimental results show that we achieve 99.39% localization accuracy and 19.20% full repair accuracy on logic errors with five-fold cross-validation. Based on our current work, we will replenish and construct more complete public logic error datasets and propose a novel system to comprehend different programming languages from several perspectives and correct logic errors to be functionally correct.", + "primary_area": "", + "author": "Zhenyu Xu; Victor S. 
Sheng; Keyi Lu", + "authorids": "", + "aff": "Department of Computer Science, Texas Tech University; Department of Computer Science and Engineering, The Ohio State University; Department of Computer Science, Texas Tech University", + "bibtex": "@article{Xu_Sheng_Lu_2024, title={Logic Error Localization and Correction with Machine Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27046}, DOI={10.1609/aaai.v37i13.27046}, abstractNote={We aim to propose a system repairing programs with logic errors to be functionally correct among different programming languages. Logic error program repair has always been a thorny problem: First, a logic error is usually harder to repair than a syntax error in a program because it has no diagnostic feedback from compilers. Second, it requires inferring in different ranges (i.e., the distance of related code lines) and tracking symbols across its pseudocode, source code, and test cases. Third, the logic error datasets are scarce, since an ideal logic error dataset should contain lots of components during the development procedure of a program, including a program specification, pseudocode, source code, test cases, and test reports (i.e., test case failure report). In our work, we propose novel solutions to these challenges. First, we introduce pseudocode information to assist logic error localization and correction. We construct a code-pseudocode graph to connect symbols across a source code and its pseudocode and then apply a graph neural network to localize and correct logic errors. Second, we collect logic errors generated in the process of syntax error repairing via DrRepair from 500 programs in the SPoC dataset and reconstruct them to our single logic error dataset, which we leverage to train and evaluate our models. Our experimental results show that we achieve 99.39% localization accuracy and 19.20% full repair accuracy on logic errors with five-fold cross-validation. 
Based on our current work, we will replenish and construct more complete public logic error datasets and propose a novel system to comprehend different programming languages from several perspectives and correct logic errors to be functionally correct.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Zhenyu and Sheng, Victor S. and Lu, Keyi}, year={2024}, month={Jul.}, pages={16372-16373} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27046/26818", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27046", + "pdf_size": 244110, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:BNh6wknU968J:scholar.google.com/&scioq=Logic+Error+Localization+and+Correction+with+Machine+Learning+(Student+Abstract)&hl=en&as_sdt=0,11", + "gs_version_total": 2, + "aff_domain": "ttu.edu;osu.edu;ttu.edu", + "email": "ttu.edu;osu.edu;ttu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Texas Tech University;The Ohio State University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.ttu.edu;https://www.osu.edu", + "aff_unique_abbr": "TTU;OSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25579", + "title": "Logic and Commonsense-Guided Temporal Knowledge Graph Completion", + "track": "main", + "status": "Technical", + "abstract": "A temporal knowledge graph (TKG) stores the events derived from the data involving time. Predicting events is extremely challenging due to the time-sensitive property of events. Besides, the previous TKG completion (TKGC) approaches cannot represent both the timeliness and the causality properties of events, simultaneously. 
To address these challenges, we propose a Logic and Commonsense-Guided Embedding model (LCGE) to jointly learn the time-sensitive representation involving timeliness and causality of events, together with the time-independent representation of events from the perspective of commonsense. Specifically, we design a temporal rule learning algorithm to construct a rule-guided predicate embedding regularization strategy for learning the causality among events. Furthermore, we could accurately evaluate the plausibility of events via auxiliary commonsense knowledge. The experimental results of TKGC task illustrate the significant performance improvements of our model compared with the existing approaches. More interestingly, our model is able to provide the explainability of the predicted results in the view of causal inference. The appendix, source code and datasets of this paper are available at https://github.com/ngl567/LCGE.", + "primary_area": "data mining and knowledge management", + "author": "Guanglin Niu; Bo Li", + "authorids": "", + "aff": "Institute of Artificial Intelligence, Beihang University, Beijing, China; Institute of Artificial Intelligence, Beihang University, Beijing, China + Hangzhou Innovation Institute, Beihang University, Hangzhou, China", + "bibtex": "@article{Niu_Li_2023, title={Logic and Commonsense-Guided Temporal Knowledge Graph Completion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25579}, DOI={10.1609/aaai.v37i4.25579}, abstractNote={A temporal knowledge graph (TKG) stores the events derived from the data involving time. Predicting events is extremely challenging due to the time-sensitive property of events. Besides, the previous TKG completion (TKGC) approaches cannot represent both the timeliness and the causality properties of events, simultaneously. 
To address these challenges, we propose a Logic and Commonsense-Guided Embedding model (LCGE) to jointly learn the time-sensitive representation involving timeliness and causality of events, together with the time-independent representation of events from the perspective of commonsense. Specifically, we design a temporal rule learning algorithm to construct a rule-guided predicate embedding regularization strategy for learning the causality among events. Furthermore, we could accurately evaluate the plausibility of events via auxiliary commonsense knowledge. The experimental results of TKGC task illustrate the significant performance improvements of our model compared with the existing approaches. More interestingly, our model is able to provide the explainability of the predicted results in the view of causal inference. The appendix, source code and datasets of this paper are available at https://github.com/ngl567/LCGE.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Niu, Guanglin and Li, Bo}, year={2023}, month={Jun.}, pages={4569-4577} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25579/25351", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25579", + "pdf_size": 315856, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8853390033407152888&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn", + "github": "https://github.com/ngl567/LCGE", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "Institute of Artificial Intelligence", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "BUAA", + "aff_campus_unique_index": "0;0+1", + "aff_campus_unique": "Beijing;Hangzhou", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26174", + "title": 
"Logical Satisfiability of Counterfactuals for Faithful Explanations in NLI", + "track": "main", + "status": "Technical", + "abstract": "Evaluating an explanation's faithfulness is desired for many reasons such as trust, interpretability and diagnosing the sources of model's errors. In this work, which focuses on the NLI task, we introduce the methodology of Faithfulness-through-Counterfactuals, which first generates a counterfactual hypothesis based on the logical predicates expressed in the explanation, and then evaluates if the model's prediction on the counterfactual is consistent with that expressed logic (i.e. if the new formula is \\textit{logically satisfiable}). In contrast to existing approaches, this does not require any explanations for training a separate verification model. We first validate the efficacy of automatic counterfactual hypothesis generation, leveraging on the few-shot priming paradigm. Next, we show that our proposed metric distinguishes between human-model agreement and disagreement on new counterfactual input. In addition, we conduct a sensitivity analysis to validate that our metric is sensitive to unfaithful explanations.", + "primary_area": "machine learning iii", + "author": "Suzanna Sia; Anton Belyy; Amjad Almahairi; Madian Khabsa; Luke Zettlemoyer; Lambert Mathias", + "authorids": "", + "aff": "Johns Hopkins University; Johns Hopkins University; Meta AI Research; Meta AI Research; Meta AI Research; Meta AI Research", + "bibtex": "@article{Sia_Belyy_Almahairi_Khabsa_Zettlemoyer_Mathias_2023, title={Logical Satisfiability of Counterfactuals for Faithful Explanations in NLI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26174}, DOI={10.1609/aaai.v37i8.26174}, abstractNote={Evaluating an explanation\u2019s faithfulness is desired for many reasons such as trust, interpretability and diagnosing the sources of model\u2019s errors. 
In this work, which focuses on the NLI task, we introduce the methodology of Faithfulness-through-Counterfactuals, which first generates a counterfactual hypothesis based on the logical predicates expressed in the explanation, and then evaluates if the model\u2019s prediction on the counterfactual is consistent with that expressed logic (i.e. if the new formula is \\textit{logically satisfiable}). In contrast to existing approaches, this does not require any explanations for training a separate verification model. We first validate the efficacy of automatic counterfactual hypothesis generation, leveraging on the few-shot priming paradigm. Next, we show that our proposed metric distinguishes between human-model agreement and disagreement on new counterfactual input. In addition, we conduct a sensitivity analysis to validate that our metric is sensitive to unfaithful explanations.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sia, Suzanna and Belyy, Anton and Almahairi, Amjad and Khabsa, Madian and Zettlemoyer, Luke and Mathias, Lambert}, year={2023}, month={Jun.}, pages={9837-9845} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26174/25946", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26174", + "pdf_size": 955044, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12725319452023450064&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "jh.edu; ; ; ; ; ", + "email": "jh.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;1;1", + "aff_unique_norm": "Johns Hopkins University;Meta Platforms, Inc.", + "aff_unique_dep": ";Meta AI Research", + "aff_unique_url": "https://www.jhu.edu;https://meta.com", + "aff_unique_abbr": "JHU;Meta AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + 
"id": "article-27042", + "title": "Long Legal Article Question Answering via Cascaded Key Segment Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Current sentence-level evidence extraction based methods may lose the discourse coherence of legal articles since they tend to make the extracted sentences scattered over the article. To solve the problem, this paper proposes a Cascaded Answer-guided key segment learning framework for long Legal article Question Answering, namely CALQA. The framework consists of three cascaded modules: Sifter, Reader, and Responder. The Sifter transfers a long legal article into several segments and works in an answer-guided way by automatically sifting out key fact segments in a coarse-to-fine approach through multiple iterations. The Reader utilizes a set of attention mechanisms to obtain semantic representations of the question and key fact segments. Finally, considering it a multi-label classification task the Responder predicts final answers in a cascaded manner. 
CALQA outperforms state-of-the-art methods in CAIL 2021 Law dataset.", + "primary_area": "", + "author": "Shugui Xie; Lin Li; Jingling Yuan; Qing Xie; Xiaohui Tao", + "authorids": "", + "aff": "School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan, 430070, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan, 430070, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan, 430070, China; School of Computer Science and Artificial Intelligence, Wuhan University of Technology, Wuhan, 430070, China; School of Mathematics, Physics, and Computing, University of Southern Queensland, Toowoomba, Australia", + "bibtex": "@article{Xie_Li_Yuan_Xie_Tao_2024, title={Long Legal Article Question Answering via Cascaded Key Segment Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27042}, DOI={10.1609/aaai.v37i13.27042}, abstractNote={Current sentence-level evidence extraction based methods may lose the discourse coherence of legal articles since they tend to make the extracted sentences scattered over the article. To solve the problem, this paper proposes a Cascaded Answer-guided key segment learning framework for long Legal article Question Answering, namely CALQA. The framework consists of three cascaded modules: Sifter, Reader, and Responder. The Sifter transfers a long legal article into several segments and works in an answer-guided way by automatically sifting out key fact segments in a coarse-to-fine approach through multiple iterations. The Reader utilizes a set of attention mechanisms to obtain semantic representations of the question and key fact segments. Finally, considering it a multi-label classification task the Responder predicts final answers in a cascaded manner. 
CALQA outperforms state-of-the-art methods in CAIL 2021 Law dataset.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Shugui and Li, Lin and Yuan, Jingling and Xie, Qing and Tao, Xiaohui}, year={2024}, month={Jul.}, pages={16364-16365} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27042/26814", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27042", + "pdf_size": 203101, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10126946510833117162&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "whut.edu.cn;whut.edu.cn;whut.edu.cn;whut.edu.cn;usq.edu.au", + "email": "whut.edu.cn;whut.edu.cn;whut.edu.cn;whut.edu.cn;usq.edu.au", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Wuhan University of Technology;University of Southern Queensland", + "aff_unique_dep": "School of Computer Science and Artificial Intelligence;School of Mathematics, Physics, and Computing", + "aff_unique_url": "http://www.wut.edu.cn;https://www.usq.edu.au", + "aff_unique_abbr": "WUT;USQ", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Wuhan;Toowoomba", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25927", + "title": "Long-Tail Cross Modal Hashing", + "track": "main", + "status": "Technical", + "abstract": "", + "primary_area": "machine learning i", + "author": "Zijun Gao; Jun Wang; Guoxian Yu; Zhongmin Yan; Carlotta Domeniconi; Jinglin Zhang", + "authorids": "", + "aff": ";;;;;", + "bibtex": "", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25927/25699", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25927", + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13261525645493665004&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;", + 
"email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "article-26159", + "title": "Losses over Labels: Weakly Supervised Learning via Direct Loss Construction", + "track": "main", + "status": "Technical", + "abstract": "Owing to the prohibitive costs of generating large amounts of labeled data, programmatic weak supervision is a growing paradigm within machine learning. In this setting, users design heuristics that provide noisy labels for subsets of the data. These weak labels are combined (typically via a graphical model) to form pseudolabels, which are then used to train a downstream model. In this work, we question a foundational premise of the typical weakly supervised learning pipeline: given that the heuristic provides all \u201clabel\u201d information, why do we need to generate pseudolabels at all? Instead, we propose to directly transform the heuristics themselves into corresponding loss functions that penalize differences between our model and the heuristic. By constructing losses directly from the heuristics, we can incorporate more information than is used in the standard weakly supervised pipeline, such as how the heuristics make their decisions, which explicitly informs feature selection during training. We call our method Losses over Labels (LoL) as it creates losses directly from heuristics without going through the intermediate step of a label. We show that LoL improves upon existing weak supervision methods on several benchmark text and image classification tasks and further demonstrate that incorporating gradient information leads to better performance on almost every task.", + "primary_area": "machine learning iii", + "author": "Dylan Sam; J. 
Zico Kolter", + "authorids": "", + "aff": "Machine Learning Department, Carnegie Mellon University; Bosch Center for Artificial Intelligence + Machine Learning Department, Carnegie Mellon University", + "bibtex": "@article{Sam_Kolter_2023, title={Losses over Labels: Weakly Supervised Learning via Direct Loss Construction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26159}, DOI={10.1609/aaai.v37i8.26159}, abstractNote={Owing to the prohibitive costs of generating large amounts of labeled data, programmatic weak supervision is a growing paradigm within machine learning. In this setting, users design heuristics that provide noisy labels for subsets of the data. These weak labels are combined (typically via a graphical model) to form pseudolabels, which are then used to train a downstream model. In this work, we question a foundational premise of the typical weakly supervised learning pipeline: given that the heuristic provides all \u201clabel\u201d information, why do we need to generate pseudolabels at all? Instead, we propose to directly transform the heuristics themselves into corresponding loss functions that penalize differences between our model and the heuristic. By constructing losses directly from the heuristics, we can incorporate more information than is used in the standard weakly supervised pipeline, such as how the heuristics make their decisions, which explicitly informs feature selection during training. We call our method Losses over Labels (LoL) as it creates losses directly from heuristics without going through the intermediate step of a label. We show that LoL improves upon existing weak supervision methods on several benchmark text and image classification tasks and further demonstrate that incorporating gradient information leads to better performance on almost every task.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sam, Dylan and Kolter, J. 
Zico}, year={2023}, month={Jun.}, pages={9695-9703} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26159/25931", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26159", + "pdf_size": 535574, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10260584150618560644&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "andrew.cmu.edu;cs.cmu.edu", + "email": "andrew.cmu.edu;cs.cmu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+0", + "aff_unique_norm": "Carnegie Mellon University;Bosch Center for Artificial Intelligence", + "aff_unique_dep": "Machine Learning Department;Center for Artificial Intelligence", + "aff_unique_url": "https://www.cmu.edu;https://www.bosch-ai.com", + "aff_unique_abbr": "CMU;BCAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "article-26297", + "title": "Lottery Pools: Winning More by Interpolating Tickets without Increasing Training or Inference Cost", + "track": "main", + "status": "Technical", + "abstract": "Lottery tickets (LTs) is able to discover accurate and sparse subnetworks that could be trained in isolation to match the performance of dense networks. Ensemble, in parallel, is one of the oldest time-proven tricks in machine learning to improve performance by combining the output of multiple independent models. However, the benefits of ensemble in the context of LTs will be diluted since ensemble does not directly lead to stronger sparse subnetworks, but leverages their predictions for a better decision. In this work, we first observe that directly averaging the weights of the adjacent learned subnetworks significantly boosts the performance of LTs. 
Encouraged by this observation, we further propose an alternative way to perform an \"ensemble'' over the subnetworks identified by iterative magnitude pruning via a simple interpolating strategy. We call our method Lottery Pools. In contrast to the naive ensemble which brings no performance gains to each single subnetwork, Lottery Pools yields much stronger sparse subnetworks than the original LTs without requiring any extra training or inference cost. Across various modern architectures on CIFAR-10/100 and ImageNet, we show that our method achieves significant performance gains in both, in-distribution and out-of-distribution scenarios. Impressively, evaluated with VGG-16 and ResNet-18, the produced sparse subnetworks outperform the original LTs by up to 1.88% on CIFAR-100 and 2.36% on CIFAR-100-C; the resulting dense network surpasses the pre-trained dense-model up to \n 2.22% on CIFAR-100 and 2.38% on CIFAR-100-C. Our source code can be found at https://github.com/luuyin/Lottery-pools.", + "primary_area": "machine learning iv", + "author": "Lu Yin; Shiwei Liu; Meng Fang; Tianjin Huang; Vlado Menkovski; Mykola Pechenizkiy", + "authorids": "", + "aff": "Eindhoven University of Technology; Eindhoven University of Technology + University of Texas at Austin; University of Liverpool; Eindhoven University of Technology; Eindhoven University of Technology; Eindhoven University of Technology", + "bibtex": "@article{Yin_Liu_Fang_Huang_Menkovski_Pechenizkiy_2023, title={Lottery Pools: Winning More by Interpolating Tickets without Increasing Training or Inference Cost}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26297}, DOI={10.1609/aaai.v37i9.26297}, abstractNote={Lottery tickets (LTs) is able to discover accurate and sparse subnetworks that could be trained in isolation to match the performance of dense networks. 
Ensemble, in parallel, is one of the oldest time-proven tricks in machine learning to improve performance by combining the output of multiple independent models. However, the benefits of ensemble in the context of LTs will be diluted since ensemble does not directly lead to stronger sparse subnetworks, but leverages their predictions for a better decision. In this work, we first observe that directly averaging the weights of the adjacent learned subnetworks significantly boosts the performance of LTs. Encouraged by this observation, we further propose an alternative way to perform an "ensemble\u2019\u2019 over the subnetworks identified by iterative magnitude pruning via a simple interpolating strategy. We call our method Lottery Pools. In contrast to the naive ensemble which brings no performance gains to each single subnetwork, Lottery Pools yields much stronger sparse subnetworks than the original LTs without requiring any extra training or inference cost. Across various modern architectures on CIFAR-10/100 and ImageNet, we show that our method achieves significant performance gains in both, in-distribution and out-of-distribution scenarios. Impressively, evaluated with VGG-16 and ResNet-18, the produced sparse subnetworks outperform the original LTs by up to 1.88% on CIFAR-100 and 2.36% on CIFAR-100-C; the resulting dense network surpasses the pre-trained dense-model up to 2.22% on CIFAR-100 and 2.38% on CIFAR-100-C. 
Our source code can be found at https://github.com/luuyin/Lottery-pools.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yin, Lu and Liu, Shiwei and Fang, Meng and Huang, Tianjin and Menkovski, Vlado and Pechenizkiy, Mykola}, year={2023}, month={Jun.}, pages={10945-10953} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26297/26069", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26297", + "pdf_size": 262528, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=197617521677515220&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "tue.nl;tue.nl;liverpool.ac.uk;tue.nl;tue.nl;tue.nl", + "email": "tue.nl;tue.nl;liverpool.ac.uk;tue.nl;tue.nl;tue.nl", + "github": "https://github.com/luuyin/Lottery-pools", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;2;0;0;0", + "aff_unique_norm": "Eindhoven University of Technology;University of Texas at Austin;University of Liverpool", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.tue.nl;https://www.utexas.edu;https://www.liverpool.ac.uk", + "aff_unique_abbr": "TU/e;UT Austin;Liv Uni", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Austin", + "aff_country_unique_index": "0;0+1;2;0;0;0", + "aff_country_unique": "Netherlands;United States;United Kingdom" + }, + { + "id": "article-26668", + "title": "Low Emission Building Control with Zero-Shot Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Heating and cooling systems in buildings account for 31% of global energy use, much of which are regulated by Rule Based Controllers (RBCs) that neither maximise energy efficiency nor minimise emissions by interacting optimally with the grid. 
Control via Reinforcement Learning (RL) has been shown to significantly improve building energy efficiency, but existing solutions require access to building-specific simulators or data that cannot be expected for every building in the world. In response, we show it is possible to obtain emission-reducing policies without such knowledge a priori\u2013a paradigm we call zero-shot building control. We combine ideas from system identification and model-based RL to create PEARL (Probabilistic Emission-Abating Reinforcement Learning) and show that a short period of active exploration is all that is required to build a performant model. In experiments across three varied building energy simulations, we show PEARL outperforms an existing RBC once, and popular RL baselines in all cases, reducing building emissions by as much as 31% whilst maintaining thermal comfort. Our source code is available online via: https://enjeeneer.io/projects/pearl/.", + "primary_area": "ai for social impact", + "author": "Scott Jeen; Alessandro Abate; Jonathan M. Cullen", + "authorids": "", + "aff": "University of Cambridge + Alan Turing Institute; University of Oxford + Alan Turing Institute; University of Cambridge", + "bibtex": "@article{Jeen_Abate_Cullen_2023, title={Low Emission Building Control with Zero-Shot Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26668}, DOI={10.1609/aaai.v37i12.26668}, abstractNote={Heating and cooling systems in buildings account for 31% of global energy use, much of which are regulated by Rule Based Controllers (RBCs) that neither maximise energy efficiency nor minimise emissions by interacting optimally with the grid. Control via Reinforcement Learning (RL) has been shown to significantly improve building energy efficiency, but existing solutions require access to building-specific simulators or data that cannot be expected for every building in the world. 
In response, we show it is possible to obtain emission-reducing policies without such knowledge a priori\u2013a paradigm we call zero-shot building control. We combine ideas from system identification and model-based RL to create PEARL (Probabilistic Emission-Abating Reinforcement Learning) and show that a short period of active exploration is all that is required to build a performant model. In experiments across three varied building energy simulations, we show PEARL outperforms an existing RBC once, and popular RL baselines in all cases, reducing building emissions by as much as 31% whilst maintaining thermal comfort. Our source code is available online via: https://enjeeneer.io/projects/pearl/.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jeen, Scott and Abate, Alessandro and Cullen, Jonathan M.}, year={2023}, month={Jun.}, pages={14259-14267} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26668/26440", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26668", + "pdf_size": 2372566, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1609977587786438599&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cam.ac.uk;cs.ox.ac.uk;cam.ac.uk", + "email": "cam.ac.uk;cs.ox.ac.uk;cam.ac.uk", + "github": "", + "project": "https://enjeeneer.io/projects/pearl/", + "author_num": 3, + "aff_unique_index": "0+1;2+1;0", + "aff_unique_norm": "University of Cambridge;Alan Turing Institute;University of Oxford", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cam.ac.uk;https://www.turing.ac.uk;https://www.ox.ac.uk", + "aff_unique_abbr": "Cambridge;ATI;Oxford", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26540", + "title": "Low Resource Quantitative Information Extraction via Structure Searching and 
Prefix-Based Text Generation", + "track": "main", + "status": "Technical", + "abstract": "Quantitative information plays an important part in the financial and data analysis areas. Prior work relied on pattern-matching methods and complex hand-crafted rules to extract quantitative information due to the lack of labeled data. Such methods can be unstable and difficult to scale to the open domain. In this paper, we study quantitative information extraction in the low-resource setting. We propose a search-based approach by searching from the syntactic structures to acquire basic training data. The search process is simple yet effective. Then, a prefix-based text-to-text generation method is employed to extract the quantitative information. The prefix design can fully leverage pre-trained language models for text generation to serve the information extraction purpose. Experimental results show that our approaches achieves high performance with a limited amount of labeled data. The extraction result could further boost the performance of other tasks such as quantitative reasoning.", + "primary_area": "speech natural language processing", + "author": "Tongliang Li; Zixiang Wang; Zhoujun Li", + "authorids": "", + "aff": "State Key Lab of Software Development Environment, Beihang University, Beijing, China; State Key Lab of Software Development Environment, Beihang University, Beijing, China; State Key Lab of Software Development Environment, Beihang University, Beijing, China", + "bibtex": "@article{Li_Wang_Li_2023, title={Low Resource Quantitative Information Extraction via Structure Searching and Prefix-Based Text Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26540}, DOI={10.1609/aaai.v37i11.26540}, abstractNote={Quantitative information plays an important part in the financial and data analysis areas. 
Prior work relied on pattern-matching methods and complex hand-crafted rules to extract quantitative information due to the lack of labeled data. Such methods can be unstable and difficult to scale to the open domain. In this paper, we study quantitative information extraction in the low-resource setting. We propose a search-based approach by searching from the syntactic structures to acquire basic training data. The search process is simple yet effective. Then, a prefix-based text-to-text generation method is employed to extract the quantitative information. The prefix design can fully leverage pre-trained language models for text generation to serve the information extraction purpose. Experimental results show that our approaches achieves high performance with a limited amount of labeled data. The extraction result could further boost the performance of other tasks such as quantitative reasoning.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Tongliang and Wang, Zixiang and Li, Zhoujun}, year={2023}, month={Jun.}, pages={13112-13120} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26540/26312", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26540", + "pdf_size": 237691, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5515197723043231831&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "State Key Lab of Software Development Environment", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "BUAA", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25427", + "title": 
"Low-Light Image Enhancement Network Based on Multi-Scale Feature Complementation", + "track": "main", + "status": "Technical", + "abstract": "Images captured in low-light environments have problems of insufficient brightness and low contrast, which will affect subsequent image processing tasks. Although most current enhancement methods can obtain high-contrast images, they still suffer from noise amplification and color distortion. To address these issues, this paper proposes a low-light image enhancement network based on multi-scale feature complementation (LIEN-MFC), which is a U-shaped encoder-decoder network supervised by multiple images of different scales. In the encoder, four feature extraction branches are constructed to extract features of low-light images at different scales. In the decoder, to ensure the integrity of the learned features at each scale, a feature supplementary fusion module (FSFM) is proposed to complement and integrate features from different branches of the encoder and decoder. In addition, a feature restoration module (FRM) and an image reconstruction module (IRM) are built in each branch to reconstruct the restored features and output enhanced images. To better train the network, a joint loss function is defined, in which a discriminative loss term is designed to ensure that the enhanced results better meet the visual properties of the human eye. 
Extensive experiments on benchmark datasets show that the proposed method outperforms some state-of-the-art methods subjectively and objectively.", + "primary_area": "computer vision iii", + "author": "Yong Yang; Wenzhi Xu; Shuying Huang; Weiguo Wan", + "authorids": "", + "aff": "School of Computer Science and Technology, Tiangong University, Tianjin, China; School of Information Technology, Jiangxi University of Finance and Economics, Nanchang, China; School of Software, Tiangong University, Tianjin, China; School of Software and Internet of Things Engineering, Jiangxi University of Finance and Economics, Nanchang, China", + "bibtex": "@article{Yang_Xu_Huang_Wan_2023, title={Low-Light Image Enhancement Network Based on Multi-Scale Feature Complementation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25427}, DOI={10.1609/aaai.v37i3.25427}, abstractNote={Images captured in low-light environments have problems of insufficient brightness and low contrast, which will affect subsequent image processing tasks. Although most current enhancement methods can obtain high-contrast images, they still suffer from noise amplification and color distortion. To address these issues, this paper proposes a low-light image enhancement network based on multi-scale feature complementation (LIEN-MFC), which is a U-shaped encoder-decoder network supervised by multiple images of different scales. In the encoder, four feature extraction branches are constructed to extract features of low-light images at different scales. In the decoder, to ensure the integrity of the learned features at each scale, a feature supplementary fusion module (FSFM) is proposed to complement and integrate features from different branches of the encoder and decoder. In addition, a feature restoration module (FRM) and an image reconstruction module (IRM) are built in each branch to reconstruct the restored features and output enhanced images. 
To better train the network, a joint loss function is defined, in which a discriminative loss term is designed to ensure that the enhanced results better meet the visual properties of the human eye. Extensive experiments on benchmark datasets show that the proposed method outperforms some state-of-the-art methods subjectively and objectively.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Yong and Xu, Wenzhi and Huang, Shuying and Wan, Weiguo}, year={2023}, month={Jun.}, pages={3214-3221} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25427/25199", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25427", + "pdf_size": 820188, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11250747518355550519&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "126.com;163.com;tiangong.edu.cn;jxufe.edu.cn", + "email": "126.com;163.com;tiangong.edu.cn;jxufe.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Tiangong University;Jiangxi University of Finance and Economics", + "aff_unique_dep": "School of Computer Science and Technology;School of Information Technology", + "aff_unique_url": ";", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "0;1;0;1", + "aff_campus_unique": "Tianjin;Nanchang", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25257", + "title": "Low-Light Video Enhancement with Synthetic Event Guidance", + "track": "main", + "status": "Technical", + "abstract": "Low-light video enhancement (LLVE) is an important yet challenging task with many applications such as photographing and autonomous driving. Unlike single image low-light enhancement, most LLVE methods utilize temporal information from adjacent frames to restore the color and remove the noise of the target frame. 
However, these algorithms, based on the framework of multi-frame alignment and enhancement, may produce multi-frame fusion artifacts when encountering extreme low light or fast motion. In this paper, inspired by the low latency and high dynamic range of events, we use synthetic events from multiple frames to guide the enhancement and restoration of low-light videos. Our method contains three stages: 1) event synthesis and enhancement, 2) event and image fusion, and 3) low-light enhancement. In this framework, we design two novel modules (event-image fusion transform and event-guided dual branch) for the second and third stages, respectively. Extensive experiments show that our method outperforms existing low-light video or single image enhancement approaches on both synthetic and real LLVE datasets. Our code will be available at https://gitee.com/mindspore/models/tree/master/research/cv/LLVE-SEG.", + "primary_area": "computer vision ii", + "author": "Lin Liu; Junfeng An; Jianzhuang Liu; Shanxin Yuan; Xiangyu Chen; Wengang Zhou; Houqiang Li; Yan Feng Wang; Qi Tian", + "authorids": "", + "aff": "CAS Key Laboratory of Technology in GIPAS, EEIS Department, University of Science and Technology of China; Independent Researcher; Huawei Noah\u2019s Ark Lab; Queen Mary University of London; University of Macau + Shenzhen Institute of Advanced Technology (SIAT); CAS Key Laboratory of Technology in GIPAS, EEIS Department, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, EEIS Department, University of Science and Technology of China; Cooperative medianet innovation center of Shanghai Jiao Tong University; Huawei Cloud BU", + "bibtex": "@article{Liu_An_Liu_Yuan_Chen_Zhou_Li_Wang_Tian_2023, title={Low-Light Video Enhancement with Synthetic Event Guidance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25257}, DOI={10.1609/aaai.v37i2.25257}, abstractNote={Low-light video enhancement (LLVE) is an important yet 
challenging task with many applications such as photographing and autonomous driving. Unlike single image low-light enhancement, most LLVE methods utilize temporal information from adjacent frames to restore the color and remove the noise of the target frame. However, these algorithms, based on the framework of multi-frame alignment and enhancement, may produce multi-frame fusion artifacts when encountering extreme low light or fast motion. In this paper, inspired by the low latency and high dynamic range of events, we use synthetic events from multiple frames to guide the enhancement and restoration of low-light videos. Our method contains three stages: 1) event synthesis and enhancement, 2) event and image fusion, and 3) low-light enhancement. In this framework, we design two novel modules (event-image fusion transform and event-guided dual branch) for the second and third stages, respectively. Extensive experiments show that our method outperforms existing low-light video or single image enhancement approaches on both synthetic and real LLVE datasets. 
Our code will be available at https://gitee.com/mindspore/models/tree/master/research/cv/LLVE-SEG.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Lin and An, Junfeng and Liu, Jianzhuang and Yuan, Shanxin and Chen, Xiangyu and Zhou, Wengang and Li, Houqiang and Wang, Yan Feng and Tian, Qi}, year={2023}, month={Jun.}, pages={1692-1700} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25257/25029", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25257", + "pdf_size": 4428336, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12224589718186323820&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "; ; ; ; ; ; ; ; ", + "email": "; ; ; ; ; ; ; ; ", + "github": "", + "project": "https://gitee.com/mindspore/models/tree/master/research/cv/LLVE-SEG", + "author_num": 9, + "aff_unique_index": "0;1;2;3;4+5;0;0;6;2", + "aff_unique_norm": "University of Science and Technology of China;Independent Researcher;Huawei;Queen Mary University of London;University of Macau;Shenzhen Institute of Advanced Technology;Shanghai Jiao Tong University", + "aff_unique_dep": "EEIS Department;;Noah\u2019s Ark Lab;;;;Cooperative medianet innovation center", + "aff_unique_url": "http://www.ustc.edu.cn;;https://www.huawei.com;https://www.qmul.ac.uk;https://www.um.edu.mo;http://www.siat.ac.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "USTC;;Huawei;QMUL;UM;SIAT;SJTU", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";London;Shenzhen", + "aff_country_unique_index": "0;0;2;3+0;0;0;0;0", + "aff_country_unique": "China;;United Kingdom;Macau" + }, + { + "id": "article-25572", + "title": "Low-Resource Personal Attribute Prediction from Conversations", + "track": "main", + "status": "Technical", + "abstract": "Personal knowledge bases (PKBs) are crucial for a broad range of applications such as personalized recommendation and Web-based chatbots. 
A critical challenge to build PKBs is extracting personal attribute knowledge from users' conversation data. Given some users of a conversational system, a personal attribute and these users' utterances, our goal is to predict the ranking of the given personal attribute values for each user. Previous studies often rely on a relative number of resources such as labeled utterances and external data, yet the attribute knowledge embedded in unlabeled utterances is underutilized and their performance of predicting some difficult personal attributes is still unsatisfactory. In addition, it is found that some text classification methods could be employed to resolve this task directly. However, they also perform not well over those difficult personal attributes. In this paper, we propose a novel framework PEARL to predict personal attributes from conversations by leveraging the abundant personal attribute knowledge from utterances under a low-resource setting in which no labeled utterances or external data are utilized. PEARL combines the biterm semantic information with the word co-occurrence information seamlessly via employing the updated prior attribute knowledge to refine the biterm topic model's Gibbs sampling process in an iterative manner. 
The extensive experimental results show that PEARL outperforms all the baseline methods not only on the task of personal attribute prediction from conversations over two data sets, but also on the more general weakly supervised text classification task over one data set.", + "primary_area": "data mining and knowledge management", + "author": "Yinan Liu; Hu Chen; Wei Shen; Jiaoyan Chen", + "authorids": "", + "aff": "TKLNDST, College of Computer Science, Nankai University, Tianjin 300350, China; TKLNDST, College of Computer Science, Nankai University, Tianjin 300350, China; TKLNDST, College of Computer Science, Nankai University, Tianjin 300350, China; Department of Computer Science, The University of Manchester", + "bibtex": "@article{Liu_Chen_Shen_Chen_2023, title={Low-Resource Personal Attribute Prediction from Conversations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25572}, DOI={10.1609/aaai.v37i4.25572}, abstractNote={Personal knowledge bases (PKBs) are crucial for a broad range of applications such as personalized recommendation and Web-based chatbots. A critical challenge to build PKBs is extracting personal attribute knowledge from users\u2019 conversation data. Given some users of a conversational system, a personal attribute and these users\u2019 utterances, our goal is to predict the ranking of the given personal attribute values for each user. Previous studies often rely on a relative number of resources such as labeled utterances and external data, yet the attribute knowledge embedded in unlabeled utterances is underutilized and their performance of predicting some difficult personal attributes is still unsatisfactory. In addition, it is found that some text classification methods could be employed to resolve this task directly. However, they also perform not well over those difficult personal attributes. 
In this paper, we propose a novel framework PEARL to predict personal attributes from conversations by leveraging the abundant personal attribute knowledge from utterances under a low-resource setting in which no labeled utterances or external data are utilized. PEARL combines the biterm semantic information with the word co-occurrence information seamlessly via employing the updated prior attribute knowledge to refine the biterm topic model\u2019s Gibbs sampling process in an iterative manner. The extensive experimental results show that PEARL outperforms all the baseline methods not only on the task of personal attribute prediction from conversations over two data sets, but also on the more general weakly supervised text classification task over one data set.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yinan and Chen, Hu and Shen, Wei and Chen, Jiaoyan}, year={2023}, month={Jun.}, pages={4507-4515} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25572/25344", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25572", + "pdf_size": 1420409, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17331712478547159733&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.nankai.edu.cn;nankai.edu.cn; ;manchester.ac.uk", + "email": "mail.nankai.edu.cn;nankai.edu.cn; ;manchester.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Nankai University;The University of Manchester", + "aff_unique_dep": "College of Computer Science;Department of Computer Science", + "aff_unique_url": "http://www.nankai.edu.cn;https://www.manchester.ac.uk", + "aff_unique_abbr": "Nankai;UoM", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Tianjin;", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26601", + "title": 
"M-sense: Modeling Narrative Structure in Short Personal Narratives Using Protagonist\u2019s Mental Representations", + "track": "main", + "status": "Technical", + "abstract": "Narrative is a ubiquitous component of human communication. Understanding its structure plays a critical role in a wide variety of applications, ranging from simple comparative analyses to enhanced narrative retrieval, comprehension, or reasoning capabilities. Prior research in narratology has highlighted the importance of studying the links between cognitive and linguistic aspects of narratives for effective comprehension. This interdependence is related to the textual semantics and mental language in narratives, referring to characters' motivations, feelings or emotions, and beliefs. However, this interdependence is hardly explored for modeling narratives. In this work, we propose the task of automatically detecting prominent elements of the narrative structure by analyzing the role of characters' inferred mental state along with linguistic information at the syntactic and semantic levels. We introduce a STORIES dataset of short personal narratives containing manual annotations of key elements of narrative structure, specifically climax and resolution. To this end, we implement a computational model that leverages the protagonist's mental state information obtained from a pre-trained model trained on social commonsense knowledge and integrates their representations with contextual semantic embed-dings using a multi-feature fusion approach. 
Evaluating against prior zero-shot and supervised baselines, we find that our model is able to achieve significant improvements in the task of identifying climax and resolution.", + "primary_area": "speech natural language processing", + "author": "Prashanth Vijayaraghavan; Deb Roy", + "authorids": "", + "aff": "MIT Media Lab, 75 Amherst Street, Cambridge, MA, 02139 USA; MIT Media Lab, 75 Amherst Street, Cambridge, MA, 02139 USA", + "bibtex": "@article{Vijayaraghavan_Roy_2023, title={M-sense: Modeling Narrative Structure in Short Personal Narratives Using Protagonist\u2019s Mental Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26601}, DOI={10.1609/aaai.v37i11.26601}, abstractNote={Narrative is a ubiquitous component of human communication. Understanding its structure plays a critical role in a wide variety of applications, ranging from simple comparative analyses to enhanced narrative retrieval, comprehension, or reasoning capabilities. Prior research in narratology has highlighted the importance of studying the links between cognitive and linguistic aspects of narratives for effective comprehension. This interdependence is related to the textual semantics and mental language in narratives, referring to characters\u2019 motivations, feelings or emotions, and beliefs. However, this interdependence is hardly explored for modeling narratives. In this work, we propose the task of automatically detecting prominent elements of the narrative structure by analyzing the role of characters\u2019 inferred mental state along with linguistic information at the syntactic and semantic levels. We introduce a STORIES dataset of short personal narratives containing manual annotations of key elements of narrative structure, specifically climax and resolution. 
To this end, we implement a computational model that leverages the protagonist\u2019s mental state information obtained from a pre-trained model trained on social commonsense knowledge and integrates their representations with contextual semantic embed-dings using a multi-feature fusion approach. Evaluating against prior zero-shot and supervised baselines, we find that our model is able to achieve significant improvements in the task of identifying climax and resolution.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vijayaraghavan, Prashanth and Roy, Deb}, year={2023}, month={Jun.}, pages={13664-13672} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26601/26373", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26601", + "pdf_size": 678704, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16459890665730184741&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mit.edu;media.mit.edu", + "email": "mit.edu;media.mit.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "Media Lab", + "aff_unique_url": "http://www.media.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25253", + "title": "M3AE: Multimodal Representation Learning for Brain Tumor Segmentation with Missing Modalities", + "track": "main", + "status": "Technical", + "abstract": "Multimodal magnetic resonance imaging (MRI) provides complementary information for sub-region analysis of brain tumors. Plenty of methods have been proposed for automatic brain tumor segmentation using four common MRI modalities and achieved remarkable performance. 
In practice, however, it is common to have one or more modalities missing due to image corruption, artifacts, acquisition protocols, allergy to contrast agents, or simply cost. In this work, we propose a novel two-stage framework for brain tumor segmentation with missing modalities. In the first stage, a multimodal masked autoencoder (M3AE) is proposed, where both random modalities (i.e., modality dropout) and random patches of the remaining modalities are masked for a reconstruction task, for self-supervised learning of robust multimodal representations against missing modalities. To this end, we name our framework M3AE. Meanwhile, we employ model inversion to optimize a representative full-modal image at marginal extra cost, which will be used to substitute for the missing modalities and boost performance during inference. Then in the second stage, a memory-efficient self distillation is proposed to distill knowledge between heterogenous missing-modal situations while fine-tuning the model for supervised segmentation. Our M3AE belongs to the \u2018catch-all\u2019 genre where a single model can be applied to all possible subsets of modalities, thus is economic for both training and deployment. Extensive experiments on BraTS 2018 and 2020 datasets demonstrate its superior performance to existing state-of-the-art methods with missing modalities, as well as the efficacy of its components. 
Our code is available at: https://github.com/ccarliu/m3ae.", + "primary_area": "computer vision ii", + "author": "Hong Liu; Dong Wei; Donghuan Lu; Jinghan Sun; Liansheng Wang; Yefeng Zheng", + "authorids": "", + "aff": "School of informatics, Xiamen University, Xiamen, China + Tencent Jarvis Lab, Tencent Healthcare (Shenzhen) Co., Ltd., Shenzhen, China; Tencent Jarvis Lab, Tencent Healthcare (Shenzhen) Co., Ltd., Shenzhen, China; Tencent Jarvis Lab, Tencent Healthcare (Shenzhen) Co., Ltd., Shenzhen, China; School of Medicine, Xiamen University, Xiamen, China + Tencent Jarvis Lab, Tencent Healthcare (Shenzhen) Co., Ltd., Shenzhen, China; School of informatics, Xiamen University, Xiamen, China; Tencent Jarvis Lab, Tencent Healthcare (Shenzhen) Co., Ltd., Shenzhen, China", + "bibtex": "@article{Liu_Wei_Lu_Sun_Wang_Zheng_2023, title={M3AE: Multimodal Representation Learning for Brain Tumor Segmentation with Missing Modalities}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25253}, DOI={10.1609/aaai.v37i2.25253}, abstractNote={Multimodal magnetic resonance imaging (MRI) provides complementary information for sub-region analysis of brain tumors. Plenty of methods have been proposed for automatic brain tumor segmentation using four common MRI modalities and achieved remarkable performance. In practice, however, it is common to have one or more modalities missing due to image corruption, artifacts, acquisition protocols, allergy to contrast agents, or simply cost. In this work, we propose a novel two-stage framework for brain tumor segmentation with missing modalities. In the first stage, a multimodal masked autoencoder (M3AE) is proposed, where both random modalities (i.e., modality dropout) and random patches of the remaining modalities are masked for a reconstruction task, for self-supervised learning of robust multimodal representations against missing modalities. To this end, we name our framework M3AE. 
Meanwhile, we employ model inversion to optimize a representative full-modal image at marginal extra cost, which will be used to substitute for the missing modalities and boost performance during inference. Then in the second stage, a memory-efficient self distillation is proposed to distill knowledge between heterogenous missing-modal situations while fine-tuning the model for supervised segmentation. Our M3AE belongs to the \u2018catch-all\u2019 genre where a single model can be applied to all possible subsets of modalities, thus is economic for both training and deployment. Extensive experiments on BraTS 2018 and 2020 datasets demonstrate its superior performance to existing state-of-the-art methods with missing modalities, as well as the efficacy of its components. Our code is available at: https://github.com/ccarliu/m3ae.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Hong and Wei, Dong and Lu, Donghuan and Sun, Jinghan and Wang, Liansheng and Zheng, Yefeng}, year={2023}, month={Jun.}, pages={1657-1665} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25253/25025", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25253", + "pdf_size": 750739, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9560749203568963623&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.xmu.edu.cn;tencent.com;tencent.com;stu.xmu.edu.cn;xmu.edu.cn;tencent.com", + "email": "stu.xmu.edu.cn;tencent.com;tencent.com;stu.xmu.edu.cn;xmu.edu.cn;tencent.com", + "github": "https://github.com/ccarliu/m3ae", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;1;0+1;0;1", + "aff_unique_norm": "Xiamen University;Tencent Healthcare (Shenzhen) Co., Ltd.", + "aff_unique_dep": "School of informatics;Tencent Jarvis Lab", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "XMU;Tencent", + "aff_campus_unique_index": 
"0+1;1;1;0+1;0;1", + "aff_campus_unique": "Xiamen;Shenzhen", + "aff_country_unique_index": "0+0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25547", + "title": "MA-GCL: Model Augmentation Tricks for Graph Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Contrastive learning (CL), which can extract the information shared between different contrastive views, has become a popular paradigm for vision representation learning. Inspired by the success in computer vision, recent work introduces CL into graph modeling, dubbed as graph contrastive learning (GCL). However, generating contrastive views in graphs is more challenging than that in images, since we have little prior knowledge on how to significantly augment a graph without changing its labels. We argue that typical data augmentation techniques (e.g., edge dropping) in GCL cannot generate diverse enough contrastive views to filter out noises. Moreover, previous GCL methods employ two view encoders with exactly the same neural architecture and tied parameters, which further harms the diversity of augmented views. To address this limitation, we propose a novel paradigm named model augmented GCL (MA-GCL), which will focus on manipulating the architectures of view encoders instead of perturbing graph inputs. Specifically, we present three easy-to-implement model augmentation tricks for GCL, namely asymmetric, random and shuffling, which can respectively help alleviate high-frequency noises, enrich training instances and bring safer augmentations. All three tricks are compatible with typical data augmentations. Experimental results show that MA-GCL can achieve state-of-the-art performance on node classification benchmarks by applying the three tricks on a simple base model. Extensive studies also validate our motivation and the effectiveness of each trick. (Code, data and appendix are available at https://github.com/GXM1141/MA-GCL. 
)", + "primary_area": "data mining and knowledge management", + "author": "Xumeng Gong; Cheng Yang; Chuan Shi", + "authorids": "", + "aff": "Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications", + "bibtex": "@article{Gong_Yang_Shi_2023, title={MA-GCL: Model Augmentation Tricks for Graph Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25547}, DOI={10.1609/aaai.v37i4.25547}, abstractNote={Contrastive learning (CL), which can extract the information shared between different contrastive views, has become a popular paradigm for vision representation learning. Inspired by the success in computer vision, recent work introduces CL into graph modeling, dubbed as graph contrastive learning (GCL). However, generating contrastive views in graphs is more challenging than that in images, since we have little prior knowledge on how to significantly augment a graph without changing its labels. We argue that typical data augmentation techniques (e.g., edge dropping) in GCL cannot generate diverse enough contrastive views to filter out noises. Moreover, previous GCL methods employ two view encoders with exactly the same neural architecture and tied parameters, which further harms the diversity of augmented views. To address this limitation, we propose a novel paradigm named model augmented GCL (MA-GCL), which will focus on manipulating the architectures of view encoders instead of perturbing graph inputs. Specifically, we present three easy-to-implement model augmentation tricks for GCL, namely asymmetric, random and shuffling, which can respectively help alleviate high-frequency noises, enrich training instances and bring safer augmentations. All three tricks are compatible with typical data augmentations. 
Experimental results show that MA-GCL can achieve state-of-the-art performance on node classification benchmarks by applying the three tricks on a simple base model. Extensive studies also validate our motivation and the effectiveness of each trick. (Code, data and appendix are available at https://github.com/GXM1141/MA-GCL. )}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gong, Xumeng and Yang, Cheng and Shi, Chuan}, year={2023}, month={Jun.}, pages={4284-4292} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25547/25319", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25547", + "pdf_size": 459980, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15114598059680092544&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "https://github.com/GXM1141/MA-GCL", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25311", + "title": "MAGIC: Mask-Guided Image Synthesis by Inverting a Quasi-robust Classifier", + "track": "main", + "status": "Technical", + "abstract": "We offer a method for one-shot mask-guided image synthesis that allows controlling manipulations of a single image by inverting a quasi-robust classifier equipped with strong regularizers. 
Our proposed method, entitled MAGIC, leverages structured gradients from a pre-trained quasi-robust classifier to better preserve the input semantics while preserving its classification accuracy, thereby guaranteeing credibility in the synthesis.\nUnlike current methods that use complex primitives to supervise the process or use attention maps as a weak supervisory signal, MAGIC aggregates gradients over the input, driven by a guide binary mask that enforces a strong, spatial prior. MAGIC implements a series of manipulations with a single framework achieving shape and location control, intense non-rigid shape deformations, and copy/move operations in the presence of repeating objects and gives users firm control over the synthesis by requiring to simply specify binary guide masks. \nOur study and findings are supported by various qualitative comparisons with the state-of-the-art on the same images sampled from ImageNet and quantitative analysis using machine perception along with a user survey of 100+ participants that endorse our synthesis quality.", + "primary_area": "computer vision ii", + "author": "Mozhdeh Rouhsedaghat; Masoud Monajatipoor; C.-C. Jay Kuo; Iacopo Masi", + "authorids": "", + "aff": "University of Southern California (USC); University of California, Los Angeles (UCLA); University of Southern California (USC); Sapienza, University of Rome", + "bibtex": "@article{Rouhsedaghat_Monajatipoor_Kuo_Masi_2023, title={MAGIC: Mask-Guided Image Synthesis by Inverting a Quasi-robust Classifier}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25311}, DOI={10.1609/aaai.v37i2.25311}, abstractNote={We offer a method for one-shot mask-guided image synthesis that allows controlling manipulations of a single image by inverting a quasi-robust classifier equipped with strong regularizers. 
Our proposed method, entitled MAGIC, leverages structured gradients from a pre-trained quasi-robust classifier to better preserve the input semantics while preserving its classification accuracy, thereby guaranteeing credibility in the synthesis.\nUnlike current methods that use complex primitives to supervise the process or use attention maps as a weak supervisory signal, MAGIC aggregates gradients over the input, driven by a guide binary mask that enforces a strong, spatial prior. MAGIC implements a series of manipulations with a single framework achieving shape and location control, intense non-rigid shape deformations, and copy/move operations in the presence of repeating objects and gives users firm control over the synthesis by requiring to simply specify binary guide masks. Our study and findings are supported by various qualitative comparisons with the state-of-the-art on the same images sampled from ImageNet and quantitative analysis using machine perception along with a user survey of 100+ participants that endorse our synthesis quality.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rouhsedaghat, Mozhdeh and Monajatipoor, Masoud and Kuo, C.-C. 
Jay and Masi, Iacopo}, year={2023}, month={Jun.}, pages={2172-2179} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25311/25083", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25311", + "pdf_size": 2550490, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6599373862787972709&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "usc.edu;ucla.edu;sipi.usc.edu;di.uniroma1.it", + "email": "usc.edu;ucla.edu;sipi.usc.edu;di.uniroma1.it", + "github": "https://github.com/mozhdehrouhsedaghat/magic", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "University of Southern California;University of California, Los Angeles;University of Rome", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.usc.edu;https://www.ucla.edu;https://www.uniroma1.it", + "aff_unique_abbr": "USC;UCLA;Sapienza", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Los Angeles;Rome", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "United States;Italy" + }, + { + "id": "article-25787", + "title": "MAPS-KB: A Million-Scale Probabilistic Simile Knowledge Base", + "track": "main", + "status": "Technical", + "abstract": "The ability to understand and generate similes is an imperative step to realize human-level AI. However, there is still a considerable gap between machine intelligence and human cognition in similes, since deep models based on statistical distribution tend to favour high-frequency similes. Hence, a large-scale symbolic knowledge base of similes is required, as it contributes to the modeling of diverse yet unpopular similes while facilitating additional evaluation and reasoning. To bridge the gap, we propose a novel framework for large-scale simile knowledge base construction, as well as two probabilistic metrics which enable an improved understanding of simile phenomena in natural language. 
Overall, we construct MAPS-KB, a million-scale probabilistic simile knowledge base, covering 4.3 million triplets over 0.4 million terms from 70 GB corpora. We conduct sufficient experiments to justify the effectiveness and necessity of the methods of our framework. We also apply MAPS-KB on three downstream tasks to achieve state-of-the-art performance, further demonstrating the value of MAPS-KB. Resources of MAPS-KB are publicly available at https://github.com/Abbey4799/MAPS-KB.", + "primary_area": "knowledge representation and reasoning", + "author": "Qianyu He; Xintao Wang; Jiaqing Liang; Yanghua Xiao", + "authorids": "", + "aff": "Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University; School of Data Science, Fudan University; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University+Fudan-Aishu Cognitive Intelligence Joint Research Center, Shanghai, China", + "bibtex": "@article{He_Wang_Liang_Xiao_2023, title={MAPS-KB: A Million-Scale Probabilistic Simile Knowledge Base}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25787}, DOI={10.1609/aaai.v37i5.25787}, abstractNote={The ability to understand and generate similes is an imperative step to realize human-level AI. However, there is still a considerable gap between machine intelligence and human cognition in similes, since deep models based on statistical distribution tend to favour high-frequency similes. Hence, a large-scale symbolic knowledge base of similes is required, as it contributes to the modeling of diverse yet unpopular similes while facilitating additional evaluation and reasoning. To bridge the gap, we propose a novel framework for large-scale simile knowledge base construction, as well as two probabilistic metrics which enable an improved understanding of simile phenomena in natural language. 
Overall, we construct MAPS-KB, a million-scale probabilistic simile knowledge base, covering 4.3 million triplets over 0.4 million terms from 70 GB corpora. We conduct sufficient experiments to justify the effectiveness and necessity of the methods of our framework. We also apply MAPS-KB on three downstream tasks to achieve state-of-the-art performance, further demonstrating the value of MAPS-KB. Resources of MAPS-KB are publicly available at https://github.com/Abbey4799/MAPS-KB.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Qianyu and Wang, Xintao and Liang, Jiaqing and Xiao, Yanghua}, year={2023}, month={Jun.}, pages={6398-6406} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25787/25559", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25787", + "pdf_size": 369137, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1981103340140126920&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "m.fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "https://github.com/Abbey4799/MAPS-KB", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "0;0;0+0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27076", + "title": "MARCOL: A Maritime Collision Avoidance Decision-Making Testbed", + "track": "demonstrations", + "status": "Technical", + "abstract": "Safe and efficient maritime navigation is fundamental for autonomous surface vehicles to support many applications in the blue economy, including cargo transportation that covers 90% of the global marine 
industry. We developed MARCOL, a collision avoidance decision-making framework that provides safe, efficient, and explainable collision avoidance strategies and that allows for repeated experiments under diverse high-traffic scenarios.", + "primary_area": "", + "author": "Mingi Jeong; Alberto Quattrini Li", + "authorids": "", + "aff": "Department of Computer Science, Dartmouth College, USA; Department of Computer Science, Dartmouth College, USA", + "bibtex": "@article{Jeong_Quattrini Li_2024, title={MARCOL: A Maritime Collision Avoidance Decision-Making Testbed}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27076}, DOI={10.1609/aaai.v37i13.27076}, abstractNote={Safe and efficient maritime navigation is fundamental for autonomous surface vehicles to support many applications in the blue economy, including cargo transportation that covers 90% of the global marine industry. We developed MARCOL, a collision avoidance decision-making framework that provides safe, efficient, and explainable collision avoidance strategies and that allows for repeated experiments under diverse high-traffic scenarios.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jeong, Mingi and Quattrini Li, Alberto}, year={2024}, month={Jul.}, pages={16452-16454} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27076/26848", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27076", + "pdf_size": 2435796, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8089668945364740006&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "dartmouth.edu;dartmouth.edu", + "email": "dartmouth.edu;dartmouth.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Dartmouth College", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://dartmouth.edu", + "aff_unique_abbr": "Dartmouth", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26640", + "title": "MCL: Multi-Granularity Contrastive Learning Framework for Chinese NER", + "track": "main", + "status": "Technical", + "abstract": "Recently, researchers have applied the word-character lattice framework to integrated word information, which has become very popular for Chinese named entity recognition (NER). However, prior approaches fuse word information by different variants of encoders such as Lattice LSTM or Flat-Lattice Transformer, but are still not data-efficient indeed to fully grasp the depth interaction of cross-granularity and important word information from the lexicon. In this paper, we go beyond the typical lattice structure and propose a novel Multi-Granularity Contrastive Learning framework (MCL), that aims to optimize the inter-granularity distribution distance and emphasize the critical matched words in the lexicon. By carefully combining cross-granularity contrastive learning and bi-granularity contrastive learning, the network can explicitly leverage lexicon information on the initial lattice structure, and further provide more dense interactions of across-granularity, thus significantly improving model performance. Experiments on four Chinese NER datasets show that MCL obtains state-of-the-art results while considering model efficiency. 
The source code of the proposed method is publicly available at https://github.com/zs50910/MCL", + "primary_area": "speech natural language processing", + "author": "Shan Zhao; ChengYu Wang; Minghao Hu; Tianwei Yan; Meng Wang", + "authorids": "", + "aff": "School of Computer Science and Information Engineering, HeFei University of Technology, HeFei, China; School of Computer Science and Information Engineering, HeFei University of Technology, HeFei, China + College of Computer, National University of Defense Technology, Changsha, China; Information Research Center of Military Science, PLA Academy of Military Science, Beijing, China; College of Computer, National University of Defense Technology, Changsha, China; School of Computer Science and Information Engineering, HeFei University of Technology, HeFei, China", + "bibtex": "@article{Zhao_Wang_Hu_Yan_Wang_2023, title={MCL: Multi-Granularity Contrastive Learning Framework for Chinese NER}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26640}, DOI={10.1609/aaai.v37i11.26640}, abstractNote={Recently, researchers have applied the word-character lattice framework to integrated word information, which has become very popular for Chinese named entity recognition (NER). However, prior approaches fuse word information by different variants of encoders such as Lattice LSTM or Flat-Lattice Transformer, but are still not data-efficient indeed to fully grasp the depth interaction of cross-granularity and important word information from the lexicon. In this paper, we go beyond the typical lattice structure and propose a novel Multi-Granularity Contrastive Learning framework (MCL), that aims to optimize the inter-granularity distribution distance and emphasize the critical matched words in the lexicon. 
By carefully combining cross-granularity contrastive learning and bi-granularity contrastive learning, the network can explicitly leverage lexicon information on the initial lattice structure, and further provide more dense interactions of across-granularity, thus significantly improving model performance. Experiments on four Chinese NER datasets show that MCL obtains state-of-the-art results while considering model efficiency. The source code of the proposed method is publicly available at https://github.com/zs50910/MCL}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Shan and Wang, ChengYu and Hu, Minghao and Yan, Tianwei and Wang, Meng}, year={2023}, month={Jun.}, pages={14011-14019} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26640/26412", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26640", + "pdf_size": 724511, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3711075895527377755&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "hfut.edu.cn;gmail.com;gmail.com;gmail.com;gmail.com", + "email": "hfut.edu.cn;gmail.com;gmail.com;gmail.com;gmail.com", + "github": "https://github.com/zs50910/MCL", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;1;0", + "aff_unique_norm": "HeFei University of Technology;National University of Defense Technology;PLA Academy of Military Science", + "aff_unique_dep": "School of Computer Science and Information Engineering;College of Computer;Information Research Center of Military Science", + "aff_unique_url": "http://www.hfut.edu.cn;http://www.nudt.edu.cn;", + "aff_unique_abbr": "HFUT;NUDT;", + "aff_campus_unique_index": "0;0+1;2;1;0", + "aff_campus_unique": "HeFei;Changsha;Beijing", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25639", + "title": "MDM: Molecular Diffusion Model for 3D Molecule Generation", + 
"track": "main", + "status": "Technical", + "abstract": "Molecule generation, especially generating 3D molecular geometries from scratch (i.e., 3D de novo generation), has become a fundamental task in drug design. Existing diffusion based 3D molecule generation methods could suffer from unsatisfactory performances, especially when generating large molecules. At the same time, the generated molecules lack enough diversity. This paper proposes a novel diffusion model to address those two challenges. \n\nFirst, interatomic relations are not included in molecules' 3D point cloud representations. Thus, it is difficult for existing generative models to capture the potential interatomic forces and abundant local constraints. \nTo tackle this challenge, we propose to augment the potential interatomic forces and further involve dual equivariant encoders to encode interatomic forces of different strengths.\nSecond, existing diffusion-based models essentially shift elements in geometry along the gradient of data density. Such a process lacks enough exploration in the intermediate steps of the Langevin dynamics. To address this issue, we introduce a distributional controlling variable in each diffusion/reverse step to enforce thorough explorations and further improve generation diversity.\n\nExtensive experiments on multiple benchmarks demonstrate that the proposed model significantly outperforms existing methods for both unconditional and conditional generation tasks. We also conduct case studies to help understand the physicochemical properties of the generated molecules. 
The codes are available at https://github.com/tencent-ailab/MDM.", + "primary_area": "domain s of application", + "author": "Lei Huang; Hengtong Zhang; Tingyang Xu; Ka-Chun Wong", + "authorids": "", + "aff": "City University of Hong Kong+Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; City University of Hong Kong+Tencent AI Lab", + "bibtex": "@article{Huang_Zhang_Xu_Wong_2023, title={MDM: Molecular Diffusion Model for 3D Molecule Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25639}, DOI={10.1609/aaai.v37i4.25639}, abstractNote={Molecule generation, especially generating 3D molecular geometries from scratch (i.e., 3D de novo generation), has become a fundamental task in drug design. Existing diffusion based 3D molecule generation methods could suffer from unsatisfactory performances, especially when generating large molecules. At the same time, the generated molecules lack enough diversity. This paper proposes a novel diffusion model to address those two challenges. First, interatomic relations are not included in molecules\u2019 3D point cloud representations. Thus, it is difficult for existing generative models to capture the potential interatomic forces and abundant local constraints. To tackle this challenge, we propose to augment the potential interatomic forces and further involve dual equivariant encoders to encode interatomic forces of different strengths.\nSecond, existing diffusion-based models essentially shift elements in geometry along the gradient of data density. Such a process lacks enough exploration in the intermediate steps of the Langevin dynamics. To address this issue, we introduce a distributional controlling variable in each diffusion/reverse step to enforce thorough explorations and further improve generation diversity. Extensive experiments on multiple benchmarks demonstrate that the proposed model significantly outperforms existing methods for both unconditional and conditional generation tasks. 
We also conduct case studies to help understand the physicochemical properties of the generated molecules. The codes are available at https://github.com/tencent-ailab/MDM.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Lei and Zhang, Hengtong and Xu, Tingyang and Wong, Ka-Chun}, year={2023}, month={Jun.}, pages={5105-5112} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25639/25411", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25639", + "pdf_size": 518934, + "gs_citation": 89, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10369999193371475105&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 8, + "aff_domain": "my.cityu.edu.hk;gmail.com;tencent.com;cityu.edu.hk", + "email": "my.cityu.edu.hk;gmail.com;tencent.com;cityu.edu.hk", + "github": "https://github.com/tencent-ailab/MDM", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0+1", + "aff_unique_norm": "City University of Hong Kong;Tencent", + "aff_unique_dep": ";Tencent AI Lab", + "aff_unique_url": "https://www.cityu.edu.hk;https://ai.tencent.com", + "aff_unique_abbr": "CityU;Tencent AI Lab", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25230", + "title": "MEID: Mixture-of-Experts with Internal Distillation for Long-Tailed Video Recognition", + "track": "main", + "status": "Technical", + "abstract": "The long-tailed video recognition problem is especially challenging, as videos tend to be long and untrimmed, and each video may contain multiple classes, causing frame-level class imbalance. The previous method tackles the long-tailed video recognition only through frame-level sampling for class re-balance without distinguishing the frame-level feature representation between head and tail classes. 
To improve the frame-level feature representation of tail classes, we modulate the frame-level features with an auxiliary distillation loss to reduce the distribution distance between head and tail classes. Moreover, we design a mixture-of-experts framework with two different expert designs, i.e., the first expert with an attention-based classification network handling the original long-tailed distribution, and the second expert dealing with the re-balanced distribution from class-balanced sampling. Notably, in the second expert, we specifically focus on the frames unsolved by the first expert through designing a complementary frame selection module, which inherits the attention weights from the first expert and selects frames with low attention weights, and we also enhance the motion feature representation for these selected frames. To highlight the multi-label challenge in long-tailed video recognition, we create two additional benchmarks based on Charades and CharadesEgo videos with the multi-label property, called CharadesLT and CharadesEgoLT. Extensive experiments are conducted on the existing long-tailed video benchmark VideoLT and the two new benchmarks to verify the effectiveness of our proposed method with state-of-the-art performance. 
The code and proposed benchmarks are released at https://github.com/VisionLanguageLab/MEID.", + "primary_area": "computer vision ii", + "author": "Xinjie Li; Huijuan Xu", + "authorids": "", + "aff": "Pennsylvania State University, University Park, USA; Pennsylvania State University, University Park, USA", + "bibtex": "@article{Li_Xu_2023, title={MEID: Mixture-of-Experts with Internal Distillation for Long-Tailed Video Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25230}, DOI={10.1609/aaai.v37i2.25230}, abstractNote={The long-tailed video recognition problem is especially challenging, as videos tend to be long and untrimmed, and each video may contain multiple classes, causing frame-level class imbalance. The previous method tackles the long-tailed video recognition only through frame-level sampling for class re-balance without distinguishing the frame-level feature representation between head and tail classes. To improve the frame-level feature representation of tail classes, we modulate the frame-level features with an auxiliary distillation loss to reduce the distribution distance between head and tail classes. Moreover, we design a mixture-of-experts framework with two different expert designs, i.e., the first expert with an attention-based classification network handling the original long-tailed distribution, and the second expert dealing with the re-balanced distribution from class-balanced sampling. Notably, in the second expert, we specifically focus on the frames unsolved by the first expert through designing a complementary frame selection module, which inherits the attention weights from the first expert and selects frames with low attention weights, and we also enhance the motion feature representation for these selected frames. 
To highlight the multi-label challenge in long-tailed video recognition, we create two additional benchmarks based on Charades and CharadesEgo videos with the multi-label property, called CharadesLT and CharadesEgoLT. Extensive experiments are conducted on the existing long-tailed video benchmark VideoLT and the two new benchmarks to verify the effectiveness of our proposed method with state-of-the-art performance. The code and proposed benchmarks are released at https://github.com/VisionLanguageLab/MEID.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xinjie and Xu, Huijuan}, year={2023}, month={Jun.}, pages={1451-1459} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25230/25002", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25230", + "pdf_size": 335301, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14653168191211953382&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "psu.edu;psu.edu", + "email": "psu.edu;psu.edu", + "github": "https://github.com/VisionLanguageLab/MEID", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Pennsylvania State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.psu.edu", + "aff_unique_abbr": "PSU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "University Park", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25112", + "title": "MGFN: Magnitude-Contrastive Glance-and-Focus Network for Weakly-Supervised Video Anomaly Detection", + "track": "main", + "status": "Technical", + "abstract": "Weakly supervised detection of anomalies in surveillance videos is a challenging task. 
Going beyond existing works that have deficient capabilities to localize anomalies in long videos, we propose a novel glance and focus network to effectively integrate spatial-temporal information for accurate anomaly detection. In addition, we empirically found that existing approaches that use feature magnitudes to represent the degree of anomalies typically ignore the effects of scene variations, and hence result in sub-optimal performance due to the inconsistency of feature magnitudes across scenes. To address this issue, we propose the Feature Amplification Mechanism and a Magnitude Contrastive Loss to enhance the discriminativeness of feature magnitudes for detecting anomalies. Experimental results on two large-scale benchmarks UCF-Crime and XD-Violence manifest that our method outperforms state-of-the-art approaches.", + "primary_area": "computer vision i", + "author": "Yingxian Chen; Zhengzhe Liu; Baoheng Zhang; Wilton Fok; Xiaojuan Qi; Yik-Chung Wu", + "authorids": "", + "aff": "Department of Electrical and Electronic Engineering, The University of Hong Kong; The Chinese University of Hong Kong; Department of Electrical and Electronic Engineering, The University of Hong Kong; Department of Electrical and Electronic Engineering, The University of Hong Kong; Department of Electrical and Electronic Engineering, The University of Hong Kong; Department of Electrical and Electronic Engineering, The University of Hong Kong", + "bibtex": "@article{Chen_Liu_Zhang_Fok_Qi_Wu_2023, title={MGFN: Magnitude-Contrastive Glance-and-Focus Network for Weakly-Supervised Video Anomaly Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25112}, DOI={10.1609/aaai.v37i1.25112}, abstractNote={Weakly supervised detection of anomalies in surveillance videos is a challenging task. 
Going beyond existing works that have deficient capabilities to localize anomalies in long videos, we propose a novel glance and focus network to effectively integrate spatial-temporal information for accurate anomaly detection. In addition, we empirically found that existing approaches that use feature magnitudes to represent the degree of anomalies typically ignore the effects of scene variations, and hence result in sub-optimal performance due to the inconsistency of feature magnitudes across scenes. To address this issue, we propose the Feature Amplification Mechanism and a Magnitude Contrastive Loss to enhance the discriminativeness of feature magnitudes for detecting anomalies. Experimental results on two large-scale benchmarks UCF-Crime and XD-Violence manifest that our method outperforms state-of-the-art approaches.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Yingxian and Liu, Zhengzhe and Zhang, Baoheng and Fok, Wilton and Qi, Xiaojuan and Wu, Yik-Chung}, year={2023}, month={Jun.}, pages={387-395} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25112/24884", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25112", + "pdf_size": 1469982, + "gs_citation": 134, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2759024596595887775&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "hku.hk;cse.cuhk.edu.hk;hku.hk;eee.hku.hk;eee.hku.hk;eee.hku.hk", + "email": "hku.hk;cse.cuhk.edu.hk;hku.hk;eee.hku.hk;eee.hku.hk;eee.hku.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0", + "aff_unique_norm": "The University of Hong Kong;The Chinese University of Hong Kong", + "aff_unique_dep": "Department of Electrical and Electronic Engineering;", + "aff_unique_url": "https://www.hku.hk;https://www.cuhk.edu.hk", + "aff_unique_abbr": "HKU;CUHK", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26995", + "title": "MGIA: Mutual Gradient Inversion Attack in Multi-Modal Federated Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Recent studies have demonstrated that local training data in Federated Learning can be recovered from gradients, which are called gradient inversion attacks. These attacks display powerful effects on either computer vision or natural language processing tasks. As it is known that there are certain correlations between multi-modality data, we argue that the threat of such attacks combined with Multi-modal Learning may cause more severe effects. Different modalities may communicate through gradients to provide richer information for the attackers, thus improving the strength and efficiency of the gradient inversion attacks. In this paper, we propose the Mutual Gradient Inversion Attack (MGIA), by utilizing the shared labels between image and text modalities combined with the idea of knowledge distillation. Our experimental results show that MGIA achieves the best quality of both modality data and label recoveries in comparison with other methods. 
In the meanwhile, MGIA verifies that multi-modality gradient inversion attacks are more likely to disclose private information than the existing single-modality attacks.", + "primary_area": "", + "author": "Xuan Liu; Siqi Cai; Lin Li; Rui Zhang; Song Guo", + "authorids": "", + "aff": "The Hong Kong Polytechnic University; School of Computer Science and Artificial Intelligence, Wuhan University of Technology; School of Computer Science and Artificial Intelligence, Wuhan University of Technology; The Hong Kong Polytechnic University; The Hong Kong Polytechnic University", + "bibtex": "@article{Liu_Cai_Li_Zhang_Guo_2024, title={MGIA: Mutual Gradient Inversion Attack in Multi-Modal Federated Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26995}, DOI={10.1609/aaai.v37i13.26995}, abstractNote={Recent studies have demonstrated that local training data in Federated Learning can be recovered from gradients, which are called gradient inversion attacks. These attacks display powerful effects on either computer vision or natural language processing tasks. As it is known that there are certain correlations between multi-modality data, we argue that the threat of such attacks combined with Multi-modal Learning may cause more severe effects. Different modalities may communicate through gradients to provide richer information for the attackers, thus improving the strength and efficiency of the gradient inversion attacks. In this paper, we propose the Mutual Gradient Inversion Attack (MGIA), by utilizing the shared labels between image and text modalities combined with the idea of knowledge distillation. Our experimental results show that MGIA achieves the best quality of both modality data and label recoveries in comparison with other methods. 
In the meanwhile, MGIA verifies that multi-modality gradient inversion attacks are more likely to disclose private information than the existing single-modality attacks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Xuan and Cai, Siqi and Li, Lin and Zhang, Rui and Guo, Song}, year={2024}, month={Jul.}, pages={16270-16271} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26995/26767", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26995", + "pdf_size": 1623440, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=27635622207801692&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "connect.polyu.hk;whut.edu.cn;whut.edu.cn;comp.polyu.hk;polyu.hk", + "email": "connect.polyu.hk;whut.edu.cn;whut.edu.cn;comp.polyu.hk;polyu.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;0", + "aff_unique_norm": "The Hong Kong Polytechnic University;Wuhan University of Technology", + "aff_unique_dep": ";School of Computer Science and Artificial Intelligence", + "aff_unique_url": "https://www.polyu.edu.hk;http://www.wut.edu.cn", + "aff_unique_abbr": "PolyU;WUT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Wuhan", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25200", + "title": "MGTANet: Encoding Sequential LiDAR Points Using Long Short-Term Motion-Guided Temporal Attention for 3D Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Most scanning LiDAR sensors generate a sequence of point clouds in real-time. While conventional 3D object detectors use a set of unordered LiDAR points acquired over a fixed time interval, recent studies have revealed that substantial performance improvement can be achieved by exploiting the spatio-temporal context present in a sequence of LiDAR point sets. 
In this paper, we propose a novel 3D object detection architecture, which can encode LiDAR point cloud sequences acquired by multiple successive scans. The encoding process of the point cloud sequence is performed on two different time scales. We first design a short-term motion-aware voxel encoding that captures the short-term temporal changes of point clouds driven by the motion of objects in each voxel. We also propose long-term motion-guided bird\u2019s eye view (BEV) feature enhancement that adaptively aligns and aggregates the BEV feature maps obtained by the short-term voxel encoding by utilizing the dynamic motion context inferred from the sequence of the feature maps. The experiments conducted on the public nuScenes benchmark demonstrate that the proposed 3D object detector offers significant improvements in performance compared to the baseline methods and that it sets a state-of-the-art performance for certain 3D object detection categories. Code is available at https://github.com/HYjhkoh/MGTANet.git.", + "primary_area": "computer vision i", + "author": "Junho Koh; Junhyung Lee; Youngwoo Lee; Jaekyum Kim; Jun Won Choi", + "authorids": "", + "aff": "Hanyang University; Hanyang University; Hanyang University; Hanyang University; Hanyang University", + "bibtex": "@article{Koh_Lee_Lee_Kim_Choi_2023, title={MGTANet: Encoding Sequential LiDAR Points Using Long Short-Term Motion-Guided Temporal Attention for 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25200}, DOI={10.1609/aaai.v37i1.25200}, abstractNote={Most scanning LiDAR sensors generate a sequence of point clouds in real-time. While conventional 3D object detectors use a set of unordered LiDAR points acquired over a fixed time interval, recent studies have revealed that substantial performance improvement can be achieved by exploiting the spatio-temporal context present in a sequence of LiDAR point sets. 
In this paper, we propose a novel 3D object detection architecture, which can encode LiDAR point cloud sequences acquired by multiple successive scans. The encoding process of the point cloud sequence is performed on two different time scales. We first design a short-term motion-aware voxel encoding that captures the short-term temporal changes of point clouds driven by the motion of objects in each voxel. We also propose long-term motion-guided bird\u2019s eye view (BEV) feature enhancement that adaptively aligns and aggregates the BEV feature maps obtained by the short-term voxel encoding by utilizing the dynamic motion context inferred from the sequence of the feature maps. The experiments conducted on the public nuScenes benchmark demonstrate that the proposed 3D object detector offers significant improvements in performance compared to the baseline methods and that it sets a state-of-the-art performance for certain 3D object detection categories. Code is available at https://github.com/HYjhkoh/MGTANet.git.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Koh, Junho and Lee, Junhyung and Lee, Youngwoo and Kim, Jaekyum and Choi, Jun Won}, year={2023}, month={Jun.}, pages={1179-1187} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25200/24972", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25200", + "pdf_size": 664975, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13339759640978480618&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "spa.hanyang.ac.kr;spa.hanyang.ac.kr;spa.hanyang.ac.kr;spa.hanyang.ac.kr;hanyang.ac.kr", + "email": "spa.hanyang.ac.kr;spa.hanyang.ac.kr;spa.hanyang.ac.kr;spa.hanyang.ac.kr;hanyang.ac.kr", + "github": "https://github.com/HYjhkoh/MGTANet.git", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Hanyang University", + "aff_unique_dep": "", + "aff_unique_url": 
"https://www.hanyang.ac.kr", + "aff_unique_abbr": "HYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25638", + "title": "MGTCF: Multi-Generator Tropical Cyclone Forecasting with Heterogeneous Meteorological Data", + "track": "main", + "status": "Technical", + "abstract": "Accurate forecasting of tropical cyclone (TC) plays a critical role in the prevention and defense of TC disasters. We must explore a more accurate method for TC prediction. Deep learning methods are increasingly being implemented to make TC prediction more accurate. However, most existing methods lack a generic framework for adapting heterogeneous meteorological data and do not focus on the importance of the environment. Therefore, we propose a Multi-Generator Tropical Cyclone Forecasting model (MGTCF), a generic, extensible, multi-modal TC prediction model with the key modules of Generator Chooser Network (GC-Net) and Environment Net (Env-Net). The proposed method can utilize heterogeneous meteorologic data efficiently and mine environmental factors. In addition, the Multi-generator with Generator Chooser Net is proposed to tackle the drawbacks of single-generator TC prediction methods: the prediction of undesired out-of-distribution samples and the problems stemming from insufficient learning ability. To prove the effectiveness of MGTCF, we conduct extensive experiments on the China Meteorological Administration Tropical Cyclone Best Track Dataset. 
MGTCF obtains better performance compared with other deep learning methods and outperforms the official prediction method of the China Central Meteorological Observatory in most indexes.", + "primary_area": "domain s of application", + "author": "Cheng Huang; Cong Bai; Sixian Chan; Jinglin Zhang; YuQuan Wu", + "authorids": "", + "aff": "College of Computer Science, Zhejiang University of Technology; College of Computer Science, Zhejiang University of Technology + Key Laboratory of Visual Media Intelligent Processing Technology of Zhejiang Province; KLME, CIC-FEMD, Nanjing University of Information Science & Technology; School of Control Science and Engineering, Shandong University; Institute of Software Chinese Academy of Sciences", + "bibtex": "@article{Huang_Bai_Chan_Zhang_Wu_2023, title={MGTCF: Multi-Generator Tropical Cyclone Forecasting with Heterogeneous Meteorological Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25638}, DOI={10.1609/aaai.v37i4.25638}, abstractNote={Accurate forecasting of tropical cyclone (TC) plays a critical role in the prevention and defense of TC disasters. We must explore a more accurate method for TC prediction. Deep learning methods are increasingly being implemented to make TC prediction more accurate. However, most existing methods lack a generic framework for adapting heterogeneous meteorological data and do not focus on the importance of the environment. Therefore, we propose a Multi-Generator Tropical Cyclone Forecasting model (MGTCF), a generic, extensible, multi-modal TC prediction model with the key modules of Generator Chooser Network (GC-Net) and Environment Net (Env-Net). The proposed method can utilize heterogeneous meteorologic data efficiently and mine environmental factors. 
In addition, the Multi-generator with Generator Chooser Net is proposed to tackle the drawbacks of single-generator TC prediction methods: the prediction of undesired out-of-distribution samples and the problems stemming from insufficient learning ability. To prove the effectiveness of MGTCF, we conduct extensive experiments on the China Meteorological Administration Tropical Cyclone Best Track Dataset. MGTCF obtains better performance compared with other deep learning methods and outperforms the official prediction method of the China Central Meteorological Observatory in most indexes.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Cheng and Bai, Cong and Chan, Sixian and Zhang, Jinglin and Wu, YuQuan}, year={2023}, month={Jun.}, pages={5096-5104} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25638/25410", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25638", + "pdf_size": 12944404, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4779476054002366760&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "zjut.edu.cn;zjut.edu.cn;zjut.edu.cn;sdu.edu.cn;iscas.ac.cn", + "email": "zjut.edu.cn;zjut.edu.cn;zjut.edu.cn;sdu.edu.cn;iscas.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;3;4", + "aff_unique_norm": "Zhejiang University of Technology;Zhejiang Province Key Laboratory of Visual Media Intelligent Processing Technology;Nanjing University of Information Science & Technology;Shandong University;Chinese Academy of Sciences", + "aff_unique_dep": "College of Computer Science;Key Laboratory of Visual Media Intelligent Processing Technology;;School of Control Science and Engineering;Institute of Software", + "aff_unique_url": "https://www.zjut.edu.cn;;http://www.nuist.edu.cn;http://www.sdu.edu.cn;http://www.ios.ac.cn", + "aff_unique_abbr": ";;NUIST;;CAS", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26098", + "title": "MHCCL: Masked Hierarchical Cluster-Wise Contrastive Learning for Multivariate Time Series", + "track": "main", + "status": "Technical", + "abstract": "Learning semantic-rich representations from raw unlabeled time series data is critical for downstream tasks such as classification and forecasting. Contrastive learning has recently shown its promising representation learning capability in the absence of expert annotations. However, existing contrastive approaches generally treat each instance independently, which leads to false negative pairs that share the same semantics. To tackle this problem, we propose MHCCL, a Masked Hierarchical Cluster-wise Contrastive Learning model, which exploits semantic information obtained from the hierarchical structure consisting of multiple latent partitions for multivariate time series. Motivated by the observation that fine-grained clustering preserves higher purity while coarse-grained one reflects higher-level semantics, we propose a novel downward masking strategy to filter out fake negatives and supplement positives by incorporating the multi-granularity information from the clustering hierarchy. In addition, a novel upward masking strategy is designed in MHCCL to remove outliers of clusters at each partition to refine prototypes, which helps speed up the hierarchical clustering process and improves the clustering quality. We conduct experimental evaluations on seven widely-used multivariate time series datasets. 
The results demonstrate the superiority of MHCCL over the state-of-the-art approaches for unsupervised time series representation learning.", + "primary_area": "machine learning iii", + "author": "Qianwen Meng; Hangwei Qian; Yong Liu; Lizhen Cui; Yonghui Xu; Zhiqi Shen", + "authorids": "", + "aff": "School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; Lund University, Sweden; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore", + "bibtex": "@article{Meng_Qian_Liu_Cui_Xu_Shen_2023, title={MHCCL: Masked Hierarchical Cluster-Wise Contrastive Learning for Multivariate Time Series}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26098}, DOI={10.1609/aaai.v37i8.26098}, abstractNote={Learning semantic-rich representations from raw unlabeled time series data is critical for downstream tasks such as classification and forecasting. Contrastive learning has recently shown its promising representation learning capability in the absence of expert annotations. However, existing contrastive approaches generally treat each instance independently, which leads to false negative pairs that share the same semantics. To tackle this problem, we propose MHCCL, a Masked Hierarchical Cluster-wise Contrastive Learning model, which exploits semantic information obtained from the hierarchical structure consisting of multiple latent partitions for multivariate time series. 
Motivated by the observation that fine-grained clustering preserves higher purity while coarse-grained one reflects higher-level semantics, we propose a novel downward masking strategy to filter out fake negatives and supplement positives by incorporating the multi-granularity information from the clustering hierarchy. In addition, a novel upward masking strategy is designed in MHCCL to remove outliers of clusters at each partition to refine prototypes, which helps speed up the hierarchical clustering process and improves the clustering quality. We conduct experimental evaluations on seven widely-used multivariate time series datasets. The results demonstrate the superiority of MHCCL over the state-of-the-art approaches for unsupervised time series representation learning.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Meng, Qianwen and Qian, Hangwei and Liu, Yong and Cui, Lizhen and Xu, Yonghui and Shen, Zhiqi}, year={2023}, month={Jun.}, pages={9153-9161} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26098/25870", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26098", + "pdf_size": 300426, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10499394419778035299&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "mail.sdu.edu.cn;math.lth.se;ntu.edu.sg;sdu.edu.cn;sdu.edu.cn;ntu.edu.sg", + "email": "mail.sdu.edu.cn;math.lth.se;ntu.edu.sg;sdu.edu.cn;sdu.edu.cn;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;1;2;0+0;0+0;2", + "aff_unique_norm": "Shandong University;Lund University;Nanyang Technological University", + "aff_unique_dep": "School of Software;;School of Computer Science and Engineering", + "aff_unique_url": "http://www.sdu.edu.cn;https://www.lunduniversity.lu.se;https://www.ntu.edu.sg", + "aff_unique_abbr": ";LU;NTU", + "aff_campus_unique_index": "0+0;2;0+0;0+0;2", + 
"aff_campus_unique": "Jinan;;Singapore", + "aff_country_unique_index": "0+0;1;2;0+0;0+0;2", + "aff_country_unique": "China;Sweden;Singapore" + }, + { + "id": "article-25313", + "title": "MIDMs: Matching Interleaved Diffusion Models for Exemplar-Based Image Translation", + "track": "main", + "status": "Technical", + "abstract": "We present a novel method for exemplar-based image translation, called matching interleaved diffusion models (MIDMs). Most existing methods for this task were formulated as GAN-based matching-then-generation framework. However, in this framework, matching errors induced by the difficulty of semantic matching across cross-domain, e.g., sketch and photo, can be easily propagated to the generation step, which in turn leads to the degenerated results. Motivated by the recent success of diffusion models, overcoming the shortcomings of GANs, we incorporate the diffusion models to overcome these limitations. Specifically, we formulate a diffusion-based matching-and-generation framework that interleaves cross-domain matching and diffusion steps in the latent space by iteratively feeding the intermediate warp into the noising process and denoising it to generate a translated image. In addition, to improve the reliability of diffusion process, we design confidence-aware process using cycle-consistency to consider only confident regions during translation. 
Experimental results show that our MIDMs generate more plausible images than state-of-the-art methods.", + "primary_area": "computer vision ii", + "author": "Junyoung Seo; Gyuseong Lee; Seokju Cho; Jiyoung Lee; Seungryong Kim", + "authorids": "", + "aff": "Korea University; Korea University; Korea University; NAVER AI Lab; Korea University", + "bibtex": "@article{Seo_Lee_Cho_Lee_Kim_2023, title={MIDMs: Matching Interleaved Diffusion Models for Exemplar-Based Image Translation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25313}, DOI={10.1609/aaai.v37i2.25313}, abstractNote={We present a novel method for exemplar-based image translation, called matching interleaved diffusion models (MIDMs). Most existing methods for this task were formulated as GAN-based matching-then-generation framework. However, in this framework, matching errors induced by the difficulty of semantic matching across cross-domain, e.g., sketch and photo, can be easily propagated to the generation step, which in turn leads to the degenerated results. Motivated by the recent success of diffusion models, overcoming the shortcomings of GANs, we incorporate the diffusion models to overcome these limitations. Specifically, we formulate a diffusion-based matching-and-generation framework that interleaves cross-domain matching and diffusion steps in the latent space by iteratively feeding the intermediate warp into the noising process and denoising it to generate a translated image. In addition, to improve the reliability of diffusion process, we design confidence-aware process using cycle-consistency to consider only confident regions during translation. 
Experimental results show that our MIDMs generate more plausible images than state-of-the-art methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Seo, Junyoung and Lee, Gyuseong and Cho, Seokju and Lee, Jiyoung and Kim, Seungryong}, year={2023}, month={Jun.}, pages={2191-2199} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25313/25085", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25313", + "pdf_size": 2801525, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5325231799165570438&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;navercorp.com;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;navercorp.com;korea.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Korea University;NAVER Corporation", + "aff_unique_dep": ";NAVER AI Lab", + "aff_unique_url": "https://www.korea.ac.kr;https://www.naver.com", + "aff_unique_abbr": "KU;NAVER", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26504", + "title": "MIGA: A Unified Multi-Task Generation Framework for Conversational Text-to-SQL", + "track": "main", + "status": "Technical", + "abstract": "Conversational text-to-SQL is designed to translate multi-turn natural language questions into their corresponding SQL queries. Most advanced conversational text-to-SQL methods are incompatible with generative pre-trained language models (PLMs), such as T5. In this paper, we present a two-stage unified MultI-task Generation frAmework (MIGA) that leverages PLMs\u2019 ability to tackle conversational text-to-SQL. 
In the pre-training stage, MIGA first decomposes the main task into several related sub-tasks and then unifies them into the same sequence-to-sequence (Seq2Seq) paradigm with task-specific natural language prompts to boost the main task from multi-task training. Later in the fine-tuning stage, we propose four SQL perturbations to alleviate the error propagation problem. MIGA tends to achieve state-of-the-art performance on two benchmarks (SparC and CoSQL). We also provide extensive analyses and discussions to shed light on some new perspectives for conversational text-to-SQL.", + "primary_area": "speech natural language processing", + "author": "Yingwen Fu; Wenjie Ou; Zhou Yu; Yue Lin", + "authorids": "", + "aff": "Guangdong University of Foreign Studies+NetEase Games AI Lab; NetEase Games AI Lab; Columbia University; NetEase Games AI Lab", + "bibtex": "@article{Fu_Ou_Yu_Lin_2023, title={MIGA: A Unified Multi-Task Generation Framework for Conversational Text-to-SQL}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26504}, DOI={10.1609/aaai.v37i11.26504}, abstractNote={Conversational text-to-SQL is designed to translate multi-turn natural language questions into their corresponding SQL queries. Most advanced conversational text-to-SQL methods are incompatible with generative pre-trained language models (PLMs), such as T5. In this paper, we present a two-stage unified MultI-task Generation frAmework (MIGA) that leverages PLMs\u2019 ability to tackle conversational text-to-SQL. In the pre-training stage, MIGA first decomposes the main task into several related sub-tasks and then unifies them into the same sequence-to-sequence (Seq2Seq) paradigm with task-specific natural language prompts to boost the main task from multi-task training. Later in the fine-tuning stage, we propose four SQL perturbations to alleviate the error propagation problem. MIGA tends to achieve state-of-the-art performance on two benchmarks (SparC and CoSQL). 
We also provide extensive analyses and discussions to shed light on some new perspectives for conversational text-to-SQL.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fu, Yingwen and Ou, Wenjie and Yu, Zhou and Lin, Yue}, year={2023}, month={Jun.}, pages={12790-12798} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26504/26276", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26504", + "pdf_size": 649356, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12757348162404929124&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gdufs.edu.cn;corp.netease.com;columbia.edu;corp.netease.com", + "email": "gdufs.edu.cn;corp.netease.com;columbia.edu;corp.netease.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;2;1", + "aff_unique_norm": "Guangdong University of Foreign Studies;NetEase Games;Columbia University", + "aff_unique_dep": ";AI Lab;", + "aff_unique_url": "http://www.gdufs.edu.cn;https://game.163.com;https://www.columbia.edu", + "aff_unique_abbr": "GDUFS;NetEase Games AI Lab;Columbia", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25289", + "title": "MIMO Is All You Need\uff1aA Strong Multi-in-Multi-Out Baseline for Video Prediction", + "track": "main", + "status": "Technical", + "abstract": "The mainstream of the existing approaches for video prediction builds up their models based on a Single-In-Single-Out (SISO) architecture, which takes the current frame as input to predict the next frame in a recursive manner. This way often leads to severe performance degradation when they try to extrapolate a longer period of future, thus limiting the practical use of the prediction model. 
Alternatively, a Multi-In-Multi-Out (MIMO) architecture that outputs all the future frames at one shot naturally breaks the recursive manner and therefore prevents error accumulation. However, only a few MIMO models for video prediction are proposed and they only achieve inferior performance due to the date. \nThe real strength of the MIMO model in this area is not well noticed and is largely under-explored. Motivated by that, we conduct a comprehensive investigation in this paper to thoroughly exploit how far a simple MIMO architecture can go. Surprisingly, our empirical studies reveal that a simple MIMO model can outperform the state-of-the-art work with a large margin much more than expected, especially in dealing with long-term error accumulation. \nAfter exploring a number of ways and designs, we propose a new MIMO architecture based on extending the pure Transformer with local spatio-temporal blocks and a new multi-output decoder, namely MIMO-VP, to establish a new standard in video prediction. We evaluate our model in four highly competitive benchmarks. \nExtensive experiments show that our model wins 1st place on all the benchmarks with remarkable performance gains and surpasses the best SISO model in all aspects including efficiency, quantity, and quality. A dramatic error reduction is achieved when predicting 10 frames on Moving MNIST and Weather datasets respectively. We believe our model can serve as a new baseline to facilitate the future research of video prediction tasks. 
The code will be released.", + "primary_area": "computer vision ii", + "author": "Shuliang Ning; Mengcheng Lan; Yanran Li; Chaofeng Chen; Qian Chen; Xunlai Chen; Xiaoguang Han; Shuguang Cui", + "authorids": "", + "aff": "FNii, CUHKSZ+sse, CUHKSZ; SSE, CUHKSZ; The University of Edinburgh; Nanyang Technological University; Shenzhen Meteorological Bureau; Shenzhen Meteorological Bureau; SSE, CUHKSZ+FNii, CUHKSZ; SSE, CUHKSZ+FNii, CUHKSZ", + "bibtex": "@article{Ning_Lan_Li_Chen_Chen_Chen_Han_Cui_2023, title={MIMO Is All You Need\uff1aA Strong Multi-in-Multi-Out Baseline for Video Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25289}, DOI={10.1609/aaai.v37i2.25289}, abstractNote={The mainstream of the existing approaches for video prediction builds up their models based on a Single-In-Single-Out (SISO) architecture, which takes the current frame as input to predict the next frame in a recursive manner. This way often leads to severe performance degradation when they try to extrapolate a longer period of future, thus limiting the practical use of the prediction model. Alternatively, a Multi-In-Multi-Out (MIMO) architecture that outputs all the future frames at one shot naturally breaks the recursive manner and therefore prevents error accumulation. However, only a few MIMO models for video prediction are proposed and they only achieve inferior performance due to the date. The real strength of the MIMO model in this area is not well noticed and is largely under-explored. Motivated by that, we conduct a comprehensive investigation in this paper to thoroughly exploit how far a simple MIMO architecture can go. Surprisingly, our empirical studies reveal that a simple MIMO model can outperform the state-of-the-art work with a large margin much more than expected, especially in dealing with long-term error accumulation. 
After exploring a number of ways and designs, we propose a new MIMO architecture based on extending the pure Transformer with local spatio-temporal blocks and a new multi-output decoder, namely MIMO-VP, to establish a new standard in video prediction. We evaluate our model in four highly competitive benchmarks. Extensive experiments show that our model wins 1st place on all the benchmarks with remarkable performance gains and surpasses the best SISO model in all aspects including efficiency, quantity, and quality. A dramatic error reduction is achieved when predicting 10 frames on Moving MNIST and Weather datasets respectively. We believe our model can serve as a new baseline to facilitate the future research of video prediction tasks. The code will be released.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ning, Shuliang and Lan, Mengcheng and Li, Yanran and Chen, Chaofeng and Chen, Qian and Chen, Xunlai and Han, Xiaoguang and Cui, Shuguang}, year={2023}, month={Jun.}, pages={1975-1983} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25289/25061", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25289", + "pdf_size": 3359540, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5760399942877751186&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "link.cuhk.edu.cn;gmail.com;ed.ac.uk;gmail.com;weather.sz.gov.cn;weather.sz.gov.cn;cuhk.edu.cn;cuhk.edu.cn", + "email": "link.cuhk.edu.cn;gmail.com;ed.ac.uk;gmail.com;weather.sz.gov.cn;weather.sz.gov.cn;cuhk.edu.cn;cuhk.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;2;3;4;5;5;2+0;2+0", + "aff_unique_norm": "Fudan University;Shenzhen University;Shenzhen University, College of Software Engineering;University of Edinburgh;Nanyang Technological University;Shenzhen Meteorological Bureau", + "aff_unique_dep": ";School of Software Engineering;College of 
Software Engineering;;;", + "aff_unique_url": "https://www.fudan.edu.cn/en/;http://www.szu.edu.cn/;http://sse.cuhk.edu.cn;https://www.ed.ac.uk;https://www.ntu.edu.sg;http://www.szmb.gov.cn/", + "aff_unique_abbr": "Fudan;SZU;SSE;Edinburgh;NTU;", + "aff_campus_unique_index": "0+1;0;0+0;0+0", + "aff_campus_unique": "Shenzhen;SZU;", + "aff_country_unique_index": "0+0;0;1;2;0;0;0+0;0+0", + "aff_country_unique": "China;United Kingdom;Singapore" + }, + { + "id": "article-25100", + "title": "MMTN: Multi-Modal Memory Transformer Network for Image-Report Consistent Medical Report Generation", + "track": "main", + "status": "Technical", + "abstract": "Automatic medical report generation is an essential task in applying artificial intelligence to the medical domain, which can lighten the workloads of doctors and promote clinical automation. The state-of-the-art approaches employ Transformer-based encoder-decoder architectures to generate reports for medical images. However, they do not fully explore the relationships between multi-modal medical data, and generate inaccurate and inconsistent reports. To address these issues, this paper proposes a Multi-modal Memory Transformer Network (MMTN) to cope with multi-modal medical data for generating image-report consistent medical reports. On the one hand, MMTN reduces the occurrence of image-report inconsistencies by designing a unique encoder to associate and memorize the relationship between medical images and medical terminologies. On the other hand, MMTN utilizes the cross-modal complementarity of the medical vision and language for the word prediction, which further enhances the accuracy of generating medical reports. 
Extensive experiments on three real datasets show that MMTN achieves significant effectiveness over state-of-the-art approaches on both automatic metrics and human evaluation.", + "primary_area": "computer vision i", + "author": "Yiming Cao; Lizhen Cui; Lei Zhang; Fuqiang Yu; Zhen Li; Yonghui Xu", + "authorids": "", + "aff": "School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China+Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China; Department of Gastroenterology, Qilu Hospital of Shandong University, Jinan, China; Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, Jinan, China", + "bibtex": "@article{Cao_Cui_Zhang_Yu_Li_Xu_2023, title={MMTN: Multi-Modal Memory Transformer Network for Image-Report Consistent Medical Report Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25100}, DOI={10.1609/aaai.v37i1.25100}, abstractNote={Automatic medical report generation is an essential task in applying artificial intelligence to the medical domain, which can lighten the workloads of doctors and promote clinical automation. The state-of-the-art approaches employ Transformer-based encoder-decoder architectures to generate reports for medical images. However, they do not fully explore the relationships between multi-modal medical data, and generate inaccurate and inconsistent reports. 
To address these issues, this paper proposes a Multi-modal Memory Transformer Network (MMTN) to cope with multi-modal medical data for generating image-report consistent medical reports. On the one hand, MMTN reduces the occurrence of image-report inconsistencies by designing a unique encoder to associate and memorize the relationship between medical images and medical terminologies. On the other hand, MMTN utilizes the cross-modal complementarity of the medical vision and language for the word prediction, which further enhances the accuracy of generating medical reports. Extensive experiments on three real datasets show that MMTN achieves significant effectiveness over state-of-the-art approaches on both automatic metrics and human evaluation.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Yiming and Cui, Lizhen and Zhang, Lei and Yu, Fuqiang and Li, Zhen and Xu, Yonghui}, year={2023}, month={Jun.}, pages={277-285} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25100/24872", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25100", + "pdf_size": 4369020, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12527222210734995447&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.sdu.edu.cn;mail.sdu.edu.cn;mail.sdu.edu.cn;mail.sdu.edu.cn;sdu.edu.cn;hotmail.com", + "email": "mail.sdu.edu.cn;mail.sdu.edu.cn;mail.sdu.edu.cn;mail.sdu.edu.cn;sdu.edu.cn;hotmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;0+0;0;0", + "aff_unique_norm": "Shandong University", + "aff_unique_dep": "School of Software", + "aff_unique_url": "http://www.sdu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0;0", + "aff_campus_unique": "Jinan", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25971", + "title": "MNER-QG: 
An End-to-End MRC Framework for Multimodal Named Entity Recognition with Query Grounding", + "track": "main", + "status": "Technical", + "abstract": "Multimodal named entity recognition (MNER) is a critical step in information extraction, which aims to detect entity spans and classify them to corresponding entity types given a sentence-image pair. Existing methods either (1) obtain named entities with coarse-grained visual clues from attention mechanisms, or (2) first detect fine-grained visual regions with toolkits and then recognize named entities. However, they suffer from improper alignment between entity types and visual regions or error propagation in the two-stage manner, which finally imports irrelevant visual information into texts. In this paper, we propose a novel end-to-end framework named MNER-QG that can simultaneously perform MRC-based multimodal named entity recognition and query grounding. Specifically, with the assistance of queries, MNER-QG can provide prior knowledge of entity types and visual regions, and further enhance representations of both text and image. To conduct the query grounding task, we provide manual annotations and weak supervisions that are obtained via training a highly flexible visual grounding model with transfer learning. We conduct extensive experiments on two public MNER datasets, Twitter2015 and Twitter2017. 
Experimental results show that MNER-QG outperforms the current state-of-the-art models on the MNER task, and also improves the query grounding performance.", + "primary_area": "machine learning ii", + "author": "Meihuizi Jia; Lei Shen; Xin Shen; Lejian Liao; Meng Chen; Xiaodong He; Zhendong Chen; Jiaqi Li", + "authorids": "", + "aff": "School of Computer Science & Technology, Beijing Institute of Technology, Beijing, China+JD AI Research, Beijing, China; JD AI Research, Beijing, China; Australian National University, Canberra, Australia; School of Computer Science & Technology, Beijing Institute of Technology, Beijing, China; JD AI Research, Beijing, China; JD AI Research, Beijing, China; School of Computer Science & Technology, Beijing Institute of Technology, Beijing, China; School of Computer Science & Technology, Beijing Institute of Technology, Beijing, China", + "bibtex": "@article{Jia_Shen_Shen_Liao_Chen_He_Chen_Li_2023, title={MNER-QG: An End-to-End MRC Framework for Multimodal Named Entity Recognition with Query Grounding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25971}, DOI={10.1609/aaai.v37i7.25971}, abstractNote={Multimodal named entity recognition (MNER) is a critical step in information extraction, which aims to detect entity spans and classify them to corresponding entity types given a sentence-image pair. Existing methods either (1) obtain named entities with coarse-grained visual clues from attention mechanisms, or (2) first detect fine-grained visual regions with toolkits and then recognize named entities. However, they suffer from improper alignment between entity types and visual regions or error propagation in the two-stage manner, which finally imports irrelevant visual information into texts. In this paper, we propose a novel end-to-end framework named MNER-QG that can simultaneously perform MRC-based multimodal named entity recognition and query grounding. 
Specifically, with the assistance of queries, MNER-QG can provide prior knowledge of entity types and visual regions, and further enhance representations of both text and image. To conduct the query grounding task, we provide manual annotations and weak supervisions that are obtained via training a highly flexible visual grounding model with transfer learning. We conduct extensive experiments on two public MNER datasets, Twitter2015 and Twitter2017. Experimental results show that MNER-QG outperforms the current state-of-the-art models on the MNER task, and also improves the query grounding performance.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Meihuizi and Shen, Lei and Shen, Xin and Liao, Lejian and Chen, Meng and He, Xiaodong and Chen, Zhendong and Li, Jiaqi}, year={2023}, month={Jun.}, pages={8032-8040} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25971/25743", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25971", + "pdf_size": 7415692, + "gs_citation": 53, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12825085740731914774&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "bit.edu.cn;jd.com;anu.edu.au;bit.edu.cn;jd.com;jd.com;bit.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;jd.com;anu.edu.au;bit.edu.cn;jd.com;jd.com;bit.edu.cn;bit.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1;2;0;1;1;0;0", + "aff_unique_norm": "Beijing Institute of Technology;JD AI Research;Australian National University", + "aff_unique_dep": "School of Computer Science & Technology;;", + "aff_unique_url": "http://www.bit.edu.cn;;https://www.anu.edu.au", + "aff_unique_abbr": "BIT;;ANU", + "aff_campus_unique_index": "0+0;0;1;0;0;0;0;0", + "aff_campus_unique": "Beijing;Canberra", + "aff_country_unique_index": "0+0;0;1;0;0;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26634", + "title": "MPMQA: 
Multimodal Question Answering on Product Manuals", + "track": "main", + "status": "Technical", + "abstract": "Visual contents, such as illustrations and images, play a big role in product manual understanding. Existing Product Manual Question Answering (PMQA) datasets tend to ignore visual contents and only retain textual parts. \nIn this work, to emphasize the importance of multimodal contents, we propose a Multimodal Product Manual Question Answering (MPMQA) task. For each question, MPMQA requires the model not only to process multimodal contents but also to provide multimodal answers. To support MPMQA, a large-scale dataset PM209 is constructed with human annotations, which contains 209 product manuals from 27 well-known consumer electronic brands. Human annotations include 6 types of semantic regions for manual contents and 22,021 pairs of question and answer. Especially, each answer consists of a textual sentence and related visual regions from manuals. Taking into account the length of product manuals and the fact that a question is always related to a small number of pages, MPMQA can be naturally split into two subtasks: retrieving most related pages and then generating multimodal answers. We further propose a unified model that can perform these two subtasks all together and achieve comparable performance with multiple task-specific models. 
The PM209 dataset is available at https://github.com/AIM3-RUC/MPMQA.", + "primary_area": "speech natural language processing", + "author": "Liang Zhang; Anwen Hu; Jing Zhang; Shuo Hu; Qin Jin", + "authorids": "", + "aff": "School of Information, Renmin University of China; School of Information, Renmin University of China; Samsung Research China - Beijing (SRC-B); Samsung Research China - Beijing (SRC-B); School of Information, Renmin University of China", + "bibtex": "@article{Zhang_Hu_Zhang_Hu_Jin_2023, title={MPMQA: Multimodal Question Answering on Product Manuals}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26634}, DOI={10.1609/aaai.v37i11.26634}, abstractNote={Visual contents, such as illustrations and images, play a big role in product manual understanding. Existing Product Manual Question Answering (PMQA) datasets tend to ignore visual contents and only retain textual parts. In this work, to emphasize the importance of multimodal contents, we propose a Multimodal Product Manual Question Answering (MPMQA) task. For each question, MPMQA requires the model not only to process multimodal contents but also to provide multimodal answers. To support MPMQA, a large-scale dataset PM209 is constructed with human annotations, which contains 209 product manuals from 27 well-known consumer electronic brands. Human annotations include 6 types of semantic regions for manual contents and 22,021 pairs of question and answer. Especially, each answer consists of a textual sentence and related visual regions from manuals. Taking into account the length of product manuals and the fact that a question is always related to a small number of pages, MPMQA can be naturally split into two subtasks: retrieving most related pages and then generating multimodal answers. We further propose a unified model that can perform these two subtasks all together and achieve comparable performance with multiple task-specific models. 
The PM209 dataset is available at https://github.com/AIM3-RUC/MPMQA.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Liang and Hu, Anwen and Zhang, Jing and Hu, Shuo and Jin, Qin}, year={2023}, month={Jun.}, pages={13958-13966} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26634/26406", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26634", + "pdf_size": 1671672, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10764477634528122341&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;samsung.com;samsung.com;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;samsung.com;samsung.com;ruc.edu.cn", + "github": "https://github.com/AIM3-RUC/MPMQA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;1;0", + "aff_unique_norm": "Renmin University of China;Samsung Research China", + "aff_unique_dep": "School of Information;", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.samsung.com/cn/research/", + "aff_unique_abbr": "RUC;SRC-B", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25459", + "title": "MRCN: A Novel Modality Restitution and Compensation Network for Visible-Infrared Person Re-identification", + "track": "main", + "status": "Technical", + "abstract": "Visible-infrared person re-identification (VI-ReID), which aims to search identities across different spectra, is a challenging task due to large cross-modality discrepancy between visible and infrared images. The key to reduce the discrepancy is to filter out identity-irrelevant interference and effectively learn modality-invariant person representations. In this paper, we propose a novel Modality Restitution and Compensation Network (MRCN) to narrow the gap between the two modalities. 
Specifically, we first reduce the modality discrepancy by using two Instance Normalization (IN) layers. Next, to reduce the influence of IN layers on removing discriminative information and to reduce modality differences, we propose a Modality Restitution Module (MRM) and a Modality Compensation Module (MCM) to respectively distill modality-irrelevant and modality-relevant features from the removed information. Then, the modality-irrelevant features are used to restitute to the normalized visible and infrared features, while the modality-relevant features are used to compensate for the features of the other modality. Furthermore, to better disentangle the modality-relevant features and the modality-irrelevant features, we propose a novel Center-Quadruplet Causal (CQC) loss to encourage the network to effectively learn the modality-relevant features and the modality-irrelevant features. Extensive experiments are conducted to validate the superiority of our method on the challenging SYSU-MM01 and RegDB datasets. 
More remarkably, our method achieves 95.1% in terms of Rank-1 and 89.2% in terms of mAP on the RegDB dataset.", + "primary_area": "computer vision iii", + "author": "Yukang Zhang; Yan Yan; Jie Li; Hanzi Wang", + "authorids": "", + "aff": "Fujian Key Laboratory of Sensing and Computing for Smart City, School of Informatics, Xiamen University, China; Fujian Key Laboratory of Sensing and Computing for Smart City, School of Informatics, Xiamen University, China; Video and Image Processing System Laboratory, School of Electronic Engineering, Xidian University, Xi\u2019an, China; Fujian Key Laboratory of Sensing and Computing for Smart City, School of Informatics, Xiamen University, China + Shanghai Artificial Intelligence Laboratory, Shanghai, China", + "bibtex": "@article{Zhang_Yan_Li_Wang_2023, title={MRCN: A Novel Modality Restitution and Compensation Network for Visible-Infrared Person Re-identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25459}, DOI={10.1609/aaai.v37i3.25459}, abstractNote={Visible-infrared person re-identification (VI-ReID), which aims to search identities across different spectra, is a challenging task due to large cross-modality discrepancy between visible and infrared images. The key to reduce the discrepancy is to filter out identity-irrelevant interference and effectively learn modality-invariant person representations. In this paper, we propose a novel Modality Restitution and Compensation Network (MRCN) to narrow the gap between the two modalities. Specifically, we first reduce the modality discrepancy by using two Instance Normalization (IN) layers. Next, to reduce the influence of IN layers on removing discriminative information and to reduce modality differences, we propose a Modality Restitution Module (MRM) and a Modality Compensation Module (MCM) to respectively distill modality-irrelevant and modality-relevant features from the removed information. 
Then, the modality-irrelevant features are used to restitute to the normalized visible and infrared features, while the modality-relevant features are used to compensate for the features of the other modality. Furthermore, to better disentangle the modality-relevant features and the modality-irrelevant features, we propose a novel Center-Quadruplet Causal (CQC) loss to encourage the network to effectively learn the modality-relevant features and the modality-irrelevant features. Extensive experiments are conducted to validate the superiority of our method on the challenging SYSU-MM01 and RegDB datasets. More remarkably, our method achieves 95.1% in terms of Rank-1 and 89.2% in terms of mAP on the RegDB dataset.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yukang and Yan, Yan and Li, Jie and Wang, Hanzi}, year={2023}, month={Jun.}, pages={3498-3506} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25459/25231", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25459", + "pdf_size": 1771494, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1182259788141099546&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn;mail.xidian.edu.cn", + "email": "stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn;mail.xidian.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0+2", + "aff_unique_norm": "Xiamen University;Xidian University;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": "School of Informatics;School of Electronic Engineering;", + "aff_unique_url": "https://www.xmu.edu.cn;http://www.xidian.edu.cn;", + "aff_unique_abbr": "XMU;Xidian;", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Xi'an;Shanghai", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25636", + "title": "MSDC: Exploiting 
Multi-State Power Consumption in Non-intrusive Load Monitoring Based on a Dual-CNN Model", + "track": "main", + "status": "Technical", + "abstract": "Non-intrusive load monitoring (NILM) aims to decompose aggregated electrical usage signal into appliance-specific power consumption and it amounts to a classical example of blind source separation tasks. Leveraging recent progress on deep learning techniques, we design a new neural NILM model {\\em Multi-State Dual CNN} (MSDC). Different from previous models, MSDC explicitly extracts information about the appliance's multiple states and state transitions, which in turn regulates the prediction of signals for appliances. More specifically, we employ a dual-CNN architecture: one CNN for outputting state distributions and the other for predicting the power of each state. A new technique is invented that utilizes conditional random fields (CRF) to capture state transitions. Experiments on two real-world datasets REDD and UK-DALE demonstrate that our model significantly outperform state-of-the-art models while having good generalization capacity, achieving 6%-10% MAE gain and 33%-51% SAE gain to unseen appliances.", + "primary_area": "domain s of application", + "author": "Jialing He; Jiamou Liu; Zijian Zhang; Yang Chen; Yiwei Liu; Bakh Khoussainov; Liehuang Zhu", + "authorids": "", + "aff": "College of Computer Science, Chongqing University, Chongqing, China; School of Computer Science, The University of Auckland, Auckland 1142, New Zealand; School of Cyberspace Science and Technology, Beijing Institute of Technology, Beijing, China; Southeast Institute of Information Technology, Beijing Institute of Technology, Fujian China; Strong AI Lab, The University of Auckland, Auckland 1142, New Zealand; Defence Industry Secrecy Examination and Certification Center, Beijing, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China", + "bibtex": 
"@article{He_Liu_Zhang_Chen_Liu_Khoussainov_Zhu_2023, title={MSDC: Exploiting Multi-State Power Consumption in Non-intrusive Load Monitoring Based on a Dual-CNN Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25636}, DOI={10.1609/aaai.v37i4.25636}, abstractNote={Non-intrusive load monitoring (NILM) aims to decompose aggregated electrical usage signal into appliance-specific power consumption and it amounts to a classical example of blind source separation tasks. Leveraging recent progress on deep learning techniques, we design a new neural NILM model {\\em Multi-State Dual CNN} (MSDC). Different from previous models, MSDC explicitly extracts information about the appliance\u2019s multiple states and state transitions, which in turn regulates the prediction of signals for appliances. More specifically, we employ a dual-CNN architecture: one CNN for outputting state distributions and the other for predicting the power of each state. A new technique is invented that utilizes conditional random fields (CRF) to capture state transitions. 
Experiments on two real-world datasets REDD and UK-DALE demonstrate that our model significantly outperform state-of-the-art models while having good generalization capacity, achieving 6%-10% MAE gain and 33%-51% SAE gain to unseen appliances.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Jialing and Liu, Jiamou and Zhang, Zijian and Chen, Yang and Liu, Yiwei and Khoussainov, Bakh and Zhu, Liehuang}, year={2023}, month={Jun.}, pages={5078-5086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25636/25408", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25636", + "pdf_size": 363603, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12856935666249766769&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 8, + "aff_domain": "cqu.edu.cn;auckland.ac.nz;auckland.ac.nz;bit.edu.cn;bit.edu.cn;disecc.com;uestc.edu.cn", + "email": "cqu.edu.cn;auckland.ac.nz;auckland.ac.nz;bit.edu.cn;bit.edu.cn;disecc.com;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;2;1;3;4", + "aff_unique_norm": "Chongqing University;The University of Auckland;Beijing Institute of Technology;Defence Industry Secrecy Examination and Certification Center;University of Electronic Science and Technology of China", + "aff_unique_dep": "College of Computer Science;School of Computer Science;School of Cyberspace Science and Technology;;School of Computer Science and Engineering", + "aff_unique_url": "http://en.cqu.edu.cn/;https://www.auckland.ac.nz;http://www.bit.edu.cn/;;http://www.uestc.edu.cn", + "aff_unique_abbr": "CQU;UoA;BIT;;UESTC", + "aff_campus_unique_index": "0;1;2;3;1;5", + "aff_campus_unique": "Chongqing;Auckland;Beijing;Southeast;;Chengdu", + "aff_country_unique_index": "0;1;0;0;1;0;0", + "aff_country_unique": "China;New Zealand" + }, + { + "id": "article-26666", + "title": "MTDiag: An Effective Multi-Task Framework for Automatic 
Diagnosis", + "track": "aaai special track", + "status": "Technical", + "abstract": "Automatic diagnosis systems aim to probe for symptoms (i.e., symptom checking) and diagnose disease through multi-turn conversations with patients. Most previous works formulate it as a sequential decision process and use reinforcement learning (RL) to decide whether to inquire about symptoms or make a diagnosis. However, these RL-based methods heavily rely on the elaborate reward function and usually suffer from an unstable training process and low data efficiency. In this work, we propose an effective multi-task framework for automatic diagnosis called MTDiag. We first reformulate symptom checking as a multi-label classification task by direct supervision. Each medical dialogue is equivalently converted into multiple samples for classification, which can also help alleviate the data scarcity problem. Furthermore, we design a multi-task learning strategy to guide the symptom checking procedure with disease information and further utilize contrastive learning to better distinguish symptoms between diseases. Extensive experimental results show that our method achieves state-of-the-art performance on four public datasets with 1.7%~3.1% improvement in disease diagnosis, demonstrating the superiority of the proposed method. 
Additionally, our model is now deployed in an online medical consultant system as an assistant tool for real-life doctors.", + "primary_area": "ai for social impact", + "author": "Zhenyu Hou; Yukuo Cen; Ziding Liu; Dongxue Wu; Baoyan Wang; Xuanhe Li; Lei Hong; Jie Tang", + "authorids": "", + "aff": "Tsinghua University; Tsinghua University; Meituan; Meituan; Meituan; Meituan; Meituan; Tsinghua University", + "bibtex": "@article{Hou_Cen_Liu_Wu_Wang_Li_Hong_Tang_2023, title={MTDiag: An Effective Multi-Task Framework for Automatic Diagnosis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26666}, DOI={10.1609/aaai.v37i12.26666}, abstractNote={Automatic diagnosis systems aim to probe for symptoms (i.e., symptom checking) and diagnose disease through multi-turn conversations with patients. Most previous works formulate it as a sequential decision process and use reinforcement learning (RL) to decide whether to inquire about symptoms or make a diagnosis. However, these RL-based methods heavily rely on the elaborate reward function and usually suffer from an unstable training process and low data efficiency. In this work, we propose an effective multi-task framework for automatic diagnosis called MTDiag. We first reformulate symptom checking as a multi-label classification task by direct supervision. Each medical dialogue is equivalently converted into multiple samples for classification, which can also help alleviate the data scarcity problem. Furthermore, we design a multi-task learning strategy to guide the symptom checking procedure with disease information and further utilize contrastive learning to better distinguish symptoms between diseases. Extensive experimental results show that our method achieves state-of-the-art performance on four public datasets with 1.7%~3.1% improvement in disease diagnosis, demonstrating the superiority of the proposed method. 
Additionally, our model is now deployed in an online medical consultant system as an assistant tool for real-life doctors.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hou, Zhenyu and Cen, Yukuo and Liu, Ziding and Wu, Dongxue and Wang, Baoyan and Li, Xuanhe and Hong, Lei and Tang, Jie}, year={2023}, month={Jun.}, pages={14241-14248} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26666/26438", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26666", + "pdf_size": 634674, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11885258590014585834&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;1;1;1;1;0", + "aff_unique_norm": "Tsinghua University;Meituan", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.meituan.com", + "aff_unique_abbr": "THU;Meituan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26080", + "title": "MVCINN: Multi-View Diabetic Retinopathy Detection Using a Deep Cross-Interaction Neural Network", + "track": "main", + "status": "Technical", + "abstract": "", + "primary_area": "machine learning ii", + "author": "Xiaoling Luo; Chengliang Liu; Waikeung Wong; Jie Wen; Xiaopeng Jin; Yong Xu", + "authorids": "", + "aff": "Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, 
Shenzhen, China; School of Fashion and Textiles, The Hong Kong Polytechnic University, Kowloon, Hong Kong+Laboratory for Artificial Intelligence in Design, Hong Kong; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China; College of Big Data and Internet, Shenzhen Technology University, Shenzhen, China; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, China", + "bibtex": "", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26080/25852", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26080", + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=151829407958343058&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "outlook.com;163.com;polyu.edu.hk;126.com;gmail.com;hit.edu.cn", + "email": "outlook.com;163.com;polyu.edu.hk;126.com;gmail.com;hit.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1+2;0;3;0", + "aff_unique_norm": "Harbin Institute of Technology;The Hong Kong Polytechnic University;Hong Kong University;Shenzhen Technology University", + "aff_unique_dep": "Shenzhen Key Laboratory of Visual Object Detection and Recognition;School of Fashion and Textiles;Laboratory for Artificial Intelligence in Design;College of Big Data and Internet", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.polyu.edu.hk;https://www.hku.hk;https://www.sztu.edu.cn", + "aff_unique_abbr": "HIT;PolyU;;SZTU", + "aff_campus_unique_index": "0;0;1;0;0;0", + "aff_campus_unique": "Shenzhen;Kowloon;", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26199", + "title": "Machines of Finite Depth: Towards a Formalization of Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "We provide a unifying framework where artificial neural networks and their architectures can be formally 
described as particular cases of a general mathematical construction---machines of finite depth. Unlike neural networks, machines have a precise definition, from which several properties follow naturally. Machines of finite depth are modular (they can be combined), efficiently computable, and differentiable. The backward pass of a machine is again a machine and can be computed without overhead using the same procedure as the forward pass. We prove this statement theoretically and practically via a unified implementation that generalizes several classical architectures---dense, convolutional, and recurrent neural networks with a rich shortcut structure---and their respective backpropagation rules.", + "primary_area": "machine learning iii", + "author": "Pietro Vertechi; Mattia G. Bergomi", + "authorids": "", + "aff": ";", + "bibtex": "@article{Vertechi_Bergomi_2023, title={Machines of Finite Depth: Towards a Formalization of Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26199}, DOI={10.1609/aaai.v37i8.26199}, abstractNote={We provide a unifying framework where artificial neural networks and their architectures can be formally described as particular cases of a general mathematical construction---machines of finite depth. Unlike neural networks, machines have a precise definition, from which several properties follow naturally. Machines of finite depth are modular (they can be combined), efficiently computable, and differentiable. The backward pass of a machine is again a machine and can be computed without overhead using the same procedure as the forward pass. 
We prove this statement theoretically and practically via a unified implementation that generalizes several classical architectures---dense, convolutional, and recurrent neural networks with a rich shortcut structure---and their respective backpropagation rules.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vertechi, Pietro and Bergomi, Mattia G.}, year={2023}, month={Jun.}, pages={10061-10068} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26199/25971", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26199", + "pdf_size": 186483, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7093850286828345567&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "protonmail.com;gmail.com", + "email": "protonmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "article-26878", + "title": "Maestro: A Gamified Platform for Teaching AI Robustness", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "Although the prevention of AI vulnerabilities is critical to preserve the safety and privacy of users and businesses, educational tools for robust AI are still underdeveloped worldwide. We present the design, implementation, and assessment of Maestro. Maestro is an effective open-source game-based platform that contributes to the advancement of robust AI education. Maestro provides \"goal-based scenarios\" where college students are exposed to challenging life-inspired assignments in a \"competitive programming\" environment. We assessed Maestro's influence on students' engagement, motivation, and learning success in robust AI. This work also provides insights into the design features of online learning tools that promote active learning opportunities in the robust AI domain. 
We analyzed the reflection responses (measured with Likert scales) of 147 undergraduate students using Maestro in two quarterly college courses in AI. According to the results, students who felt the acquisition of new skills in robust AI tended to appreciate highly Maestro and scored highly on material consolidation, curiosity, and maestry in robust AI. Moreover, the leaderboard, our key gamification element in Maestro, has effectively contributed to students' engagement and learning. Results also indicate that Maestro can be effectively adapted to any course length and depth without losing its educational quality.", + "primary_area": "", + "author": "Margarita Geleta; Jiacen Xu; Manikanta Loya; Junlin Wang; Sameer Singh; Zhou Li; Sergio Gago-Masague", + "authorids": "", + "aff": "University of California, Irvine; University of California, Irvine; University of California, Irvine; University of California, Irvine; University of California, Irvine; University of California, Irvine; University of California, Irvine", + "bibtex": "@article{Geleta_Xu_Loya_Wang_Singh_Li_Gago-Masague_2024, title={Maestro: A Gamified Platform for Teaching AI Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26878}, DOI={10.1609/aaai.v37i13.26878}, abstractNote={Although the prevention of AI vulnerabilities is critical to preserve the safety and privacy of users and businesses, educational tools for robust AI are still underdeveloped worldwide. We present the design, implementation, and assessment of Maestro. Maestro is an effective open-source game-based platform that contributes to the advancement of robust AI education. Maestro provides "goal-based scenarios" where college students are exposed to challenging life-inspired assignments in a "competitive programming" environment. We assessed Maestro\u2019s influence on students\u2019 engagement, motivation, and learning success in robust AI. 
This work also provides insights into the design features of online learning tools that promote active learning opportunities in the robust AI domain. We analyzed the reflection responses (measured with Likert scales) of 147 undergraduate students using Maestro in two quarterly college courses in AI. According to the results, students who felt the acquisition of new skills in robust AI tended to appreciate highly Maestro and scored highly on material consolidation, curiosity, and maestry in robust AI. Moreover, the leaderboard, our key gamification element in Maestro, has effectively contributed to students\u2019 engagement and learning. Results also indicate that Maestro can be effectively adapted to any course length and depth without losing its educational quality.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Geleta, Margarita and Xu, Jiacen and Loya, Manikanta and Wang, Junlin and Singh, Sameer and Li, Zhou and Gago-Masague, Sergio}, year={2024}, month={Jul.}, pages={15816-15824} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26878/26650", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26878", + "pdf_size": 1902599, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9345990121354168423&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "uci.edu;uci.edu;uci.edu;uci.edu;uci.edu;uci.edu;uci.edu", + "email": "uci.edu;uci.edu;uci.edu;uci.edu;uci.edu;uci.edu;uci.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "University of California, Irvine", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uci.edu", + "aff_unique_abbr": "UCI", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Irvine", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26413", + "title": "Markov Decision 
Processes with Time-Varying Geometric Discounting", + "track": "main", + "status": "Technical", + "abstract": "Canonical models of Markov decision processes (MDPs) usually consider geometric discounting based on a constant discount factor. While this standard modeling approach has led to many elegant results, some recent studies indicate the necessity of modeling time-varying discounting in certain applications. This paper studies a model of infinite-horizon MDPs with time-varying discount factors. We take a game-theoretic perspective \u2013 whereby each time step is treated as an independent decision maker with their own (fixed) discount factor \u2013 and we study the subgame perfect equilibrium (SPE) of the resulting game as well as the related algorithmic problems. We present a constructive proof of the existence of an SPE and demonstrate the EXPTIME-hardness of computing an SPE. We also turn to the approximate notion of epsilon-SPE and show that an epsilon-SPE exists under milder assumptions. An algorithm is presented to compute an epsilon-SPE, of which an upper bound of the time complexity, as a function of the convergence property of the time-varying discount factor, is provided.", + "primary_area": "planning routing and scheduling", + "author": "Jiarui Gan; Annika Hennes; Rupak Majumdar; Debmalya Mandal; Goran Radanovic", + "authorids": "", + "aff": "University of Oxford; Heinrich-Heine-University D\u00fcsseldorf; Max Planck Institute for Software Systems; Max Planck Institute for Software Systems; Max Planck Institute for Software Systems", + "bibtex": "@article{Gan_Hennes_Majumdar_Mandal_Radanovic_2023, title={Markov Decision Processes with Time-Varying Geometric Discounting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26413}, DOI={10.1609/aaai.v37i10.26413}, abstractNote={Canonical models of Markov decision processes (MDPs) usually consider geometric discounting based on a constant discount factor. 
While this standard modeling approach has led to many elegant results, some recent studies indicate the necessity of modeling time-varying discounting in certain applications. This paper studies a model of infinite-horizon MDPs with time-varying discount factors. We take a game-theoretic perspective \u2013 whereby each time step is treated as an independent decision maker with their own (fixed) discount factor \u2013 and we study the subgame perfect equilibrium (SPE) of the resulting game as well as the related algorithmic problems. We present a constructive proof of the existence of an SPE and demonstrate the EXPTIME-hardness of computing an SPE. We also turn to the approximate notion of epsilon-SPE and show that an epsilon-SPE exists under milder assumptions. An algorithm is presented to compute an epsilon-SPE, of which an upper bound of the time complexity, as a function of the convergence property of the time-varying discount factor, is provided.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gan, Jiarui and Hennes, Annika and Majumdar, Rupak and Mandal, Debmalya and Radanovic, Goran}, year={2023}, month={Jun.}, pages={11980-11988} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26413/26185", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26413", + "pdf_size": 210335, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5996224753800675242&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "cs.ox.ac.uk;hhu.de;mpi-sws.org;mpi-sws.org;mpi-sws.org", + "email": "cs.ox.ac.uk;hhu.de;mpi-sws.org;mpi-sws.org;mpi-sws.org", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;2", + "aff_unique_norm": "University of Oxford;Heinrich-Heine-University;Max Planck Institute for Software Systems", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ox.ac.uk;https://www.hhu.de;https://www.mpi-sws.org", + 
"aff_unique_abbr": "Oxford;HHU;MPI-SWS", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";D\u00fcsseldorf", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "United Kingdom;Germany" + }, + { + "id": "article-27047", + "title": "Mask-Net: Learning Context Aware Invariant Features Using Adversarial Forgetting (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Training a robust system, e.g., Speech to Text (STT), requires large datasets. Variability present in the dataset, such as unwanted nuances and biases, is the reason for the need for large datasets to learn general representations. In this work, we propose a novel approach to induce invariance using adversarial forgetting (AF). Our initial experiments on learning invariant features such as accent on the STT task achieve better generalizations in terms of word error rate (WER) compared to traditional models. We observe an absolute improvement of 2.2% and 1.3% on out-of-distribution and in-distribution test sets, respectively.", + "primary_area": "", + "author": "Hemant Yadav; Rajiv Ratn Shah", + "authorids": "", + "aff": "IIIT Delhi, India; IIIT Delhi, India", + "bibtex": "@article{Yadav_Ratn Shah_2024, title={Mask-Net: Learning Context Aware Invariant Features Using Adversarial Forgetting (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27047}, DOI={10.1609/aaai.v37i13.27047}, abstractNote={Training a robust system, e.g., Speech to Text (STT), requires large datasets. Variability present in the dataset, such as unwanted nuances and biases, is the reason for the need for large datasets to learn general representations. In this work, we propose a novel approach to induce invariance using adversarial forgetting (AF). 
Our initial experiments on learning invariant features such as accent on the STT task achieve better generalizations in terms of word error rate (WER) compared to traditional models. We observe an absolute improvement of 2.2% and 1.3% on out-of-distribution and in-distribution test sets, respectively.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yadav, Hemant and Ratn Shah, Rajiv}, year={2024}, month={Jul.}, pages={16374-16375} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27047/26819", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27047", + "pdf_size": 434855, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7327217865011261027&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "iiitd.ac.in;iiitd.ac.in", + "email": "iiitd.ac.in;iiitd.ac.in", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "IIIT Delhi", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iiitdelhi.ac.in", + "aff_unique_abbr": "IIITD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25481", + "title": "MaskBooster: End-to-End Self-Training for Sparsely Supervised Instance Segmentation", + "track": "main", + "status": "Technical", + "abstract": "The present paper introduces sparsely supervised instance segmentation, with the datasets being fully annotated bounding boxes and sparsely annotated masks. A direct solution to this task is self-training, which is not fully explored for instance segmentation yet. In this paper, we propose MaskBooster for sparsely supervised instance segmentation (SpSIS) with comprehensive usage of pseudo masks. 
MaskBooster is featured with (1) dynamic and progressive pseudo masks from an online updating teacher model, (2) refining binary pseudo masks with the help of bounding box prior, (3) learning inter-class prediction distribution via knowledge distillation for soft pseudo masks. As an end-to-end and universal self-training framework, MaskBooster can empower fully supervised algorithms and boost their segmentation performance on SpSIS. Abundant experiments are conducted on COCO and BDD100K datasets and validate the effectiveness of MaskBooster. Specifically, on different COCO protocols and BDD100K, we surpass sparsely supervised baseline by a large margin for both Mask RCNN and ShapeProp. MaskBooster on SpSIS also outperforms weakly and semi-supervised instance segmentation state-of-the-art on the datasets with similar annotation budgets.", + "primary_area": "computer vision iii", + "author": "Shida Zheng; Chenshu Chen; Xi Yang; Wenming Tan", + "authorids": "", + "aff": "Hikvision Research Institute; Hikvision Research Institute; Hikvision Research Institute; Hikvision Research Institute", + "bibtex": "@article{Zheng_Chen_Yang_Tan_2023, title={MaskBooster: End-to-End Self-Training for Sparsely Supervised Instance Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25481}, DOI={10.1609/aaai.v37i3.25481}, abstractNote={The present paper introduces sparsely supervised instance segmentation, with the datasets being fully annotated bounding boxes and sparsely annotated masks. A direct solution to this task is self-training, which is not fully explored for instance segmentation yet. In this paper, we propose MaskBooster for sparsely supervised instance segmentation (SpSIS) with comprehensive usage of pseudo masks. 
MaskBooster is featured with (1) dynamic and progressive pseudo masks from an online updating teacher model, (2) refining binary pseudo masks with the help of bounding box prior, (3) learning inter-class prediction distribution via knowledge distillation for soft pseudo masks. As an end-to-end and universal self-training framework, MaskBooster can empower fully supervised algorithms and boost their segmentation performance on SpSIS. Abundant experiments are conducted on COCO and BDD100K datasets and validate the effectiveness of MaskBooster. Specifically, on different COCO protocols and BDD100K, we surpass sparsely supervised baseline by a large margin for both Mask RCNN and ShapeProp. MaskBooster on SpSIS also outperforms weakly and semi-supervised instance segmentation state-of-the-art on the datasets with similar annotation budgets.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Shida and Chen, Chenshu and Yang, Xi and Tan, Wenming}, year={2023}, month={Jun.}, pages={3696-3704} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25481/25253", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25481", + "pdf_size": 1326681, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:LnrbOcHZOyMJ:scholar.google.com/&scioq=MaskBooster:+End-to-End+Self-Training+for+Sparsely+Supervised+Instance+Segmentation&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "hikvision.com;hikvision.com;hikvision.com;hikvision.com", + "email": "hikvision.com;hikvision.com;hikvision.com;hikvision.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Hikvision Research Institute", + "aff_unique_dep": "", + "aff_unique_url": "https://www.hikvision.com/cn/", + "aff_unique_abbr": "Hikvision", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": 
"China" + }, + { + "id": "article-25807", + "title": "Materialisation-Based Reasoning in DatalogMTL with Bounded Intervals", + "track": "main", + "status": "Technical", + "abstract": "DatalogMTL is a powerful extension of Datalog with operators from metric temporal logic (MTL), which has received significant attention in recent years. In this paper, we investigate materialisation-based reasoning (a.k.a. forward chaining) in the context of DatalogMTL programs and datasets with bounded intervals, where partial representations of the canonical model are obtained through successive rounds of rule applications. Although materialisation does not naturally terminate in this setting, it is known that the structure of canonical models is ultimately periodic. Our first contribution in this paper is a detailed analysis of the periodic structure of canonical models; in particular, we formulate saturation conditions whose satisfaction by a partial materialisation implies an ability to recover the full canonical model via unfolding; this allows us to compute the actual periods describing the repeating parts of the canonical model as well as to establish concrete bounds on the number of rounds of rule applications required to achieve saturation. Based on these theoretical results, we propose a practical reasoning algorithm where saturation can be efficiently detected as materialisation progresses, and where the relevant periods used to evaluate entailment of queries via unfolding are efficiently computed. We have implemented our algorithm and our experiments suggest that our approach is both scalable and robust.", + "primary_area": "knowledge representation and reasoning", + "author": "Przemys\u0142aw A. 
Wa\u0142\u0119ga; Micha\u0142 Zawidzki; Dingmin Wang; Bernardo Cuenca Grau", + "authorids": "", + "aff": "Department of Computer Science, University of Oxford, UK; Department of Computer Science, University of Oxford, UK; Department of Computer Science, University of Oxford, UK; Department of Computer Science, University of Oxford, UK", + "bibtex": "@article{Wa\u0142\u0119ga_Zawidzki_Wang_Cuenca Grau_2023, title={Materialisation-Based Reasoning in DatalogMTL with Bounded Intervals}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25807}, DOI={10.1609/aaai.v37i5.25807}, abstractNote={DatalogMTL is a powerful extension of Datalog with operators from metric temporal logic (MTL), which has received significant attention in recent years. In this paper, we investigate materialisation-based reasoning (a.k.a. forward chaining) in the context of DatalogMTL programs and datasets with bounded intervals, where partial representations of the canonical model are obtained through successive rounds of rule applications. Although materialisation does not naturally terminate in this setting, it is known that the structure of canonical models is ultimately periodic. Our first contribution in this paper is a detailed analysis of the periodic structure of canonical models; in particular, we formulate saturation conditions whose satisfaction by a partial materialisation implies an ability to recover the full canonical model via unfolding; this allows us to compute the actual periods describing the repeating parts of the canonical model as well as to establish concrete bounds on the number of rounds of rule applications required to achieve saturation. Based on these theoretical results, we propose a practical reasoning algorithm where saturation can be efficiently detected as materialisation progresses, and where the relevant periods used to evaluate entailment of queries via unfolding are efficiently computed. 
We have implemented our algorithm and our experiments suggest that our approach is both scalable and robust.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wa\u0142\u0119ga, Przemys\u0142aw A. and Zawidzki, Micha\u0142 and Wang, Dingmin and Cuenca Grau, Bernardo}, year={2023}, month={Jun.}, pages={6566-6574} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25807/25579", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25807", + "pdf_size": 174254, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6355954485889728349&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "cs.ox.ac.uk;cs.ox.ac.uk;cs.ox.ac.uk;cs.ox.ac.uk", + "email": "cs.ox.ac.uk;cs.ox.ac.uk;cs.ox.ac.uk;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-27018", + "title": "Maximizing Influence Spread through a Dynamic Social Network (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Modern social networks are dynamic in their nature; a new connections are appearing and old connections are disappearing all the time. However, in our algorithmic and complexity studies, we usually model social networks as static graphs.\n\nIn this paper, we propose a new paradigm for the study of the well-known Target Set Selection problem, which is a fundamental problem in viral marketing and the spread of opinion through social networks. 
In particular, we use temporal graphs to capture the dynamic nature of social networks.\n\nWe show that the temporal interpretation is, unsurprisingly, NP-complete in general. Then, we study computational complexity of this problem for multiple restrictions of both the threshold function and the underlying graph structure and provide multiple hardness lower-bounds.", + "primary_area": "", + "author": "\u0160imon Schierreich", + "authorids": "", + "aff": "Department of Theoretical Computer Science, Faculty of Information Technology, Czech Technical University in Prague, Prague, Czech Republic", + "bibtex": "@article{Schierreich_2024, title={Maximizing Influence Spread through a Dynamic Social Network (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27018}, DOI={10.1609/aaai.v37i13.27018}, abstractNote={Modern social networks are dynamic in their nature; a new connections are appearing and old connections are disappearing all the time. However, in our algorithmic and complexity studies, we usually model social networks as static graphs. In this paper, we propose a new paradigm for the study of the well-known Target Set Selection problem, which is a fundamental problem in viral marketing and the spread of opinion through social networks. In particular, we use temporal graphs to capture the dynamic nature of social networks. We show that the temporal interpretation is, unsurprisingly, NP-complete in general. 
Then, we study computational complexity of this problem for multiple restrictions of both the threshold function and the underlying graph structure and provide multiple hardness lower-bounds.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Schierreich, \u0160imon}, year={2024}, month={Jul.}, pages={16316-16317} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27018/26790", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27018", + "pdf_size": 79341, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6651804188949410208&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "fit.cvut.cz", + "email": "fit.cvut.cz", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Czech Technical University in Prague", + "aff_unique_dep": "Department of Theoretical Computer Science", + "aff_unique_url": "https://www.fit.cvut.cz", + "aff_unique_abbr": "CTU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Prague", + "aff_country_unique_index": "0", + "aff_country_unique": "Czech Republic" + }, + { + "id": "article-26446", + "title": "Maximizing the Probability of Fixation in the Positional Voter Model", + "track": "main", + "status": "Technical", + "abstract": "The Voter model is a well-studied stochastic process that models the invasion of a novel trait A (e.g., a new opinion, social meme, genetic mutation, magnetic spin) in a network of individuals (agents, people, genes, particles) carrying an existing resident trait B. Individuals change traits by occasionally sampling the trait of a neighbor, while an invasion bias \u03b4 \u2265 0 expresses the stochastic preference to adopt the novel trait A over the resident trait B. The strength of an invasion is measured by the probability that eventually the whole population adopts trait A, i.e., the fixation probability. 
In more realistic settings, however, the invasion bias is not ubiquitous, but rather manifested only in parts of the network. For instance, when modeling the spread of a social trait, the invasion bias represents localized incentives. In this paper, we generalize the standard biased Voter model to the positional Voter model, in which the invasion bias is effectuated only on an arbitrary subset of the network nodes, called biased nodes. We study the ensuing optimization problem, which is, given a budget k, to choose k biased nodes so as to maximize the fixation probability of a randomly occurring invasion. We show that the problem is NP-hard both for finite \u03b4 and when \u03b4 \u2192 \u221e (strong bias), while the objective function is not submodular in either setting, indicating strong computational hardness. On the other hand, we show that, when \u03b4 \u2192 0 (weak bias), we can obtain a tight approximation in O(n^2\u03c9 ) time, where \u03c9 is the matrix-multiplication exponent. We complement our theoretical results with an experimental evaluation of some proposed heuristics.", + "primary_area": "reasoning under uncertainty", + "author": "Petros Petsinis; Andreas Pavlogiannis; Panagiotis Karras", + "authorids": "", + "aff": "Aarhus University, Denmark; Aarhus University, Denmark; Aarhus University, Denmark", + "bibtex": "@article{Petsinis_Pavlogiannis_Karras_2023, title={Maximizing the Probability of Fixation in the Positional Voter Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26446}, DOI={10.1609/aaai.v37i10.26446}, abstractNote={The Voter model is a well-studied stochastic process that models the invasion of a novel trait A (e.g., a new opinion, social meme, genetic mutation, magnetic spin) in a network of individuals (agents, people, genes, particles) carrying an existing resident trait B. 
Individuals change traits by occasionally sampling the trait of a neighbor, while an invasion bias \u03b4 \u2265 0 expresses the stochastic preference to adopt the novel trait A over the resident trait B. The strength of an invasion is measured by the probability that eventually the whole population adopts trait A, i.e., the fixation probability. In more realistic settings, however, the invasion bias is not ubiquitous, but rather manifested only in parts of the network. For instance, when modeling the spread of a social trait, the invasion bias represents localized incentives. In this paper, we generalize the standard biased Voter model to the positional Voter model, in which the invasion bias is effectuated only on an arbitrary subset of the network nodes, called biased nodes. We study the ensuing optimization problem, which is, given a budget k, to choose k biased nodes so as to maximize the fixation probability of a randomly occurring invasion. We show that the problem is NP-hard both for finite \u03b4 and when \u03b4 \u2192 \u221e (strong bias), while the objective function is not submodular in either setting, indicating strong computational hardness. On the other hand, we show that, when \u03b4 \u2192 0 (weak bias), we can obtain a tight approximation in O(n^2\u03c9 ) time, where \u03c9 is the matrix-multiplication exponent. 
We complement our theoretical results with an experimental evaluation of some proposed heuristics.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Petsinis, Petros and Pavlogiannis, Andreas and Karras, Panagiotis}, year={2023}, month={Jun.}, pages={12269-12277} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26446/26218", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26446", + "pdf_size": 405695, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5353331208656479196&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "cs.au.dk;cs.au.dk;cs.au.dk", + "email": "cs.au.dk;cs.au.dk;cs.au.dk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Aarhus University", + "aff_unique_dep": "", + "aff_unique_url": "https://au.dk", + "aff_unique_abbr": "AU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Denmark" + }, + { + "id": "article-25758", + "title": "Maximum Entropy Population-Based Training for Zero-Shot Human-AI Coordination", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of training a Reinforcement Learning (RL) agent that is collaborative with humans without using human data. Although such agents can be obtained through self-play training, they can suffer significantly from the distributional shift when paired with unencountered partners, such as humans. In this paper, we propose Maximum Entropy Population-based training (MEP) to mitigate such distributional shift. In MEP, agents in the population are trained with our derived Population Entropy bonus to promote the pairwise diversity between agents and the individual diversity of agents themselves. 
After obtaining this diversified population, a common best agent is trained by paring with agents in this population via prioritized sampling, where the prioritization is dynamically adjusted based on the training progress. We demonstrate the effectiveness of our method MEP, with comparison to Self-Play PPO (SP), Population-Based Training (PBT), Trajectory Diversity (TrajeDi), and Fictitious Co-Play (FCP) in both matrix game and Overcooked game environments, with partners being human proxy models and real humans. A supplementary video showing experimental results is available at https://youtu.be/Xh-FKD0AAKE.", + "primary_area": "humans and ai", + "author": "Rui Zhao; Jinming Song; Yufeng Yuan; Haifeng Hu; Yang Gao; Yi Wu; Zhongqian Sun; Wei Yang", + "authorids": "", + "aff": "Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tsinghua University; Tsinghua University; Tencent AI Lab; Tencent AI Lab", + "bibtex": "@article{Zhao_Song_Yuan_Hu_Gao_Wu_Sun_Yang_2023, title={Maximum Entropy Population-Based Training for Zero-Shot Human-AI Coordination}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25758}, DOI={10.1609/aaai.v37i5.25758}, abstractNote={We study the problem of training a Reinforcement Learning (RL) agent that is collaborative with humans without using human data. Although such agents can be obtained through self-play training, they can suffer significantly from the distributional shift when paired with unencountered partners, such as humans. In this paper, we propose Maximum Entropy Population-based training (MEP) to mitigate such distributional shift. In MEP, agents in the population are trained with our derived Population Entropy bonus to promote the pairwise diversity between agents and the individual diversity of agents themselves. 
After obtaining this diversified population, a common best agent is trained by paring with agents in this population via prioritized sampling, where the prioritization is dynamically adjusted based on the training progress. We demonstrate the effectiveness of our method MEP, with comparison to Self-Play PPO (SP), Population-Based Training (PBT), Trajectory Diversity (TrajeDi), and Fictitious Co-Play (FCP) in both matrix game and Overcooked game environments, with partners being human proxy models and real humans. A supplementary video showing experimental results is available at https://youtu.be/Xh-FKD0AAKE.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Rui and Song, Jinming and Yuan, Yufeng and Hu, Haifeng and Gao, Yang and Wu, Yi and Sun, Zhongqian and Yang, Wei}, year={2023}, month={Jun.}, pages={6145-6153} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25758/25530", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25758", + "pdf_size": 258415, + "gs_citation": 71, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15588872748076271403&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com; ; ; ; ; ; ; ", + "email": "gmail.com; ; ; ; ; ; ; ", + "github": "", + "project": "https://youtu.be/Xh-FKD0AAKE", + "author_num": 8, + "aff_unique_index": "0;0;0;0;1;1;0;0", + "aff_unique_norm": "Tencent;Tsinghua University", + "aff_unique_dep": "Tencent AI Lab;", + "aff_unique_url": "https://ai.tencent.com;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "Tencent AI Lab;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26110", + "title": "Mean Estimation of Truncated Mixtures of Two Gaussians: A Gradient Based Approach", + "track": "main", + "status": "Technical", + "abstract": "Even though data is abundant, it is often subjected 
to some form of censoring or truncation which inherently creates biases. Removing such biases and performing parameter estimation is a classical challenge in Statistics. In this paper, we focus on the problem of estimating the means of a mixture of two balanced d-dimensional Gaussians when the samples are prone to truncation. A recent theoretical study on the performance of the Expectation-Maximization (EM) algorithm for the aforementioned problem showed EM almost surely converges for d=1 and exhibits local convergence for d>1 to the true means. Nevertheless, the EM algorithm for the case of truncated mixture of two Gaussians is not easy to implement as it requires solving a set of nonlinear equations at every iteration which makes the algorithm impractical. In this work, we propose a gradient based variant of the EM algorithm that has global convergence guarantees when d=1 and local convergence for d>1 to the true means. Moreover, the update rule at every iteration is easy to compute which makes the proposed method practical. We also provide numerous experiments to obtain more insights into the effect of truncation on the convergence to the true parameters in high dimensions.", + "primary_area": "machine learning iii", + "author": "Sai Ganesh Nagarajan; Gerasimos Palaiopanos; Ioannis Panageas; Tushar Vaidya; Samson Yu", + "authorids": "", + "aff": "EPFL; University of Pittsburgh; University of California, Irvine; NTU; NUS", + "bibtex": "@article{Nagarajan_Palaiopanos_Panageas_Vaidya_Yu_2023, title={Mean Estimation of Truncated Mixtures of Two Gaussians: A Gradient Based Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26110}, DOI={10.1609/aaai.v37i8.26110}, abstractNote={Even though data is abundant, it is often subjected to some form of censoring or truncation which inherently creates biases. Removing such biases and performing parameter estimation is a classical challenge in Statistics. 
In this paper, we focus on the problem of estimating the means of a mixture of two balanced d-dimensional Gaussians when the samples are prone to truncation. A recent theoretical study on the performance of the Expectation-Maximization (EM) algorithm for the aforementioned problem showed EM almost surely converges for d=1 and exhibits local convergence for d>1 to the true means. Nevertheless, the EM algorithm for the case of truncated mixture of two Gaussians is not easy to implement as it requires solving a set of nonlinear equations at every iteration which makes the algorithm impractical. In this work, we propose a gradient based variant of the EM algorithm that has global convergence guarantees when d=1 and local convergence for d>1 to the true means. Moreover, the update rule at every iteration is easy to compute which makes the proposed method practical. We also provide numerous experiments to obtain more insights into the effect of truncation on the convergence to the true parameters in high dimensions.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nagarajan, Sai Ganesh and Palaiopanos, Gerasimos and Panageas, Ioannis and Vaidya, Tushar and Yu, Samson}, year={2023}, month={Jun.}, pages={9260-9267} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26110/25882", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26110", + "pdf_size": 263730, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14590030709399394544&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne;University of Pittsburgh;University of California, Irvine;Nanyang Technological University;National University of Singapore", + "aff_unique_dep": ";;;;", + "aff_unique_url": 
"https://www.epfl.ch;https://www.pitt.edu;https://www.uci.edu;https://www.ntu.edu.sg;https://www.nus.edu.sg", + "aff_unique_abbr": "EPFL;Pitt;UCI;NTU;NUS", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Irvine", + "aff_country_unique_index": "0;1;1;2;2", + "aff_country_unique": "Switzerland;United States;Singapore" + }, + { + "id": "article-25309", + "title": "Mean-Shifted Contrastive Loss for Anomaly Detection", + "track": "main", + "status": "Technical", + "abstract": "Deep anomaly detection methods learn representations that separate between normal and anomalous images. Although self-supervised representation learning is commonly used, small dataset sizes limit its effectiveness. It was previously shown that utilizing external, generic datasets (e.g. ImageNet classification) can significantly improve anomaly detection performance. One approach is outlier exposure, which fails when the external datasets do not resemble the anomalies. We take the approach of transferring representations pre-trained on external datasets for anomaly detection. Anomaly detection performance can be significantly improved by fine-tuning the pre-trained representations on the normal training images. In this paper, we first demonstrate and analyze that contrastive learning, the most popular self-supervised learning paradigm cannot be naively applied to pre-trained features. The reason is that pre-trained feature initialization causes poor conditioning for standard contrastive objectives, resulting in bad optimization dynamics. Based on our analysis, we provide a modified contrastive objective, the Mean-Shifted Contrastive Loss. 
Our method is highly effective and achieves a new state-of-the-art anomaly detection performance including 98.6% ROC-AUC on the CIFAR-10 dataset.", + "primary_area": "computer vision ii", + "author": "Tal Reiss; Yedid Hoshen", + "authorids": "", + "aff": "The Hebrew University of Jerusalem; The Hebrew University of Jerusalem", + "bibtex": "@article{Reiss_Hoshen_2023, title={Mean-Shifted Contrastive Loss for Anomaly Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25309}, DOI={10.1609/aaai.v37i2.25309}, abstractNote={Deep anomaly detection methods learn representations that separate between normal and anomalous images. Although self-supervised representation learning is commonly used, small dataset sizes limit its effectiveness. It was previously shown that utilizing external, generic datasets (e.g. ImageNet classification) can significantly improve anomaly detection performance. One approach is outlier exposure, which fails when the external datasets do not resemble the anomalies. We take the approach of transferring representations pre-trained on external datasets for anomaly detection. Anomaly detection performance can be significantly improved by fine-tuning the pre-trained representations on the normal training images. In this paper, we first demonstrate and analyze that contrastive learning, the most popular self-supervised learning paradigm cannot be naively applied to pre-trained features. The reason is that pre-trained feature initialization causes poor conditioning for standard contrastive objectives, resulting in bad optimization dynamics. Based on our analysis, we provide a modified contrastive objective, the Mean-Shifted Contrastive Loss. 
Our method is highly effective and achieves a new state-of-the-art anomaly detection performance including 98.6% ROC-AUC on the CIFAR-10 dataset.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Reiss, Tal and Hoshen, Yedid}, year={2023}, month={Jun.}, pages={2155-2162} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25309/25081", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25309", + "pdf_size": 496978, + "gs_citation": 150, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10386217252747944521&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Hebrew University of Jerusalem", + "aff_unique_dep": "", + "aff_unique_url": "https://www.huji.ac.il", + "aff_unique_abbr": "HUJI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-27050", + "title": "Measuring the Privacy Leakage via Graph Reconstruction Attacks on Simplicial Neural Networks (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this paper, we measure the privacy leakage via studying whether graph representations can be inverted to recover the graph used to generate them via graph reconstruction attack (GRA). We propose a GRA that recovers a graph's adjacency matrix from the representations via a graph decoder that minimizes the reconstruction loss between the partial graph and the reconstructed graph. We study three types of representations that are trained on the graph, i.e., representations output from graph convolutional network (GCN), graph attention network (GAT), and our proposed simplicial neural network (SNN) via a higher-order combinatorial Laplacian. 
Unlike the first two types of representations that only encode pairwise relationships, the third type of representation, i.e., SNN outputs, encodes higher-order interactions (e.g., homological features) between nodes. We find that the SNN outputs reveal the lowest privacy-preserving ability to defend the GRA, followed by those of GATs and GCNs, which indicates the importance of building more private representations with higher-order node information that could defend the potential threats, such as GRAs.", + "primary_area": "", + "author": "Huixin Zhan; Kun Zhang; Keyi Lu; Victor S. Sheng", + "authorids": "", + "aff": "Department of Computer Science, Texas Tech University; Department of Computer Science, Xavier University of Louisiana; Department of Computer Science and Engineering, The Ohio State University; Department of Computer Science, Texas Tech University", + "bibtex": "@article{Zhan_Zhang_Lu_Sheng_2024, title={Measuring the Privacy Leakage via Graph Reconstruction Attacks on Simplicial Neural Networks (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27050}, DOI={10.1609/aaai.v37i13.27050}, abstractNote={In this paper, we measure the privacy leakage via studying whether graph representations can be inverted to recover the graph used to generate them via graph reconstruction attack (GRA). We propose a GRA that recovers a graph\u2019s adjacency matrix from the representations via a graph decoder that minimizes the reconstruction loss between the partial graph and the reconstructed graph. We study three types of representations that are trained on the graph, i.e., representations output from graph convolutional network (GCN), graph attention network (GAT), and our proposed simplicial neural network (SNN) via a higher-order combinatorial Laplacian. 
Unlike the first two types of representations that only encode pairwise relationships, the third type of representation, i.e., SNN outputs, encodes higher-order interactions (e.g., homological features) between nodes. We find that the SNN outputs reveal the lowest privacy-preserving ability to defend the GRA, followed by those of GATs and GCNs, which indicates the importance of building more private representations with higher-order node information that could defend the potential threats, such as GRAs.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhan, Huixin and Zhang, Kun and Lu, Keyi and Sheng, Victor S.}, year={2024}, month={Jul.}, pages={16380-16381} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27050/26822", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27050", + "pdf_size": 86515, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13552879306636366781&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ttu.edu;xula.edu;osu.edu;ttu.edu", + "email": "ttu.edu;xula.edu;osu.edu;ttu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Texas Tech University;Xavier University of Louisiana;The Ohio State University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.ttu.edu;https://www.xula.edu;https://www.osu.edu", + "aff_unique_abbr": "TTU;XULA;OSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26649", + "title": "Med-EASi: Finely Annotated Dataset and Models for Controllable Simplification of Medical Texts", + "track": "aaai special track", + "status": "Technical", + "abstract": "Automatic medical text simplification can assist providers 
with patient-friendly communication and make medical texts more accessible, thereby improving health literacy. But curating a quality corpus for this task requires the supervision of medical experts. In this work, we present Med-EASi (Medical dataset for Elaborative and Abstractive Simplification), a uniquely crowdsourced and finely annotated dataset for supervised simplification of short medical texts. Its expert-layman-AI collaborative annotations facilitate controllability over text simplification by marking four kinds of textual transformations: elaboration, replacement, deletion, and insertion. To learn medical text simplification, we fine-tune T5-large with four different styles of input-output combinations, leading to two control-free and two controllable versions of the model. We add two types of controllability into text simplification, by using a multi-angle training approach: position-aware, which uses in-place annotated inputs and outputs, and position-agnostic, where the model only knows the contents to be edited, but not their positions. Our results show that our fine-grained annotations improve learning compared to the unannotated baseline. Furthermore, our position-aware control enhances the model's ability to generate better simplification than the position-agnostic version. 
The data and code are available at https://github.com/Chandrayee/CTRL-SIMP.", + "primary_area": "ai for social impact", + "author": "Chandrayee Basu; Rosni Vasu; Michihiro Yasunaga; Qian Yang", + "authorids": "", + "aff": "Stanford University; University of Zurich; Stanford University; Cornell University", + "bibtex": "@article{Basu_Vasu_Yasunaga_Yang_2023, title={Med-EASi: Finely Annotated Dataset and Models for Controllable Simplification of Medical Texts}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26649}, DOI={10.1609/aaai.v37i12.26649}, abstractNote={Automatic medical text simplification can assist providers with patient-friendly communication and make medical texts more accessible, thereby improving health literacy. But curating a quality corpus for this task requires the supervision of medical experts. In this work, we present Med-EASi (Medical dataset for Elaborative and Abstractive Simplification), a uniquely crowdsourced and finely annotated dataset for supervised simplification of short medical texts. Its expert-layman-AI collaborative annotations facilitate controllability over text simplification by marking four kinds of textual transformations: elaboration, replacement, deletion, and insertion. To learn medical text simplification, we fine-tune T5-large with four different styles of input-output combinations, leading to two control-free and two controllable versions of the model. We add two types of controllability into text simplification, by using a multi-angle training approach: position-aware, which uses in-place annotated inputs and outputs, and position-agnostic, where the model only knows the contents to be edited, but not their positions. Our results show that our fine-grained annotations improve learning compared to the unannotated baseline. Furthermore, our position-aware control enhances the model\u2019s ability to generate better simplification than the position-agnostic version. 
The data and code are available at https://github.com/Chandrayee/CTRL-SIMP.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Basu, Chandrayee and Vasu, Rosni and Yasunaga, Michihiro and Yang, Qian}, year={2023}, month={Jun.}, pages={14093-14101} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26649/26421", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26649", + "pdf_size": 454342, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12726027441269889996&as_sdt=800005&sciodt=0,15&hl=en", + "gs_version_total": 6, + "aff_domain": "stanford.edu;ifi.uzh.ch;stanford.edu;cornell.edu", + "email": "stanford.edu;ifi.uzh.ch;stanford.edu;cornell.edu", + "github": "https://github.com/Chandrayee/CTRL-SIMP", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Stanford University;University of Zurich;Cornell University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.stanford.edu;https://www.unizh.ch;https://www.cornell.edu", + "aff_unique_abbr": "Stanford;UZH;Cornell", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "United States;Switzerland" + }, + { + "id": "article-25678", + "title": "Mediated Cheap Talk Design", + "track": "main", + "status": "Technical", + "abstract": "We study an information design problem with two informed senders and a receiver in which, in contrast to traditional Bayesian persuasion settings, senders do not have commitment power. In our setting, a trusted mediator/platform gathers data from the senders and recommends the receiver which action to play. We characterize the set of feasible action distributions that can be obtained in equilibrium, and provide an O(n log n) algorithm (where n is the number of states) that computes the optimal equilibrium for the senders. 
Additionally, we show that the optimal equilibrium for the receiver can be obtained by a simple revelation mechanism.", + "primary_area": "game theory and economic paradigms", + "author": "Itai Arieli; Ivan Geffner; Moshe Tennenholtz", + "authorids": "", + "aff": "Technion - Israel Institute of Technology; Technion - Israel Institute of Technology+European Research Council (ERC); Technion - Israel Institute of Technology", + "bibtex": "@article{Arieli_Geffner_Tennenholtz_2023, title={Mediated Cheap Talk Design}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25678}, DOI={10.1609/aaai.v37i5.25678}, abstractNote={We study an information design problem with two informed senders and a receiver in which, in contrast to traditional Bayesian persuasion settings, senders do not have commitment power. In our setting, a trusted mediator/platform gathers data from the senders and recommends the receiver which action to play. We characterize the set of feasible action distributions that can be obtained in equilibrium, and provide an O(n log n) algorithm (where n is the number of states) that computes the optimal equilibrium for the senders. 
Additionally, we show that the optimal equilibrium for the receiver can be obtained by a simple revelation mechanism.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Arieli, Itai and Geffner, Ivan and Tennenholtz, Moshe}, year={2023}, month={Jun.}, pages={5456-5463} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25678/25450", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25678", + "pdf_size": 152822, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10718880696282054837&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "technion.ac.il;cornell.edu;technion.ac.il", + "email": "technion.ac.il;cornell.edu;technion.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0", + "aff_unique_norm": "Technion - Israel Institute of Technology;European Research Council", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.technion.ac.il/en/;https://erc.europa.eu", + "aff_unique_abbr": "Technion;ERC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0", + "aff_country_unique": "Israel;European Union" + }, + { + "id": "article-26329", + "title": "Memorization Weights for Instance Reweighting in Adversarial Training", + "track": "main", + "status": "Technical", + "abstract": "Adversarial training is an effective way to defend deep neural networks (DNN) against adversarial examples. However, there are atypical samples that are rare and hard to learn, or even hurt DNNs' generalization performance on test data. In this paper, we propose a novel algorithm to reweight the training samples based on self-supervised techniques to mitigate the negative effects of the atypical samples. \nSpecifically, a memory bank is built to record the popular samples as prototypes and calculate the memorization weight for each sample, evaluating the \"typicalness\" of a sample. 
All the training samples are reweigthed based on the proposed memorization weights to reduce the negative effects of atypical samples. Experimental results show the proposed method is flexible to boost state-of-the-art adversarial training methods, improving both robustness and standard accuracy of DNNs.", + "primary_area": "machine learning iv", + "author": "Jianfu Zhang; Yan Hong; Qibin Zhao", + "authorids": "", + "aff": "RIKEN AIP; Shanghai Jiao Tong University; RIKEN AIP", + "bibtex": "@article{Zhang_Hong_Zhao_2023, title={Memorization Weights for Instance Reweighting in Adversarial Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26329}, DOI={10.1609/aaai.v37i9.26329}, abstractNote={Adversarial training is an effective way to defend deep neural networks (DNN) against adversarial examples. However, there are atypical samples that are rare and hard to learn, or even hurt DNNs\u2019 generalization performance on test data. In this paper, we propose a novel algorithm to reweight the training samples based on self-supervised techniques to mitigate the negative effects of the atypical samples. Specifically, a memory bank is built to record the popular samples as prototypes and calculate the memorization weight for each sample, evaluating the "typicalness" of a sample. All the training samples are reweigthed based on the proposed memorization weights to reduce the negative effects of atypical samples. 
Experimental results show the proposed method is flexible to boost state-of-the-art adversarial training methods, improving both robustness and standard accuracy of DNNs.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jianfu and Hong, Yan and Zhao, Qibin}, year={2023}, month={Jun.}, pages={11228-11236} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26329/26101", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26329", + "pdf_size": 4596501, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7308663509192390785&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "riken.jp;gmail.com;riken.jp", + "email": "riken.jp;gmail.com;riken.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "RIKEN;Shanghai Jiao Tong University", + "aff_unique_dep": "Advanced Institute for Computational Science;", + "aff_unique_url": "https://www.aip.riken.jp;https://www.sjtu.edu.cn", + "aff_unique_abbr": "RIKEN AIP;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Japan;China" + }, + { + "id": "article-25480", + "title": "Memory-Aided Contrastive Consensus Learning for Co-salient Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Co-salient object detection (CoSOD) aims at detecting common salient objects within a group of relevant source images. Most of the latest works employ the attention mechanism for finding common objects. To achieve accurate CoSOD results with high-quality maps and high efficiency, we propose a novel Memory-aided Contrastive Consensus Learning (MCCL) framework, which is capable of effectively detecting co-salient objects in real time (\u223c150 fps). 
To learn better group consensus, we propose the Group Consensus Aggregation Module (GCAM) to abstract the common features of each image group; meanwhile, to make the consensus representation more discriminative, we introduce the Memory-based Contrastive Module (MCM), which saves and updates the consensus of images from different groups in a queue of memories. Finally, to improve the quality and integrity of the predicted maps, we develop an Adversarial Integrity Learning (AIL) strategy to make the segmented regions more likely composed of complete objects with less surrounding noise. Extensive experiments on all the latest CoSOD benchmarks demonstrate that our lite MCCL outperforms 13 cutting-edge models, achieving the new state of the art (\u223c5.9% and \u223c6.2% improvement in S-measure on CoSOD3k and CoSal2015, respectively). Our source codes, saliency maps, and online demos are publicly available at https://github.com/ZhengPeng7/MCCL.", + "primary_area": "computer vision iii", + "author": "Peng Zheng; Jie Qin; Shuo Wang; Tian-Zhu Xiang; Huan Xiong", + "authorids": "", + "aff": "Nanjing University of Aeronautics and Astronautics; Nanjing University of Aeronautics and Astronautics; ETH Zurich; Inception Institute of Artificial Intelligence; Harbin Institute of Technology+Mohamed bin Zayed University of Artificial Intelligence", + "bibtex": "@article{Zheng_Qin_Wang_Xiang_Xiong_2023, title={Memory-Aided Contrastive Consensus Learning for Co-salient Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25480}, DOI={10.1609/aaai.v37i3.25480}, abstractNote={Co-salient object detection (CoSOD) aims at detecting common salient objects within a group of relevant source images. Most of the latest works employ the attention mechanism for finding common objects. 
To achieve accurate CoSOD results with high-quality maps and high efficiency, we propose a novel Memory-aided Contrastive Consensus Learning (MCCL) framework, which is capable of effectively detecting co-salient objects in real time (\u223c150 fps). To learn better group consensus, we propose the Group Consensus Aggregation Module (GCAM) to abstract the common features of each image group; meanwhile, to make the consensus representation more discriminative, we introduce the Memory-based Contrastive Module (MCM), which saves and updates the consensus of images from different groups in a queue of memories. Finally, to improve the quality and integrity of the predicted maps, we develop an Adversarial Integrity Learning (AIL) strategy to make the segmented regions more likely composed of complete objects with less surrounding noise. Extensive experiments on all the latest CoSOD benchmarks demonstrate that our lite MCCL outperforms 13 cutting-edge models, achieving the new state of the art (\u223c5.9% and \u223c6.2% improvement in S-measure on CoSOD3k and CoSal2015, respectively). 
Our source codes, saliency maps, and online demos are publicly available at https://github.com/ZhengPeng7/MCCL.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Peng and Qin, Jie and Wang, Shuo and Xiang, Tian-Zhu and Xiong, Huan}, year={2023}, month={Jun.}, pages={3687-3695} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25480/25252", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25480", + "pdf_size": 2307695, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3820133085414979639&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "github": "https://github.com/ZhengPeng7/MCCL", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;3+4", + "aff_unique_norm": "Nanjing University of Aeronautics and Astronautics;ETH Zurich;Inception Institute of Artificial Intelligence;Harbin Institute of Technology;Mohamed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";;;;", + "aff_unique_url": "http://www.nuaa.edu.cn;https://www.ethz.ch;https://www.inceptioniai.org;http://www.hit.edu.cn/;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "NUAA;ETHZ;;HIT;MBZUAI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0;0;1;2;0+2", + "aff_country_unique": "China;Switzerland;United Arab Emirates" + }, + { + "id": "article-26374", + "title": "Memory-Augmented Theory of Mind Network", + "track": "main", + "status": "Technical", + "abstract": "Social reasoning necessitates the capacity of theory of mind (ToM), the ability to contextualise and attribute mental states to others without having access to their internal cognitive structure. 
Recent machine learning approaches to ToM have demonstrated that we can train the observer to read the past and present behaviours of other agents and infer their beliefs (including false beliefs about things that no longer exist), goals, intentions and future actions. The challenges arise when the behavioural space is complex, demanding skilful space navigation for rapidly changing contexts for an extended period. We tackle the challenges by equipping the observer with novel neural memory mechanisms to encode, and hierarchical attention to selectively retrieve information about others. The memories allow rapid, selective querying of distal related past behaviours of others to deliberatively reason about their current mental state, beliefs and future behaviours. This results in ToMMY, a theory of mind model that learns to reason while making little assumptions about the underlying mental processes. We also construct a new suite of experiments to demonstrate that memories facilitate the learning process and achieve better theory of mind performance, especially for high-demand false-belief tasks that require inferring through multiple steps of changes.", + "primary_area": "multiagent systems", + "author": "Dung Nguyen; Phuoc Nguyen; Hung Le; Kien Do; Svetha Venkatesh; Truyen Tran", + "authorids": "", + "aff": "Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia", + "bibtex": "@article{Nguyen_Nguyen_Le_Do_Venkatesh_Tran_2023, title={Memory-Augmented Theory of Mind Network}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/26374}, DOI={10.1609/aaai.v37i10.26374}, abstractNote={Social reasoning necessitates the capacity of theory of mind (ToM), the ability to contextualise and attribute mental states to others without having access to their internal cognitive structure. Recent machine learning approaches to ToM have demonstrated that we can train the observer to read the past and present behaviours of other agents and infer their beliefs (including false beliefs about things that no longer exist), goals, intentions and future actions. The challenges arise when the behavioural space is complex, demanding skilful space navigation for rapidly changing contexts for an extended period. We tackle the challenges by equipping the observer with novel neural memory mechanisms to encode, and hierarchical attention to selectively retrieve information about others. The memories allow rapid, selective querying of distal related past behaviours of others to deliberatively reason about their current mental state, beliefs and future behaviours. This results in ToMMY, a theory of mind model that learns to reason while making little assumptions about the underlying mental processes. 
We also construct a new suite of experiments to demonstrate that memories facilitate the learning process and achieve better theory of mind performance, especially for high-demand false-belief tasks that require inferring through multiple steps of changes.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Dung and Nguyen, Phuoc and Le, Hung and Do, Kien and Venkatesh, Svetha and Tran, Truyen}, year={2023}, month={Jun.}, pages={11630-11637} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26374/26146", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26374", + "pdf_size": 2282706, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17364244570391342750&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "deakin.edu.au;deakin.edu.au;deakin.edu.au;deakin.edu.au;deakin.edu.au;deakin.edu.au", + "email": "deakin.edu.au;deakin.edu.au;deakin.edu.au;deakin.edu.au;deakin.edu.au;deakin.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Deakin University", + "aff_unique_dep": "Applied Artificial Intelligence Institute (A2I2)", + "aff_unique_url": "https://www.deakin.edu.au", + "aff_unique_abbr": "Deakin", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Geelong", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25319", + "title": "Memory-Oriented Structural Pruning for Efficient Image Restoration", + "track": "main", + "status": "Technical", + "abstract": "Deep learning (DL) based methods have significantly pushed forward the state-of-the-art for image restoration (IR) task. Nevertheless, DL-based IR models are highly computation- and memory-intensive. 
The surging demands for processing higher-resolution images and multi-task paralleling in practical mobile usage further add to their computation and memory burdens. In this paper, we reveal the overlooked memory redundancy of the IR models and propose a Memory-Oriented Structural Pruning (MOSP) method. To properly compress the long-range skip connections (a major source of the memory burden), we introduce a compactor module onto each skip connection to decouple the pruning of the skip connections and the main branch. MOSP progressively prunes the original model layers and the compactors to cut down the peak memory while maintaining high IR quality. Experiments on real image denoising, image super-resolution and low-light image enhancement show that MOSP can yield models with higher memory efficiency while better preserving performance compared with baseline pruning methods.", + "primary_area": "computer vision ii", + "author": "Xiangsheng Shi; Xuefei Ning; Lidong Guo; Tianchen Zhao; Enshu Liu; Yi Cai; Yuhan Dong; Huazhong Yang; Yu Wang", + "authorids": "", + "aff": "Department of Electronic Engineering, Tsinghua University+Shenzhen International Graduate School, Tsinghua University; Department of Electronic Engineering, Tsinghua University; School of Materials Science and Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Shenzhen International Graduate School, Tsinghua University; Department of Electronic Engineering, Tsinghua University; Department of Electronic Engineering, Tsinghua University", + "bibtex": "@article{Shi_Ning_Guo_Zhao_Liu_Cai_Dong_Yang_Wang_2023, title={Memory-Oriented Structural Pruning for Efficient Image Restoration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25319}, DOI={10.1609/aaai.v37i2.25319}, abstractNote={Deep learning (DL) based methods have 
significantly pushed forward the state-of-the-art for image restoration (IR) task. Nevertheless, DL-based IR models are highly computation- and memory-intensive. The surging demands for processing higher-resolution images and multi-task paralleling in practical mobile usage further add to their computation and memory burdens. In this paper, we reveal the overlooked memory redundancy of the IR models and propose a Memory-Oriented Structural Pruning (MOSP) method. To properly compress the long-range skip connections (a major source of the memory burden), we introduce a compactor module onto each skip connection to decouple the pruning of the skip connections and the main branch. MOSP progressively prunes the original model layers and the compactors to cut down the peak memory while maintaining high IR quality. Experiments on real image denoising, image super-resolution and low-light image enhancement show that MOSP can yield models with higher memory efficiency while better preserving performance compared with baseline pruning methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shi, Xiangsheng and Ning, Xuefei and Guo, Lidong and Zhao, Tianchen and Liu, Enshu and Cai, Yi and Dong, Yuhan and Yang, Huazhong and Wang, Yu}, year={2023}, month={Jun.}, pages={2245-2253} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25319/25091", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25319", + "pdf_size": 1284162, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8316487343357087217&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;mail.tsinghua.org.cn;sz.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "email": 
"mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;mail.tsinghua.org.cn;sz.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Department of Electronic Engineering", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26929", + "title": "Meta Learning in Decentralized Neural Networks: Towards More General AI", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Meta-learning usually refers to a learning algorithm that learns from other learning algorithms. The problem of uncertainty in the predictions of neural networks shows that the world is only partially predictable and a learned neural network cannot generalize to its ever-changing surrounding environments. Therefore, the question is how a predictive model can represent multiple predictions simultaneously. We aim to provide a fundamental understanding of learning to learn in the contents of Decentralized Neural Networks (Decentralized NNs) and we believe this is one of the most important questions and prerequisites to building an autonomous intelligence machine. To this end, we shall demonstrate several pieces of evidence for tackling the problems above with Meta Learning in Decentralized NNs. 
In particular, we will present three different approaches to building such a decentralized learning system: (1) learning from many replica neural networks, (2) building the hierarchy of neural networks for different functions, and (3) leveraging different modality experts to learn cross-modal representations.", + "primary_area": "", + "author": "Yuwei Sun", + "authorids": "", + "aff": "The University of Tokyo + RIKEN", + "bibtex": "@article{Sun_2024, title={Meta Learning in Decentralized Neural Networks: Towards More General AI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26929}, DOI={10.1609/aaai.v37i13.26929}, abstractNote={Meta-learning usually refers to a learning algorithm that learns from other learning algorithms. The problem of uncertainty in the predictions of neural networks shows that the world is only partially predictable and a learned neural network cannot generalize to its ever-changing surrounding environments. Therefore, the question is how a predictive model can represent multiple predictions simultaneously. We aim to provide a fundamental understanding of learning to learn in the contents of Decentralized Neural Networks (Decentralized NNs) and we believe this is one of the most important questions and prerequisites to building an autonomous intelligence machine. To this end, we shall demonstrate several pieces of evidence for tackling the problems above with Meta Learning in Decentralized NNs. 
In particular, we will present three different approaches to building such a decentralized learning system: (1) learning from many replica neural networks, (2) building the hierarchy of neural networks for different functions, and (3) leveraging different modality experts to learn cross-modal representations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Yuwei}, year={2024}, month={Jul.}, pages={16137-16138} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26929/26701", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26929", + "pdf_size": 55840, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17319249436517168188&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 9, + "aff_domain": "g.ecc.u-tokyo.ac.jp", + "email": "g.ecc.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+1", + "aff_unique_norm": "University of Tokyo;RIKEN", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.riken.jp", + "aff_unique_abbr": "UTokyo;RIKEN", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0", + "aff_country_unique": "Japan" + }, + { + "id": "article-25760", + "title": "Meta-Auxiliary Learning for Adaptive Human Pose Prediction", + "track": "main", + "status": "Technical", + "abstract": "Predicting high-fidelity future human poses, from a historically observed sequence, is crucial for intelligent robots to interact with humans. Deep end-to-end learning approaches, which typically train a generic pre-trained model on external datasets and then directly apply it to all test samples, emerge as the dominant solution to solve this issue. Despite encouraging progress, they remain non-optimal, as the unique properties (e.g., motion style, rhythm) of a specific sequence cannot be adapted. 
More generally, once encountering out-of-distributions, the predicted poses tend to be unreliable. Motivated by this observation, we propose a novel test-time adaptation framework that leverages two self-supervised auxiliary tasks to help the primary forecasting network adapt to the test sequence. In the testing phase, our model can adjust the model parameters by several gradient updates to improve the generation quality. However, due to catastrophic forgetting, both auxiliary tasks typically have a low ability to automatically present the desired positive incentives for the final prediction performance. For this reason, we also propose a meta-auxiliary learning scheme for better adaptation. Extensive experiments show that the proposed approach achieves higher accuracy and more realistic visualization.", + "primary_area": "intelligent robotics", + "author": "Qiongjie Cui; Huaijiang Sun; Jianfeng Lu; Bin Li; Weiqing Li", + "authorids": "", + "aff": "Nanjing University of Science and Technology; Nanjing University of Science and Technology; Nanjing University of Science and Technology; Tianjin AiForward Science and Technology Co., Ltd., China; Nanjing University of Science and Technology", + "bibtex": "@article{Cui_Sun_Lu_Li_Li_2023, title={Meta-Auxiliary Learning for Adaptive Human Pose Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25760}, DOI={10.1609/aaai.v37i5.25760}, abstractNote={Predicting high-fidelity future human poses, from a historically observed sequence, is crucial for intelligent robots to interact with humans. Deep end-to-end learning approaches, which typically train a generic pre-trained model on external datasets and then directly apply it to all test samples, emerge as the dominant solution to solve this issue. Despite encouraging progress, they remain non-optimal, as the unique properties (e.g., motion style, rhythm) of a specific sequence cannot be adapted. 
More generally, once encountering out-of-distributions, the predicted poses tend to be unreliable. Motivated by this observation, we propose a novel test-time adaptation framework that leverages two self-supervised auxiliary tasks to help the primary forecasting network adapt to the test sequence. In the testing phase, our model can adjust the model parameters by several gradient updates to improve the generation quality. However, due to catastrophic forgetting, both auxiliary tasks typically have a low ability to automatically present the desired positive incentives for the final prediction performance. For this reason, we also propose a meta-auxiliary learning scheme for better adaptation. Extensive experiments show that the proposed approach achieves higher accuracy and more realistic visualization.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Qiongjie and Sun, Huaijiang and Lu, Jianfeng and Li, Bin and Li, Weiqing}, year={2023}, month={Jun.}, pages={6166-6174} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25760/25532", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25760", + "pdf_size": 3053885, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14630011385813471433&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;aiforward.com;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;aiforward.com;njust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Nanjing University of Science and Technology;Tianjin AiForward Science and Technology Co., Ltd.", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.nust.edu.cn/;", + "aff_unique_abbr": "NUST;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-25823", + "title": "Meta-Learning for Simple Regret Minimization", + "track": "main", + "status": "Technical", + "abstract": "We develop a meta-learning framework for simple regret minimization in bandits. In this framework, a learning agent interacts with a sequence of bandit tasks, which are sampled i.i.d. from an unknown prior distribution, and learns its meta-parameters to perform better on future tasks. We propose the first Bayesian and frequentist meta-learning algorithms for this setting. The Bayesian algorithm has access to a prior distribution over the meta-parameters and its meta simple regret over m bandit tasks with horizon n is mere O(m / \u221an). On the other hand, the meta simple regret of the frequentist algorithm is O(n\u221am + m/ \u221an). While its regret is worse, the frequentist algorithm is more general because it does not need a prior distribution over the meta-parameters. It can also be analyzed in more settings. We instantiate our algorithms for several classes of bandit problems. Our algorithms are general and we complement our theory by evaluating them empirically in several environments.", + "primary_area": "machine learning i", + "author": "Javad Azizi; Branislav Kveton; Mohammad Ghavamzadeh; Sumeet Katariya", + "authorids": "", + "aff": "University of Southern California; Amazon; Google Research; Amazon", + "bibtex": "@article{Azizi_Kveton_Ghavamzadeh_Katariya_2023, title={Meta-Learning for Simple Regret Minimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25823}, DOI={10.1609/aaai.v37i6.25823}, abstractNote={We develop a meta-learning framework for simple regret minimization in bandits. In this framework, a learning agent interacts with a sequence of bandit tasks, which are sampled i.i.d. from an unknown prior distribution, and learns its meta-parameters to perform better on future tasks. We propose the first Bayesian and frequentist meta-learning algorithms for this setting. 
The Bayesian algorithm has access to a prior distribution over the meta-parameters and its meta simple regret over m bandit tasks with horizon n is mere O(m / \u221an). On the other hand, the meta simple regret of the frequentist algorithm is O(n\u221am + m/ \u221an). While its regret is worse, the frequentist algorithm is more general because it does not need a prior distribution over the meta-parameters. It can also be analyzed in more settings. We instantiate our algorithms for several classes of bandit problems. Our algorithms are general and we complement our theory by evaluating them empirically in several environments.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Azizi, Javad and Kveton, Branislav and Ghavamzadeh, Mohammad and Katariya, Sumeet}, year={2023}, month={Jun.}, pages={6709-6717} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25823/25595", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25823", + "pdf_size": 1036717, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13414827238297148832&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 6, + "aff_domain": "usc.edu;amazon.com;google.com;amazon.com", + "email": "usc.edu;amazon.com;google.com;amazon.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "University of Southern California;Amazon.com, Inc.;Google", + "aff_unique_dep": ";;Google Research", + "aff_unique_url": "https://www.usc.edu;https://www.amazon.com;https://research.google", + "aff_unique_abbr": "USC;Amazon;Google Research", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "Los Angeles;;Mountain View", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26210", + "title": "Meta-Reinforcement Learning Based on Self-Supervised Task Representation Learning", + "track": "main", + "status": "Technical", + 
"abstract": "Meta-reinforcement learning enables artificial agents to learn from related training tasks and adapt to new tasks efficiently with minimal interaction data. However, most existing research is still limited to narrow task distributions that are parametric and stationary, and does not consider out-of-distribution tasks during the evaluation, thus, restricting its application. In this paper, we propose MoSS, a context-based Meta-reinforcement learning algorithm based on Self-Supervised task representation learning to address this challenge. We extend meta-RL to broad non-parametric task distributions which have never been explored before, and also achieve state-of-the-art results in non-stationary and out-of-distribution tasks. Specifically, MoSS consists of a task inference module and a policy module. We utilize the Gaussian mixture model for task representation to imitate the parametric and non-parametric task variations. Additionally, our online adaptation strategy enables the agent to react at the first sight of a task change, thus being applicable in non-stationary tasks. MoSS also exhibits strong generalization robustness in out-of-distributions tasks which benefits from the reliable and robust task representation. The policy is built on top of an off-policy RL algorithm and the entire network is trained completely off-policy to ensure high sample efficiency. 
On MuJoCo and Meta-World benchmarks, MoSS outperforms prior works in terms of asymptotic performance, sample efficiency (3-50x faster), adaptation efficiency, and generalization robustness on broad and diverse task distributions.", + "primary_area": "machine learning iii", + "author": "Mingyang Wang; Zhenshan Bing; Xiangtong Yao; Shuai Wang; Huang Kai; Hang Su; Chenguang Yang; Alois Knoll", + "authorids": "", + "aff": "Department of Informatics, Technical University Munich; Department of Informatics, Technical University Munich; Department of Informatics, Technical University Munich; Tencent Robotics X Lab; School of Computer Science and Engineering, Sun Yat-Sen University + Shenzhen Institute, Sun Yat-Sen University; Dipartimento di Elettronica, Politecnico di Milano; Bristol Robotics Laboratory, University of the West of England; Department of Informatics, Technical University Munich", + "bibtex": "@article{Wang_Bing_Yao_Wang_Kai_Su_Yang_Knoll_2023, title={Meta-Reinforcement Learning Based on Self-Supervised Task Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26210}, DOI={10.1609/aaai.v37i8.26210}, abstractNote={Meta-reinforcement learning enables artificial agents to learn from related training tasks and adapt to new tasks efficiently with minimal interaction data. However, most existing research is still limited to narrow task distributions that are parametric and stationary, and does not consider out-of-distribution tasks during the evaluation, thus, restricting its application. In this paper, we propose MoSS, a context-based Meta-reinforcement learning algorithm based on Self-Supervised task representation learning to address this challenge. We extend meta-RL to broad non-parametric task distributions which have never been explored before, and also achieve state-of-the-art results in non-stationary and out-of-distribution tasks. Specifically, MoSS consists of a task inference module and a policy module. 
We utilize the Gaussian mixture model for task representation to imitate the parametric and non-parametric task variations. Additionally, our online adaptation strategy enables the agent to react at the first sight of a task change, thus being applicable in non-stationary tasks. MoSS also exhibits strong generalization robustness in out-of-distributions tasks which benefits from the reliable and robust task representation. The policy is built on top of an off-policy RL algorithm and the entire network is trained completely off-policy to ensure high sample efficiency. On MuJoCo and Meta-World benchmarks, MoSS outperforms prior works in terms of asymptotic performance, sample efficiency (3-50x faster), adaptation efficiency, and generalization robustness on broad and diverse task distributions.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Mingyang and Bing, Zhenshan and Yao, Xiangtong and Wang, Shuai and Kai, Huang and Su, Hang and Yang, Chenguang and Knoll, Alois}, year={2023}, month={Jun.}, pages={10157-10165} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26210/25982", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26210", + "pdf_size": 387580, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13204048504593196864&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "tum.de;in.tum.de;tum.de;gmail.com;mail.sysu.edu.cn;polimi.it;ieee.org;in.tum.de", + "email": "tum.de;in.tum.de;tum.de;gmail.com;mail.sysu.edu.cn;polimi.it;ieee.org;in.tum.de", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;2+2;3;4;0", + "aff_unique_norm": "Technical University Munich;Tencent;Sun Yat-Sen University;Politecnico di Milano;University of the West of England", + "aff_unique_dep": "Department of Informatics;Robotics X Lab;School of Computer Science and Engineering;Dipartimento di Elettronica;Bristol Robotics 
Laboratory", + "aff_unique_url": "https://www.tum.de;https://www.tencent.com;http://www.sysu.edu.cn;https://www.polimi.it;https://www.uwe.ac.uk", + "aff_unique_abbr": "TUM;Tencent;SYSU;Polimi;", + "aff_campus_unique_index": "0;0;0;2;3;0", + "aff_campus_unique": "Munich;;Shenzhen;Bristol", + "aff_country_unique_index": "0;0;0;1;1+1;2;3;0", + "aff_country_unique": "Germany;China;Italy;United Kingdom" + }, + { + "id": "article-25846", + "title": "Meta-Sketch: A Neural Data Structure for Estimating Item Frequencies of Data Streams", + "track": "main", + "status": "Technical", + "abstract": "To estimate item frequencies of data streams with limited space, sketches are widely used in real applications, including real-time web analytics, network monitoring, and self-driving. Sketches can be viewed as a model which maps the identifier of a stream item to the corresponding frequency domain. Starting from the premise, we envision a neural data structure, which we term the meta-sketch, to go beyond the basic structure of conventional sketches. The meta-sketch learns basic sketching abilities from meta-tasks constituted with synthetic datasets following Zipf distributions in the pre-training phase, and can be fast adapted to real (skewed) distributions in the adaption phase. 
Extensive experiments demonstrate the performance gains of the meta-sketch and offer insights into our proposals.", + "primary_area": "machine learning i", + "author": "Yukun Cao; Yuan Feng; Xike Xie", + "authorids": "", + "aff": "School of Computer Science and Technology, University of Science and Technology of China; School of Computer Science and Technology, University of Science and Technology of China; Data Darkness Lab, MIRACLE Center, Suzhou Institute for Advanced Research, USTC", + "bibtex": "@article{Cao_Feng_Xie_2023, title={Meta-Sketch: A Neural Data Structure for Estimating Item Frequencies of Data Streams}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25846}, DOI={10.1609/aaai.v37i6.25846}, abstractNote={To estimate item frequencies of data streams with limited space, sketches are widely used in real applications, including real-time web analytics, network monitoring, and self-driving. Sketches can be viewed as a model which maps the identifier of a stream item to the corresponding frequency domain. Starting from the premise, we envision a neural data structure, which we term the meta-sketch, to go beyond the basic structure of conventional sketches. The meta-sketch learns basic sketching abilities from meta-tasks constituted with synthetic datasets following Zipf distributions in the pre-training phase, and can be fast adapted to real (skewed) distributions in the adaption phase. 
Extensive experiments demonstrate the performance gains of the meta-sketch and offer insights into our proposals.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Yukun and Feng, Yuan and Xie, Xike}, year={2023}, month={Jun.}, pages={6916-6924} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25846/25618", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25846", + "pdf_size": 281189, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11362209411586496261&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Suzhou", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25654", + "title": "MetaTPTrans: A Meta Learning Approach for Multilingual Code Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Representation learning of source code is essential for applying machine learning to software engineering tasks. Learning code representation from a multilingual source code dataset has been shown to be more effective than learning from single-language datasets separately, since more training data from multilingual dataset improves the model's ability to extract language-agnostic information from source code. 
However, existing multilingual training overlooks the language-specific information which is crucial for modeling source code across different programming languages, while only focusing on learning a unified model with shared parameters among different languages for language-agnostic information modeling. To address this problem, we propose MetaTPTrans, a meta learning approach for multilingual code representation learning. MetaTPTrans generates different parameters for the feature extractor according to the specific programming language type of the input code snippet, enabling the model to learn both language-agnostic and language-specific information with dynamic parameters in the feature extractor. We conduct experiments on the code summarization and code completion tasks to verify the effectiveness of our approach. The results demonstrate the superiority of our approach with significant improvements on state-of-the-art baselines.", + "primary_area": "domain s of application", + "author": "Weiguo Pian; Hanyu Peng; Xunzhu Tang; Tiezhu Sun; Haoye Tian; Andrew Habib; Jacques Klein; Tegawend\u00e9 F. Bissyand\u00e9", + "authorids": "", + "aff": "SnT, University of Luxembourg; Baidu Inc.; SnT, University of Luxembourg; SnT, University of Luxembourg; SnT, University of Luxembourg; SnT, University of Luxembourg; SnT, University of Luxembourg; SnT, University of Luxembourg + CITADEL, Universit\u00e9 Virtuelle du Burkina Faso", + "bibtex": "@article{Pian_Peng_Tang_Sun_Tian_Habib_Klein_Bissyand\u00e9_2023, title={MetaTPTrans: A Meta Learning Approach for Multilingual Code Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25654}, DOI={10.1609/aaai.v37i4.25654}, abstractNote={Representation learning of source code is essential for applying machine learning to software engineering tasks. 
Learning code representation from a multilingual source code dataset has been shown to be more effective than learning from single-language datasets separately, since more training data from multilingual dataset improves the model\u2019s ability to extract language-agnostic information from source code. However, existing multilingual training overlooks the language-specific information which is crucial for modeling source code across different programming languages, while only focusing on learning a unified model with shared parameters among different languages for language-agnostic information modeling. To address this problem, we propose MetaTPTrans, a meta learning approach for multilingual code representation learning. MetaTPTrans generates different parameters for the feature extractor according to the specific programming language type of the input code snippet, enabling the model to learn both language-agnostic and language-specific information with dynamic parameters in the feature extractor. We conduct experiments on the code summarization and code completion tasks to verify the effectiveness of our approach. 
The results demonstrate the superiority of our approach with significant improvements on state-of-the-art baselines.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pian, Weiguo and Peng, Hanyu and Tang, Xunzhu and Sun, Tiezhu and Tian, Haoye and Habib, Andrew and Klein, Jacques and Bissyand\u00e9, Tegawend\u00e9 F.}, year={2023}, month={Jun.}, pages={5239-5247} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25654/25426", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25654", + "pdf_size": 1477576, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10462180531728409219&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 14, + "aff_domain": "uni.lu;baidu.com;uni.lu;uni.lu;uni.lu;gmail.com;uni.lu;uni.lu", + "email": "uni.lu;baidu.com;uni.lu;uni.lu;uni.lu;gmail.com;uni.lu;uni.lu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;0;0;0;0+2", + "aff_unique_norm": "University of Luxembourg;Baidu Inc.;Universit\u00e9 Virtuelle du Burkina Faso", + "aff_unique_dep": "SnT;;CITADEL", + "aff_unique_url": "https://wwwen.uniluxembourg.lu;https://www.baidu.com;", + "aff_unique_abbr": "UniLu;Baidu;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;0;0;0+2", + "aff_country_unique": "Luxembourg;China;Burkina Faso" + }, + { + "id": "article-26238", + "title": "MetaZSCIL: A Meta-Learning Approach for Generalized Zero-Shot Class Incremental Learning", + "track": "main", + "status": "Technical", + "abstract": "Generalized zero-shot learning (GZSL) aims to recognize samples whose categories may not have been seen at training. Standard GZSL cannot handle dynamic addition of new seen and unseen classes. In order to address this limitation, some recent attempts have been made to develop continual GZSL methods. 
However, these methods require end-users to continuously collect and annotate numerous seen class samples, which is unrealistic and hampers the applicability in the real-world. Accordingly, in this paper, we propose a more practical and challenging setting named Generalized Zero-Shot Class Incremental Learning (CI-GZSL). Our setting aims to incrementally learn unseen classes without any training samples, while recognizing all classes previously encountered. We further propose a bi-level meta-learning based method called MetaZSCIL to directly optimize the network to learn how to incrementally learn. Specifically, we sample sequential tasks from seen classes during the offline training to simulate the incremental learning process. For each task, the model is learned using a meta-objective such that it is capable to perform fast adaptation without forgetting. Note that our optimization can be flexibly equipped with most existing generative methods to tackle CI-GZSL. This work introduces a feature generative framework that leverages visual feature distribution alignment to produce replayed samples of previously seen classes to reduce catastrophic forgetting. 
Extensive experiments conducted on five widely used benchmarks demonstrate the superiority of our proposed method.", + "primary_area": "machine learning iv", + "author": "Yanan Wu; Tengfei Liang; Songhe Feng; Yi Jin; Gengyu Lyu; Haojun Fei; Yang Wang", + "authorids": "", + "aff": "Beijing Key Laboratory of Traffic Data Analysis and Mining, School of Computer and Information Technology, Beijing Jiaotong University; Beijing Key Laboratory of Traffic Data Analysis and Mining, School of Computer and Information Technology, Beijing Jiaotong University; Beijing Key Laboratory of Traffic Data Analysis and Mining, School of Computer and Information Technology, Beijing Jiaotong University; Beijing Key Laboratory of Traffic Data Analysis and Mining, School of Computer and Information Technology, Beijing Jiaotong University; Faculty of Information Technology, Beijing University of Technology; 360 DigiTech, Inc; Department of Computer Science and Software Engineering, Concordia University", + "bibtex": "@article{Wu_Liang_Feng_Jin_Lyu_Fei_Wang_2023, title={MetaZSCIL: A Meta-Learning Approach for Generalized Zero-Shot Class Incremental Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26238}, DOI={10.1609/aaai.v37i9.26238}, abstractNote={Generalized zero-shot learning (GZSL) aims to recognize samples whose categories may not have been seen at training. Standard GZSL cannot handle dynamic addition of new seen and unseen classes. In order to address this limitation, some recent attempts have been made to develop continual GZSL methods. However, these methods require end-users to continuously collect and annotate numerous seen class samples, which is unrealistic and hampers the applicability in the real-world. Accordingly, in this paper, we propose a more practical and challenging setting named Generalized Zero-Shot Class Incremental Learning (CI-GZSL). 
Our setting aims to incrementally learn unseen classes without any training samples, while recognizing all classes previously encountered. We further propose a bi-level meta-learning based method called MetaZSCIL to directly optimize the network to learn how to incrementally learn. Specifically, we sample sequential tasks from seen classes during the offline training to simulate the incremental learning process. For each task, the model is learned using a meta-objective such that it is capable to perform fast adaptation without forgetting. Note that our optimization can be flexibly equipped with most existing generative methods to tackle CI-GZSL. This work introduces a feature generative framework that leverages visual feature distribution alignment to produce replayed samples of previously seen classes to reduce catastrophic forgetting. Extensive experiments conducted on five widely used benchmarks demonstrate the superiority of our proposed method.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yanan and Liang, Tengfei and Feng, Songhe and Jin, Yi and Lyu, Gengyu and Fei, Haojun and Wang, Yang}, year={2023}, month={Jun.}, pages={10408-10416} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26238/26010", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26238", + "pdf_size": 1523409, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10546793556625668941&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjut.edu.cn;360shuke.com;concordia.ca", + "email": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjut.edu.cn;360shuke.com;concordia.ca", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;2;3", + "aff_unique_norm": "Beijing Jiaotong University;Beijing University of Technology;360 DigiTech, Inc;Concordia University", + "aff_unique_dep": "School of 
Computer and Information Technology;Faculty of Information Technology;;Department of Computer Science and Software Engineering", + "aff_unique_url": "http://www.bjtu.edu.cn;http://www.bit.edu.cn;https://www.360digitech.com;https://www.concordia.ca", + "aff_unique_abbr": "BJTU;BIT;;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-26188", + "title": "Metric Multi-View Graph Clustering", + "track": "main", + "status": "Technical", + "abstract": "Graph-based methods have hitherto been used to pursue the coherent patterns of data due to its ease of implementation and efficiency. These methods have been increasingly applied in multi-view learning and achieved promising performance in various clustering tasks. However, despite their noticeable empirical success, existing graph-based multi-view clustering methods may still suffer the suboptimal solution considering that multi-view data can be very complicated in raw feature space. Moreover, existing methods usually adopt the similarity metric by an ad hoc approach, which largely simplifies the relationship among real-world data and results in an inaccurate output. To address these issues, we propose to seamlessly integrates metric learning and graph learning for multi-view clustering. Specifically, we employ a useful metric to depict the inherent structure with linearity-aware of affinity graph representation learned based on the self-expressiveness property. Furthermore, instead of directly utilizing the raw features, we prefer to recover a smooth representation such that the geometric structure of the original data can be retained. We model the above concerns into a unified learning framework, and hence complements each learning subtask in a mutual reinforcement manner. 
The empirical studies corroborate our theoretical findings, and demonstrate that the proposed method is able to boost the multi-view clustering performance.", + "primary_area": "machine learning iii", + "author": "Yuze Tan; Yixi Liu; Hongjie Wu; Jiancheng Lv; Shudong Huang", + "authorids": "", + "aff": "College of Computer Science, Sichuan University; College of Computer Science, Sichuan University; College of Computer Science, Sichuan University; College of Computer Science, Sichuan University; College of Computer Science, Sichuan University", + "bibtex": "@article{Tan_Liu_Wu_Lv_Huang_2023, title={Metric Multi-View Graph Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26188}, DOI={10.1609/aaai.v37i8.26188}, abstractNote={Graph-based methods have hitherto been used to pursue the coherent patterns of data due to its ease of implementation and efficiency. These methods have been increasingly applied in multi-view learning and achieved promising performance in various clustering tasks. However, despite their noticeable empirical success, existing graph-based multi-view clustering methods may still suffer the suboptimal solution considering that multi-view data can be very complicated in raw feature space. Moreover, existing methods usually adopt the similarity metric by an ad hoc approach, which largely simplifies the relationship among real-world data and results in an inaccurate output. To address these issues, we propose to seamlessly integrates metric learning and graph learning for multi-view clustering. Specifically, we employ a useful metric to depict the inherent structure with linearity-aware of affinity graph representation learned based on the self-expressiveness property. Furthermore, instead of directly utilizing the raw features, we prefer to recover a smooth representation such that the geometric structure of the original data can be retained. 
We model the above concerns into a unified learning framework, and hence complements each learning subtask in a mutual reinforcement manner. The empirical studies corroborate our theoretical findings, and demonstrate that the proposed method is able to boost the multi-view clustering performance.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tan, Yuze and Liu, Yixi and Wu, Hongjie and Lv, Jiancheng and Huang, Shudong}, year={2023}, month={Jun.}, pages={9962-9970} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26188/25960", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26188", + "pdf_size": 641991, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12231626782989809337&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.scu.edu.cn;stu.scu.edu.cn;gmail.com;scu.edu.cn;scu.edu.cn", + "email": "stu.scu.edu.cn;stu.scu.edu.cn;gmail.com;scu.edu.cn;scu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Sichuan University", + "aff_unique_dep": "College of Computer Science", + "aff_unique_url": "https://www.scu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26041", + "title": "Metric Nearness Made Practical", + "track": "main", + "status": "Technical", + "abstract": "Given a square matrix with noisy dissimilarity measures between pairs of data samples, the metric nearness model computes the best approximation of the matrix from a set of valid distance metrics. Despite its wide applications in machine learning and data processing tasks, the model faces non-trivial computational requirements in seeking the solution due to the large number of metric constraints associated with the feasible region. 
Our work designed a practical approach in two stages to tackle the challenge and improve the model's scalability and applicability. The first stage computes a fast yet high-quality approximate solution from a set of isometrically embeddable metrics, further improved by an effective heuristic. The second stage refines the approximate solution with the Halpern-Lions-Wittmann-Bauschke projection algorithm, which converges quickly to the optimal solution. In empirical evaluations, the proposed approach runs at least an order of magnitude faster than the state-of-the-art solutions, with significantly improved scalability, complete conformity to constraints, less memory consumption, and other desirable features in real applications.", + "primary_area": "machine learning ii", + "author": "Wenye Li; Fangchen Yu; Zichen Ma", + "authorids": "", + "aff": "The Chinese University of Hong Kong, Shenzhen + Shenzhen Research Institute of Big Data; The Chinese University of Hong Kong, Shenzhen; The Chinese University of Hong Kong, Shenzhen", + "bibtex": "@article{Li_Yu_Ma_2023, title={Metric Nearness Made Practical}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26041}, DOI={10.1609/aaai.v37i7.26041}, abstractNote={Given a square matrix with noisy dissimilarity measures between pairs of data samples, the metric nearness model computes the best approximation of the matrix from a set of valid distance metrics. Despite its wide applications in machine learning and data processing tasks, the model faces non-trivial computational requirements in seeking the solution due to the large number of metric constraints associated with the feasible region. Our work designed a practical approach in two stages to tackle the challenge and improve the model\u2019s scalability and applicability. The first stage computes a fast yet high-quality approximate solution from a set of isometrically embeddable metrics, further improved by an effective heuristic. 
The second stage refines the approximate solution with the Halpern-Lions-Wittmann-Bauschke projection algorithm, which converges quickly to the optimal solution. In empirical evaluations, the proposed approach runs at least an order of magnitude faster than the state-of-the-art solutions, with significantly improved scalability, complete conformity to constraints, less memory consumption, and other desirable features in real applications.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Wenye and Yu, Fangchen and Ma, Zichen}, year={2023}, month={Jun.}, pages={8648-8656} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26041/25813", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26041", + "pdf_size": 408449, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11435020860854983303&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn", + "email": "cuhk.edu.cn;link.cuhk.edu.cn;link.cuhk.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Shenzhen Research Institute of Big Data", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cuhk.edu.cn;http://www.sribd.cn", + "aff_unique_abbr": "CUHK;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26058", + "title": "Metric Residual Network for Sample Efficient Goal-Conditioned Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Goal-conditioned reinforcement learning (GCRL) has a wide range of potential real-world applications, including manipulation and navigation problems in robotics. 
Especially in such robotics tasks, sample efficiency is of the utmost importance for GCRL since, by default, the agent is only rewarded when it reaches its goal. While several methods have been proposed to improve the sample efficiency of GCRL, one relatively under-studied approach is the design of neural architectures to support sample efficiency.\nIn this work, we introduce a novel neural architecture for GCRL that achieves significantly better sample efficiency than the commonly-used monolithic network architecture. \nThe key insight is that the \noptimal action-value function must satisfy the triangle inequality in a specific sense.\nFurthermore, we introduce the metric residual network (MRN) that deliberately decomposes the action-value function into the negated summation of a metric plus a residual asymmetric component. MRN provably approximates any optimal action-value function, thus making it a fitting neural architecture for GCRL.\nWe conduct comprehensive experiments across 12 standard benchmark environments in GCRL. The empirical results demonstrate that MRN uniformly outperforms other state-of-the-art GCRL neural architectures in terms of sample efficiency. The code is available at https://github.com/Cranial-XIX/metric-residual-network.", + "primary_area": "machine learning ii", + "author": "Bo Liu; Yihao Feng; Qiang Liu; Peter Stone", + "authorids": "", + "aff": "The University of Texas at Austin; Salesforce Research; The University of Texas at Austin; The University of Texas at Austin + Sony AI", + "bibtex": "@article{Liu_Feng_Liu_Stone_2023, title={Metric Residual Network for Sample Efficient Goal-Conditioned Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26058}, DOI={10.1609/aaai.v37i7.26058}, abstractNote={Goal-conditioned reinforcement learning (GCRL) has a wide range of potential real-world applications, including manipulation and navigation problems in robotics. 
Especially in such robotics tasks, sample efficiency is of the utmost importance for GCRL since, by default, the agent is only rewarded when it reaches its goal. While several methods have been proposed to improve the sample efficiency of GCRL, one relatively under-studied approach is the design of neural architectures to support sample efficiency.\nIn this work, we introduce a novel neural architecture for GCRL that achieves significantly better sample efficiency than the commonly-used monolithic network architecture. The key insight is that the optimal action-value function must satisfy the triangle inequality in a specific sense.\nFurthermore, we introduce the metric residual network (MRN) that deliberately decomposes the action-value function into the negated summation of a metric plus a residual asymmetric component. MRN provably approximates any optimal action-value function, thus making it a fitting neural architecture for GCRL.\nWe conduct comprehensive experiments across 12 standard benchmark environments in GCRL. The empirical results demonstrate that MRN uniformly outperforms other state-of-the-art GCRL neural architectures in terms of sample efficiency. 
The code is available at https://github.com/Cranial-XIX/metric-residual-network.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Bo and Feng, Yihao and Liu, Qiang and Stone, Peter}, year={2023}, month={Jun.}, pages={8799-8806} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26058/25830", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26058", + "pdf_size": 1261719, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13333234518236576208&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.utexas.edu;cs.utexas.edu;cs.utexas.edu;cs.utexas.edu", + "email": "cs.utexas.edu;cs.utexas.edu;cs.utexas.edu;cs.utexas.edu", + "github": "https://github.com/Cranial-XIX/metric-residual-network", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0+2", + "aff_unique_norm": "University of Texas at Austin;Salesforce;Sony", + "aff_unique_dep": ";Salesforce Research;Sony AI", + "aff_unique_url": "https://www.utexas.edu;https://research.salesforce.com;https://www.sony.com", + "aff_unique_abbr": "UT Austin;Salesforce;Sony AI", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Austin;", + "aff_country_unique_index": "0;0;0;0+1", + "aff_country_unique": "United States;Japan" + }, + { + "id": "article-25374", + "title": "MicroAST: Towards Super-fast Ultra-Resolution Arbitrary Style Transfer", + "track": "main", + "status": "Technical", + "abstract": "Arbitrary style transfer (AST) transfers arbitrary artistic styles onto content images. Despite the recent rapid progress, existing AST methods are either incapable or too slow to run at ultra-resolutions (e.g., 4K) with limited resources, which heavily hinders their further applications. In this paper, we tackle this dilemma by learning a straightforward and lightweight model, dubbed MicroAST. 
The key insight is to completely abandon the use of cumbersome pre-trained Deep Convolutional Neural Networks (e.g., VGG) at inference. Instead, we design two micro encoders (content and style encoders) and one micro decoder for style transfer. The content encoder aims at extracting the main structure of the content image. The style encoder, coupled with a modulator, encodes the style image into learnable dual-modulation signals that modulate both intermediate features and convolutional filters of the decoder, thus injecting more sophisticated and flexible style signals to guide the stylizations. In addition, to boost the ability of the style encoder to extract more distinct and representative style signals, we also introduce a new style signal contrastive loss in our model. Compared to the state of the art, our MicroAST not only produces visually superior results but also is 5-73 times smaller and 6-18 times faster, for the first time enabling super-fast (about 0.5 seconds) AST at 4K ultra-resolutions.", + "primary_area": "computer vision iii", + "author": "Zhizhong Wang; Lei Zhao; Zhiwen Zuo; Ailin Li; Haibo Chen; Wei Xing; Dongming Lu", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University", + "bibtex": "@article{Wang_Zhao_Zuo_Li_Chen_Xing_Lu_2023, title={MicroAST: Towards Super-fast Ultra-Resolution Arbitrary Style Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25374}, DOI={10.1609/aaai.v37i3.25374}, abstractNote={Arbitrary style transfer (AST) transfers arbitrary artistic styles onto content images. 
Despite the recent rapid progress, existing AST methods are either incapable or too slow to run at ultra-resolutions (e.g., 4K) with limited resources, which heavily hinders their further applications. In this paper, we tackle this dilemma by learning a straightforward and lightweight model, dubbed MicroAST. The key insight is to completely abandon the use of cumbersome pre-trained Deep Convolutional Neural Networks (e.g., VGG) at inference. Instead, we design two micro encoders (content and style encoders) and one micro decoder for style transfer. The content encoder aims at extracting the main structure of the content image. The style encoder, coupled with a modulator, encodes the style image into learnable dual-modulation signals that modulate both intermediate features and convolutional filters of the decoder, thus injecting more sophisticated and flexible style signals to guide the stylizations. In addition, to boost the ability of the style encoder to extract more distinct and representative style signals, we also introduce a new style signal contrastive loss in our model. 
Compared to the state of the art, our MicroAST not only produces visually superior results but also is 5-73 times smaller and 6-18 times faster, for the first time enabling super-fast (about 0.5 seconds) AST at 4K ultra-resolutions.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zhizhong and Zhao, Lei and Zuo, Zhiwen and Li, Ailin and Chen, Haibo and Xing, Wei and Lu, Dongming}, year={2023}, month={Jun.}, pages={2742-2750} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25374/25146", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25374", + "pdf_size": 15978556, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10313381567920025808&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "College of Computer Science and Technology", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25862", + "title": "Min-Max Submodular Ranking for Multiple Agents", + "track": "main", + "status": "Technical", + "abstract": "In the submodular ranking (SR) problem, the input consists of a set of submodular functions defined on a ground set of elements. The goal is to order elements for all the functions to have value above a certain threshold as soon on average as possible, assuming we choose one element per time. The problem is flexible enough to capture various applications in machine learning, including decision trees. 
\n\nThis paper considers the min-max version of SR where multiple instances share the ground set. With the view of each instance being associated with an agent, the min-max problem is to order the common elements to minimize the maximum objective of all agents---thus, finding a fair solution for all agents. We give approximation algorithms for this problem and demonstrate their effectiveness in the application of finding a decision tree for multiple agents.", + "primary_area": "machine learning i", + "author": "Qingyun Chen; Sungjin Im; Benjamin Moseley; Chenyang Xu; Ruilong Zhang", + "authorids": "", + "aff": "Electrical Engineering and Computer Science, University of California at Merced; Electrical Engineering and Computer Science, University of California at Merced; Tepper School of Business, Carnegie Mellon University; Software Engineering Institute, East China Normal University + College of Computer Science, Zhejiang University; Department of Computer Science, City University of Hong Kong", + "bibtex": "@article{Chen_Im_Moseley_Xu_Zhang_2023, title={Min-Max Submodular Ranking for Multiple Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25862}, DOI={10.1609/aaai.v37i6.25862}, abstractNote={In the submodular ranking (SR) problem, the input consists of a set of submodular functions defined on a ground set of elements. The goal is to order elements for all the functions to have value above a certain threshold as soon on average as possible, assuming we choose one element per time. The problem is flexible enough to capture various applications in machine learning, including decision trees. This paper considers the min-max version of SR where multiple instances share the ground set. With the view of each instance being associated with an agent, the min-max problem is to order the common elements to minimize the maximum objective of all agents---thus, finding a fair solution for all agents. 
We give approximation algorithms for this problem and demonstrate their effectiveness in the application of finding a decision tree for multiple agents.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Qingyun and Im, Sungjin and Moseley, Benjamin and Xu, Chenyang and Zhang, Ruilong}, year={2023}, month={Jun.}, pages={7061-7068} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25862/25634", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25862", + "pdf_size": 488404, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:TOGEm-wqjwoJ:scholar.google.com/&scioq=Min-Max+Submodular+Ranking+for+Multiple+Agents&hl=en&as_sdt=0,33", + "gs_version_total": 7, + "aff_domain": "ucmerced.edu;ucmerced.edu;andrew.cmu.edu;zju.edu.cn;my.cityu.edu.hk", + "email": "ucmerced.edu;ucmerced.edu;andrew.cmu.edu;zju.edu.cn;my.cityu.edu.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2+3;4", + "aff_unique_norm": "University of California, Merced;Carnegie Mellon University;East China Normal University;Zhejiang University;City University of Hong Kong", + "aff_unique_dep": "Electrical Engineering and Computer Science;Tepper School of Business;Software Engineering Institute;College of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.ucmerced.edu;https://www.cmu.edu;http://www.ecnu.edu.cn;http://www.zju.edu.cn;https://www.cityu.edu.hk", + "aff_unique_abbr": "UC Merced;CMU;ECNU;ZJU;CityU", + "aff_campus_unique_index": "0;0;2", + "aff_campus_unique": "Merced;;Shanghai", + "aff_country_unique_index": "0;0;0;1+1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-25455", + "title": "Mind the Gap: Polishing Pseudo Labels for Accurate Semi-supervised Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Exploiting pseudo labels (e.g., categories and bounding boxes) of 
unannotated objects produced by a teacher detector have underpinned much of recent progress in semi-supervised object detection (SSOD). However, due to the limited generalization capacity of the teacher detector caused by the scarce annotations, the produced pseudo labels often deviate from ground truth, especially those with relatively low classification confidences, thus limiting the generalization performance of SSOD. To mitigate this problem, we propose a dual pseudo-label polishing framework for SSOD. Instead of directly exploiting the pseudo labels produced by the teacher detector, we take the first attempt at reducing their deviation from ground truth using dual polishing learning, where two differently structured polishing networks are elaborately developed and trained using synthesized paired pseudo labels and the corresponding ground truth for categories and bounding boxes on the given annotated objects, respectively. By doing this, both polishing networks can infer more accurate pseudo labels for unannotated objects through sufficiently exploiting their context knowledge based on the initially produced pseudo labels, and thus improve the generalization performance of SSOD. Moreover, such a scheme can be seamlessly plugged into the existing SSOD framework for joint end-to-end learning. In addition, we propose to disentangle the polished pseudo categories and bounding boxes of unannotated objects for separate category classification and bounding box regression in SSOD, which enables introducing more unannotated objects during model training and thus further improves the performance. Experiments on both PASCAL VOC and MS-COCO benchmarks demonstrate the superiority of the proposed method over existing state-of-the-art baselines. 
The code can be found at https://github.com/snowdusky/DualPolishLearning.", + "primary_area": "computer vision iii", + "author": "Lei Zhang; Yuxuan Sun; Wei Wei", + "authorids": "", + "aff": "School of Computer Science, Northwestern Polytechnical University, China; School of Computer Science, Northwestern Polytechnical University, China; School of Computer Science, Northwestern Polytechnical University, China + Research & Development Institute of Northwestern Polytechnical University in Shenzhen, China", + "bibtex": "@article{Zhang_Sun_Wei_2023, title={Mind the Gap: Polishing Pseudo Labels for Accurate Semi-supervised Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25455}, DOI={10.1609/aaai.v37i3.25455}, abstractNote={Exploiting pseudo labels (e.g., categories and bounding boxes) of unannotated objects produced by a teacher detector have underpinned much of recent progress in semi-supervised object detection (SSOD). However, due to the limited generalization capacity of the teacher detector caused by the scarce annotations, the produced pseudo labels often deviate from ground truth, especially those with relatively low classification confidences, thus limiting the generalization performance of SSOD. To mitigate this problem, we propose a dual pseudo-label polishing framework for SSOD. Instead of directly exploiting the pseudo labels produced by the teacher detector, we take the first attempt at reducing their deviation from ground truth using dual polishing learning, where two differently structured polishing networks are elaborately developed and trained using synthesized paired pseudo labels and the corresponding ground truth for categories and bounding boxes on the given annotated objects, respectively. 
By doing this, both polishing networks can infer more accurate pseudo labels for unannotated objects through sufficiently exploiting their context knowledge based on the initially produced pseudo labels, and thus improve the generalization performance of SSOD. Moreover, such a scheme can be seamlessly plugged into the existing SSOD framework for joint end-to-end learning. In addition, we propose to disentangle the polished pseudo categories and bounding boxes of unannotated objects for separate category classification and bounding box regression in SSOD, which enables introducing more unannotated objects during model training and thus further improves the performance. Experiments on both PASCAL VOC and MS-COCO benchmarks demonstrate the superiority of the proposed method over existing state-of-the-art baselines. The code can be found at https://github.com/snowdusky/DualPolishLearning.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Lei and Sun, Yuxuan and Wei, Wei}, year={2023}, month={Jun.}, pages={3463-3471} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25455/25227", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25455", + "pdf_size": 2048327, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4123062675460603942&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn", + "email": "nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn", + "github": "https://github.com/snowdusky/DualPolishLearning", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+0", + "aff_unique_norm": "Northwestern Polytechnical University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.nwpu.edu.cn", + "aff_unique_abbr": "NPU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + 
"id": "article-26405", + "title": "Minimax AUC Fairness: Efficient Algorithm with Provable Convergence", + "track": "main", + "status": "Technical", + "abstract": "The use of machine learning models in consequential decision making often exacerbates societal inequity, in particular yielding disparate impact on members of marginalized groups defined by race and gender. The area under the ROC curve (AUC) is widely used to evaluate the performance of a scoring function in machine learning, but is studied in algorithmic fairness less than other performance metrics. Due to the pairwise nature of the AUC, defining an AUC-based group fairness metric is pairwise-dependent and may involve both intra-group and inter-group AUCs. Importantly, considering only one category of AUCs is not sufficient to mitigate unfairness in AUC optimization. In this paper, we propose a minimax learning and bias mitigation framework that incorporates both intra-group and inter-group AUCs while maintaining utility. Based on this Rawlsian framework, we design an efficient stochastic optimization algorithm and prove its convergence to the minimum group-level AUC. We conduct numerical experiments on both synthetic and real-world datasets to validate the effectiveness of the minimax framework and the proposed optimization algorithm.", + "primary_area": "philosophy and ethics of ai", + "author": "Zhenhuan Yang; Yan Lok Ko; Kush R. 
Varshney; Yiming Ying", + "authorids": "", + "aff": "Etsy, Inc, Brooklyn, New York, USA; University at Albany, State University of New York, Albany, New York, USA; IBM Research, Yorktown Heights, New York, USA; University at Albany, State University of New York, Albany, New York, USA", + "bibtex": "@article{Yang_Ko_Varshney_Ying_2023, title={Minimax AUC Fairness: Efficient Algorithm with Provable Convergence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26405}, DOI={10.1609/aaai.v37i10.26405}, abstractNote={The use of machine learning models in consequential decision making often exacerbates societal inequity, in particular yielding disparate impact on members of marginalized groups defined by race and gender. The area under the ROC curve (AUC) is widely used to evaluate the performance of a scoring function in machine learning, but is studied in algorithmic fairness less than other performance metrics. Due to the pairwise nature of the AUC, defining an AUC-based group fairness metric is pairwise-dependent and may involve both intra-group and inter-group AUCs. Importantly, considering only one category of AUCs is not sufficient to mitigate unfairness in AUC optimization. In this paper, we propose a minimax learning and bias mitigation framework that incorporates both intra-group and inter-group AUCs while maintaining utility. Based on this Rawlsian framework, we design an efficient stochastic optimization algorithm and prove its convergence to the minimum group-level AUC. We conduct numerical experiments on both synthetic and real-world datasets to validate the effectiveness of the minimax framework and the proposed optimization algorithm.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Zhenhuan and Ko, Yan Lok and Varshney, Kush R. 
and Ying, Yiming}, year={2023}, month={Jun.}, pages={11909-11917} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26405/26177", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26405", + "pdf_size": 2401493, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1396531477450157277&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "hotmail.com;albany.edu;us.ibm.com;albany.edu", + "email": "hotmail.com;albany.edu;us.ibm.com;albany.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Etsy, Inc;State University of New York at Albany;IBM Research", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.etsy.com;https://www.albany.edu;https://www.ibm.com/research", + "aff_unique_abbr": "Etsy;SUNY Albany;IBM", + "aff_campus_unique_index": "0;1;2;1", + "aff_campus_unique": "Brooklyn;Albany;Yorktown Heights", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25673", + "title": "Mining and Applying Composition Knowledge of Dance Moves for Style-Concentrated Dance Generation", + "track": "main", + "status": "Technical", + "abstract": "Choreography refers to creation of dance motions according to both music and dance knowledge, where the created dances should be style-specific and consistent. However, most of the existing methods generate dances using the given music as the only reference, lacking the stylized dancing knowledge, namely, the flag motion patterns contained in different styles. Without the stylized prior knowledge, these approaches are not promising to generate controllable style or diverse moves for each dance style, nor new dances complying with stylized knowledge. To address this issue, we propose a novel music-to-dance generation framework guided by style embedding, considering both input music and stylized dancing knowledge. 
These style embeddings are learnt representations of style-consistent kinematic abstraction of reference dance videos, which can act as controllable factors to impose style constraints on dance generation in a latent manner. Hence, we can make the style embedding fit into any given style while allowing the flexibility to generate new compatible dance moves by modifying the style embedding according to the learnt representations of a certain style. We are the first to achieve knowledge-driven style control in dance generation tasks. To support this study, we build a large multi-style music-to-dance dataset referred to as I-Dance. The qualitative and quantitative evaluations demonstrate the advantage of the proposed framework, as well as the ability to synthesize diverse moves under a dance style directed by style embedding.", + "primary_area": "domain s of application", + "author": "Xinjian Zhang; Su Yang; Yi Xu; Weishan Zhang; Longwen Gao", + "authorids": "", + "aff": "School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing; Department of Software Engineering, China University of Petroleum (East China); Bilibili", + "bibtex": "@article{Zhang_Yang_Xu_Zhang_Gao_2023, title={Mining and Applying Composition Knowledge of Dance Moves for Style-Concentrated Dance Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25673}, DOI={10.1609/aaai.v37i4.25673}, abstractNote={Choreography refers to creation of dance motions according to both music and dance knowledge, where the created dances should be style-specific and consistent. 
However, most of the existing methods generate dances using the given music as the only reference, lacking the stylized dancing knowledge, namely, the flag motion patterns contained in different styles. Without the stylized prior knowledge, these approaches are not promising to generate controllable style or diverse moves for each dance style, nor new dances complying with stylized knowledge. To address this issue, we propose a novel music-to-dance generation framework guided by style embedding, considering both input music and stylized dancing knowledge. These style embeddings are learnt representations of style-consistent kinematic abstraction of reference dance videos, which can act as controllable factors to impose style constraints on dance generation in a latent manner. Hence, we can make the style embedding fit into any given style while allowing the flexibility to generate new compatible dance moves by modifying the style embedding according to the learnt representations of a certain style. We are the first to achieve knowledge-driven style control in dance generation tasks. To support this study, we build a large multi-style music-to-dance dataset referred to as I-Dance. 
The qualitative and quantitative evaluations demonstrate the advantage of the proposed framework, as well as the ability to synthesize diverse moves under a dance style directed by style embedding.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Xinjian and Yang, Su and Xu, Yi and Zhang, Weishan and Gao, Longwen}, year={2023}, month={Jun.}, pages={5411-5419} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25673/25445", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25673", + "pdf_size": 4349539, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16906615337747720694&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 2, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;upc.edu.cn;bilibili.com", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;upc.edu.cn;bilibili.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;2;3", + "aff_unique_norm": "Fudan University;Shanghai Key Laboratory of Intelligent Information Processing;China University of Petroleum (East China);Bilibili Inc.", + "aff_unique_dep": "School of Computer Science;Intelligent Information Processing;Department of Software Engineering;", + "aff_unique_url": "https://www.fudan.edu.cn;;http://www.cup.edu.cn;https://www.bilibili.com", + "aff_unique_abbr": "Fudan;;CUP;Bilibili", + "aff_campus_unique_index": ";;;1", + "aff_campus_unique": ";East China", + "aff_country_unique_index": "0+0;0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25284", + "title": "Minority-Oriented Vicinity Expansion with Attentive Aggregation for Video Long-Tailed Recognition", + "track": "main", + "status": "Technical", + "abstract": "A dramatic increase in real-world video volume with extremely diverse and emerging topics naturally forms a long-tailed video distribution in terms of their categories, and it spotlights the need for Video Long-Tailed 
Recognition (VLTR).\nIn this work, we summarize the challenges in VLTR and explore how to overcome them.\nThe challenges are: (1) it is impractical to re-train the whole model for high-quality features, (2) acquiring frame-wise labels requires extensive cost, and (3) long-tailed data triggers biased training.\nYet, most existing works for VLTR unavoidably utilize image-level features extracted from pretrained models which are task-irrelevant, and learn by video-level labels.\nTherefore, to deal with such (1) task-irrelevant features and (2) video-level labels, we introduce two complementary learnable feature aggregators.\nLearnable layers in each aggregator are to produce task-relevant representations, and each aggregator is to assemble the snippet-wise knowledge into a video representative.\nThen, we propose Minority-Oriented Vicinity Expansion (MOVE) that explicitly leverages the class frequency into approximating the vicinity distributions to alleviate (3) biased training.\nBy combining these solutions, our approach achieves state-of-the-art results on large-scale VideoLT and synthetically induced Imbalanced-MiniKinetics200. \nWith VideoLT features from ResNet-50, it attains 18% and 58% relative improvements on head and tail classes over the previous state-of-the-art method, respectively. 
\nCode and dataset are available at https://github.com/wjun0830/MOVE.", + "primary_area": "computer vision ii", + "author": "WonJun Moon; Hyun Seok Seong; Jae-Pil Heo", + "authorids": "", + "aff": "Sungkyunkwan University; Sungkyunkwan University; Sungkyunkwan University", + "bibtex": "@article{Moon_Seong_Heo_2023, title={Minority-Oriented Vicinity Expansion with Attentive Aggregation for Video Long-Tailed Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25284}, DOI={10.1609/aaai.v37i2.25284}, abstractNote={A dramatic increase in real-world video volume with extremely diverse and emerging topics naturally forms a long-tailed video distribution in terms of their categories, and it spotlights the need for Video Long-Tailed Recognition (VLTR).\nIn this work, we summarize the challenges in VLTR and explore how to overcome them.\nThe challenges are: (1) it is impractical to re-train the whole model for high-quality features, (2) acquiring frame-wise labels requires extensive cost, and (3) long-tailed data triggers biased training.\nYet, most existing works for VLTR unavoidably utilize image-level features extracted from pretrained models which are task-irrelevant, and learn by video-level labels.\nTherefore, to deal with such (1) task-irrelevant features and (2) video-level labels, we introduce two complementary learnable feature aggregators.\nLearnable layers in each aggregator are to produce task-relevant representations, and each aggregator is to assemble the snippet-wise knowledge into a video representative.\nThen, we propose Minority-Oriented Vicinity Expansion (MOVE) that explicitly leverages the class frequency into approximating the vicinity distributions to alleviate (3) biased training.\nBy combining these solutions, our approach achieves state-of-the-art results on large-scale VideoLT and synthetically induced Imbalanced-MiniKinetics200. 
With VideoLT features from ResNet-50, it attains 18% and 58% relative improvements on head and tail classes over the previous state-of-the-art method, respectively. Code and dataset are available at https://github.com/wjun0830/MOVE.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Moon, WonJun and Seong, Hyun Seok and Heo, Jae-Pil}, year={2023}, month={Jun.}, pages={1931-1939} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25284/25056", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25284", + "pdf_size": 464735, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3162080988588436704&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff_domain": "g.skku.edu;g.skku.edu;g.skku.edu", + "email": "g.skku.edu;g.skku.edu;g.skku.edu", + "github": "https://github.com/wjun0830/MOVE", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Sungkyunkwan University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.skku.edu", + "aff_unique_abbr": "SKKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26766", + "title": "Misspecification in Inverse Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "The aim of Inverse Reinforcement Learning (IRL) is to infer a reward function R from a policy pi. To do this, we need a model of how pi relates to R. In the current literature, the most common models are optimality, Boltzmann rationality, and causal entropy maximisation. One of the primary motivations behind IRL is to infer human preferences from human behaviour. However, the true relationship between human preferences and human behaviour is much more complex than any of the models currently used in IRL. 
This means that they are misspecified, which raises the worry that they might lead to unsound inferences if applied to real-world data. In this paper, we provide a mathematical analysis of how robust different IRL models are to misspecification, and answer precisely how the demonstrator policy may differ from each of the standard models before that model leads to faulty inferences about the reward function R. We also introduce a framework for reasoning about misspecification in IRL, together with formal tools that can be used to easily derive the misspecification robustness of new IRL models.", + "primary_area": "safe and robust ai", + "author": "Joar Skalse; Alessandro Abate", + "authorids": "", + "aff": "Oxford University, Department of Computer Science; Oxford University, Department of Computer Science", + "bibtex": "@article{Skalse_Abate_2023, title={Misspecification in Inverse Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26766}, DOI={10.1609/aaai.v37i12.26766}, abstractNote={The aim of Inverse Reinforcement Learning (IRL) is to infer a reward function R from a policy pi. To do this, we need a model of how pi relates to R. In the current literature, the most common models are optimality, Boltzmann rationality, and causal entropy maximisation. One of the primary motivations behind IRL is to infer human preferences from human behaviour. However, the true relationship between human preferences and human behaviour is much more complex than any of the models currently used in IRL. This means that they are misspecified, which raises the worry that they might lead to unsound inferences if applied to real-world data. In this paper, we provide a mathematical analysis of how robust different IRL models are to misspecification, and answer precisely how the demonstrator policy may differ from each of the standard models before that model leads to faulty inferences about the reward function R. 
We also introduce a framework for reasoning about misspecification in IRL, together with formal tools that can be used to easily derive the misspecification robustness of new IRL models.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Skalse, Joar and Abate, Alessandro}, year={2023}, month={Jun.}, pages={15136-15143} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26766/26538", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26766", + "pdf_size": 161626, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14162821081477132480&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.ox.ac.uk;cs.ox.ac.uk", + "email": "cs.ox.ac.uk;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Oxford", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26402", + "title": "Mitigating Adversarial Norm Training with Moral Axioms", + "track": "main", + "status": "Technical", + "abstract": "This paper addresses the issue of adversarial attacks on ethical AI systems. We investigate using moral axioms and rules of deontic logic in a norm learning framework to mitigate adversarial norm training. This model of moral intuition and construction provides AI systems with moral guard rails yet still allows for learning conventions. We evaluate our approach by drawing inspiration from a study commonly used in moral development research. This questionnaire aims to test an agent's ability to reason to moral conclusions despite opposed testimony. 
Our findings suggest that our model can still correctly evaluate moral situations and learn conventions in an adversarial training environment. We conclude that adding axiomatic moral prohibitions and deontic inference rules to a norm learning model makes it less vulnerable to adversarial attacks.", + "primary_area": "philosophy and ethics of ai", + "author": "Taylor Olson; Kenneth D. Forbus", + "authorids": "", + "aff": "Northwestern University; Northwestern University", + "bibtex": "@article{Olson_Forbus_2023, title={Mitigating Adversarial Norm Training with Moral Axioms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26402}, DOI={10.1609/aaai.v37i10.26402}, abstractNote={This paper addresses the issue of adversarial attacks on ethical AI systems. We investigate using moral axioms and rules of deontic logic in a norm learning framework to mitigate adversarial norm training. This model of moral intuition and construction provides AI systems with moral guard rails yet still allows for learning conventions. We evaluate our approach by drawing inspiration from a study commonly used in moral development research. This questionnaire aims to test an agent\u2019s ability to reason to moral conclusions despite opposed testimony. Our findings suggest that our model can still correctly evaluate moral situations and learn conventions in an adversarial training environment. 
We conclude that adding axiomatic moral prohibitions and deontic inference rules to a norm learning model makes it less vulnerable to adversarial attacks.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Olson, Taylor and Forbus, Kenneth D.}, year={2023}, month={Jun.}, pages={11882-11889} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26402/26174", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26402", + "pdf_size": 478403, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15134134867343549347&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff_domain": "u.northwestern.edu;northwestern.edu", + "email": "u.northwestern.edu;northwestern.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Northwestern University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.northwestern.edu", + "aff_unique_abbr": "NU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25398", + "title": "Mitigating Artifacts in Real-World Video Super-resolution Models", + "track": "main", + "status": "Technical", + "abstract": "The recurrent structure is a prevalent framework for the task of video super-resolution, which models the temporal dependency between frames via hidden states. When applied to real-world scenarios with unknown and complex degradations, hidden states tend to contain unpleasant artifacts and propagate them to restored frames. In this circumstance, our analyses show that such artifacts can be largely alleviated when the hidden state is replaced with a cleaner counterpart. Based on the observations, we propose a Hidden State Attention (HSA) module to mitigate artifacts in real-world video super-resolution. 
Specifically, we first adopt various cheap filters to produce a hidden state pool. For example, Gaussian blur filters are for smoothing artifacts while sharpening filters are for enhancing details. To aggregate a new hidden state that contains fewer artifacts from the hidden state pool, we devise a Selective Cross Attention (SCA) module, in which the attention between input features and each hidden state is calculated. Equipped with HSA, our proposed method, namely FastRealVSR, is able to achieve 2x speedup while obtaining better performance than Real-BasicVSR. Codes will be available at https://github.com/TencentARC/FastRealVSR.", + "primary_area": "computer vision iii", + "author": "Liangbin Xie; Xintao Wang; Shuwei Shi; Jinjin Gu; Chao Dong; Ying Shan", + "authorids": "", + "aff": "The Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences+University of Macau+ARC Lab, Tencent PCG; ARC Lab, Tencent PCG; The Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences+Shenzhen International Graduate School, Tsinghua University; The University of Sydney+Shanghai Artificial Intelligence Laboratory; The Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences+Shanghai Artificial Intelligence Laboratory; ARC Lab, Tencent PCG", + "bibtex": "@article{Xie_Wang_Shi_Gu_Dong_Shan_2023, title={Mitigating Artifacts in Real-World Video Super-resolution Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25398}, DOI={10.1609/aaai.v37i3.25398}, abstractNote={The recurrent structure is a prevalent framework for the task of video super-resolution, which models the temporal dependency between frames via hidden states. 
When applied to real-world scenarios with unknown and complex degradations, hidden states tend to contain unpleasant artifacts and propagate them to restored frames. In this circumstance, our analyses show that such artifacts can be largely alleviated when the hidden state is replaced with a cleaner counterpart. Based on the observations, we propose a Hidden State Attention (HSA) module to mitigate artifacts in real-world video super-resolution. Specifically, we first adopt various cheap filters to produce a hidden state pool. For example, Gaussian blur filters are for smoothing artifacts while sharpening filters are for enhancing details. To aggregate a new hidden state that contains fewer artifacts from the hidden state pool, we devise a Selective Cross Attention (SCA) module, in which the attention between input features and each hidden state is calculated. Equipped with HSA, our proposed method, namely FastRealVSR, is able to achieve 2x speedup while obtaining better performance than Real-BasicVSR. 
Codes will be available at https://github.com/TencentARC/FastRealVSR.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Liangbin and Wang, Xintao and Shi, Shuwei and Gu, Jinjin and Dong, Chao and Shan, Ying}, year={2023}, month={Jun.}, pages={2956-2964} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25398/25170", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25398", + "pdf_size": 1611395, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16112636732715904427&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "siat.ac.cn;tencent.com;mails.tsinghua.edu.cn;sydney.edu.au;siat.ac.cn;tencent.com", + "email": "siat.ac.cn;tencent.com;mails.tsinghua.edu.cn;sydney.edu.au;siat.ac.cn;tencent.com", + "github": "https://github.com/TencentARC/FastRealVSR", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;2;0+3;4+5;0+5;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Macau;Tencent;Tsinghua University;University of Sydney;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": "Provincial Key Laboratory of Computer Vision and Virtual Reality Technology;;ARC Lab;Shenzhen International Graduate School;;", + "aff_unique_url": "http://www.cas.cn;https://www.um.edu.mo;https://www.tencent.com;https://www.tsinghua.edu.cn;https://www.sydney.edu.au;http://www.shailab.org/", + "aff_unique_abbr": "CAS;UM;Tencent;THU;USYD;Shanghai AI Lab", + "aff_campus_unique_index": "0;0+0;;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+1+0;0;0+0;2+0;0+0;0", + "aff_country_unique": "China;Macau;Australia" + }, + { + "id": "article-26539", + "title": "Mitigating Negative Style Transfer in Hybrid Dialogue System", + "track": "main", + "status": "Technical", + "abstract": "As the functionality of dialogue systems evolves, hybrid dialogue systems that accomplish user-specific goals and participate in 
open-topic chitchat with users are attracting growing attention. Existing research learns both tasks concurrently utilizing a multi-task fusion technique but ignores the negative transfer phenomenon induced by the unique textual style differences. Therefore, contrastive learning based on the latent variable model is used to decouple the various textual genres in the latent space. We devise supervised and self-supervised positive and negative sample constructions for diverse datasets. In addition, to capitalize on the style information contained in the decoupled latent variables, we employ a style prefix that incorporates latent variables further to control the generation of responses with varying styles. We performed extensive experiments on three dialogue datasets, including a hybrid dialogue dataset and two task-oriented dialogue datasets. The experimental results demonstrate that our method can mitigate the negative style transfer issue and achieves state-of-the-art performance on multiple dialogue datasets.", + "primary_area": "speech natural language processing", + "author": "Shimin Li; Qinyuan Cheng; Linyang Li; Xipeng Qiu", + "authorids": "", + "aff": "School of Computer Science, Fudan University; School of Computer Science, Fudan University; School of Computer Science, Fudan University; School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing, Fudan University", + "bibtex": "@article{Li_Cheng_Li_Qiu_2023, title={Mitigating Negative Style Transfer in Hybrid Dialogue System}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26539}, DOI={10.1609/aaai.v37i11.26539}, abstractNote={As the functionality of dialogue systems evolves, hybrid dialogue systems that accomplish user-specific goals and participate in open-topic chitchat with users are attracting growing attention. 
Existing research learns both tasks concurrently utilizing a multi-task fusion technique but ignores the negative transfer phenomenon induced by the unique textual style differences. Therefore, contrastive learning based on the latent variable model is used to decouple the various textual genres in the latent space. We devise supervised and self-supervised positive and negative sample constructions for diverse datasets. In addition, to capitalize on the style information contained in the decoupled latent variables, we employ a style prefix that incorporates latent variables further to control the generation of responses with varying styles. We performed extensive experiments on three dialogue datasets, including a hybrid dialogue dataset and two task-oriented dialogue datasets. The experimental results demonstrate that our method can mitigate the negative style transfer issue and achieves state-of-the-art performance on multiple dialogue datasets.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shimin and Cheng, Qinyuan and Li, Linyang and Qiu, Xipeng}, year={2023}, month={Jun.}, pages={13103-13111} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26539/26311", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26539", + "pdf_size": 330058, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10230713152885598692&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": 
"0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26983", + "title": "Mitigating Negative Transfer in Multi-Task Learning with Exponential Moving Average Loss Weighting Strategies (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Multi-Task Learning (MTL) is a growing subject of interest in deep learning, due to its ability to train models more efficiently on multiple tasks compared to using a group of conventional single-task models. However, MTL can be impractical as certain tasks can dominate training and hurt performance in others, thus making some tasks perform better in a single-task model compared to a multi-task one. Such problems are broadly classified as negative transfer, and many prior approaches in the literature have been made to mitigate these issues. One such current approach to alleviate negative transfer is to weight each of the losses so that they are on the same scale. Whereas current loss balancing approaches rely on either optimization or complex numerical analysis, none directly scale the losses based on their observed magnitudes. We propose multiple techniques for loss balancing based on scaling by the exponential moving average and benchmark them against current best-performing methods on three established datasets. On these datasets, they achieve comparable, if not higher, performance compared to current best-performing methods.", + "primary_area": "", + "author": "Anish Lakkapragada; Essam Sleiman; Saimourya Surabhi; Dennis P. 
Wall", + "authorids": "", + "aff": "Lynbrook High School, San Jose, CA 95129 + Stanford University, Stanford, CA 94305; University of California, Davis, CA, 95616; Stanford University, Stanford, CA 94305; Stanford University, Stanford, CA 94305", + "bibtex": "@article{Lakkapragada_Sleiman_Surabhi_Wall_2024, title={Mitigating Negative Transfer in Multi-Task Learning with Exponential Moving Average Loss Weighting Strategies (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26983}, DOI={10.1609/aaai.v37i13.26983}, abstractNote={Multi-Task Learning (MTL) is a growing subject of interest in deep learning, due to its ability to train models more efficiently on multiple tasks compared to using a group of conventional single-task models. However, MTL can be impractical as certain tasks can dominate training and hurt performance in others, thus making some tasks perform better in a single-task model compared to a multi-task one. Such problems are broadly classified as negative transfer, and many prior approaches in the literature have been made to mitigate these issues. One such current approach to alleviate negative transfer is to weight each of the losses so that they are on the same scale. Whereas current loss balancing approaches rely on either optimization or complex numerical analysis, none directly scale the losses based on their observed magnitudes. We propose multiple techniques for loss balancing based on scaling by the exponential moving average and benchmark them against current best-performing methods on three established datasets. 
On these datasets, they achieve comparable, if not higher, performance compared to current best-performing methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lakkapragada, Anish and Sleiman, Essam and Surabhi, Saimourya and Wall, Dennis P.}, year={2024}, month={Jul.}, pages={16246-16247} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26983/26755", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26983", + "pdf_size": 67046, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13979007545850668131&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stanford.edu;ucdavis.edu;stanford.edu;stanford.edu", + "email": "stanford.edu;ucdavis.edu;stanford.edu;stanford.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;1;1", + "aff_unique_norm": "Lynbrook High School;Stanford University;University of California, Davis", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.lynbrookhs.org;https://www.stanford.edu;https://www.ucdavis.edu", + "aff_unique_abbr": ";Stanford;UC Davis", + "aff_campus_unique_index": "0+1;2;1;1", + "aff_campus_unique": "San Jose;Stanford;Davis", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26699", + "title": "MixFairFace: Towards Ultimate Fairness via MixFair Adapter in Face Recognition", + "track": "aaai special track", + "status": "Technical", + "abstract": "Although significant progress has been made in face recognition, demographic bias still exists in face recognition systems. For instance, it usually happens that the face recognition performance for a certain demographic group is lower than the others. In this paper, we propose MixFairFace framework to improve the fairness in face recognition models. 
First of all, we argue that the commonly used attribute-based fairness metric is not appropriate for face recognition. A face recognition system can only be considered fair while every person has a close performance. Hence, we propose a new evaluation protocol to fairly evaluate the fairness performance of different approaches. Different from previous approaches that require sensitive attribute labels such as race and gender for reducing the demographic bias, we aim at addressing the identity bias in face representation, i.e., the performance inconsistency between different identities, without the need for sensitive attribute labels. To this end, we propose MixFair Adapter to determine and reduce the identity bias of training samples. Our extensive experiments demonstrate that our MixFairFace approach achieves state-of-the-art fairness performance on all benchmark datasets.", + "primary_area": "ai for social impact", + "author": "Fu-En Wang; Chien-Yi Wang; Min Sun; Shang-Hong Lai", + "authorids": "", + "aff": "Microsoft AI R&D Center, Taiwan + National Tsing Hua University, Taiwan; Microsoft AI R&D Center, Taiwan; National Tsing Hua University, Taiwan; Microsoft AI R&D Center, Taiwan + National Tsing Hua University, Taiwan", + "bibtex": "@article{Wang_Wang_Sun_Lai_2023, title={MixFairFace: Towards Ultimate Fairness via MixFair Adapter in Face Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26699}, DOI={10.1609/aaai.v37i12.26699}, abstractNote={Although significant progress has been made in face recognition, demographic bias still exists in face recognition systems. For instance, it usually happens that the face recognition performance for a certain demographic group is lower than the others. In this paper, we propose MixFairFace framework to improve the fairness in face recognition models. First of all, we argue that the commonly used attribute-based fairness metric is not appropriate for face recognition. 
A face recognition system can only be considered fair while every person has a close performance. Hence, we propose a new evaluation protocol to fairly evaluate the fairness performance of different approaches. Different from previous approaches that require sensitive attribute labels such as race and gender for reducing the demographic bias, we aim at addressing the identity bias in face representation, i.e., the performance inconsistency between different identities, without the need for sensitive attribute labels. To this end, we propose MixFair Adapter to determine and reduce the identity bias of training samples. Our extensive experiments demonstrate that our MixFairFace approach achieves state-of-the-art fairness performance on all benchmark datasets.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Fu-En and Wang, Chien-Yi and Sun, Min and Lai, Shang-Hong}, year={2023}, month={Jun.}, pages={14531-14538} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26699/26471", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26699", + "pdf_size": 950567, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6455176956392435482&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "gapp.nthu.edu.tw;gmail.com;ee.nthu.edu.tw;cs.nthu.edu.tw", + "email": "gapp.nthu.edu.tw;gmail.com;ee.nthu.edu.tw;cs.nthu.edu.tw", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;0+1", + "aff_unique_norm": "Microsoft;National Tsing Hua University", + "aff_unique_dep": "AI R&D Center;", + "aff_unique_url": "https://www.microsoft.com;https://www.nthu.edu.tw", + "aff_unique_abbr": "Microsoft;NTHU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Taiwan, China;", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-26360", + "title": "Mixed-Variable Black-Box 
Optimisation Using Value Proposal Trees", + "track": "main", + "status": "Technical", + "abstract": "Many real-world optimisation problems are defined over both categorical and continuous variables, yet efficient optimisation methods such as Bayesian Optimisation (BO) are ill-equipped to handle such mixed-variable search spaces. The optimisation breadth introduced by categorical variables in the mixed-input setting has seen recent approaches operating on local trust regions, but these methods can be greedy in suboptimal regions of the search space. In this paper, we adopt a holistic view and aim to consolidate optimisation of the categorical and continuous sub-spaces under a single acquisition metric. We develop a tree-based method which retains a global view of the optimisation spaces by identifying regions in the search space with high potential candidates which we call value proposals. Our method uses these proposals to make selections on both the categorical and continuous components of the input. We show that this approach significantly outperforms existing mixed-variable optimisation approaches across several mixed-variable black-box optimisation tasks.", + "primary_area": "machine learning iv", + "author": "Yan Zuo; Vu Nguyen; Amir Dezfouli; David Alexander; Benjamin Ward Muir; Iadine Chades", + "authorids": "", + "aff": "Amazon; Amazon; CSIRO; CSIRO; CSIRO; CSIRO", + "bibtex": "@article{Zuo_Nguyen_Dezfouli_Alexander_Muir_Chades_2023, title={Mixed-Variable Black-Box Optimisation Using Value Proposal Trees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26360}, DOI={10.1609/aaai.v37i9.26360}, abstractNote={Many real-world optimisation problems are defined over both categorical and continuous variables, yet efficient optimisation methods such as Bayesian Optimisation (BO) are ill-equipped to handle such mixed-variable search spaces. 
The optimisation breadth introduced by categorical variables in the mixed-input setting has seen recent approaches operating on local trust regions, but these methods can be greedy in suboptimal regions of the search space. In this paper, we adopt a holistic view and aim to consolidate optimisation of the categorical and continuous sub-spaces under a single acquisition metric. We develop a tree-based method which retains a global view of the optimisation spaces by identifying regions in the search space with high potential candidates which we call value proposals. Our method uses these proposals to make selections on both the categorical and continuous components of the input. We show that this approach significantly outperforms existing mixed-variable optimisation approaches across several mixed-variable black-box optimisation tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zuo, Yan and Nguyen, Vu and Dezfouli, Amir and Alexander, David and Muir, Benjamin Ward and Chades, Iadine}, year={2023}, month={Jun.}, pages={11506-11514} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26360/26132", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26360", + "pdf_size": 858656, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:RtiD4w5NacsJ:scholar.google.com/&scioq=Mixed-Variable+Black-Box+Optimisation+Using+Value+Proposal+Trees&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "amazon.com;amazon.com;data61.csiro.au;data61.csiro.au;csiro.au;csiro.au", + "email": "amazon.com;amazon.com;data61.csiro.au;data61.csiro.au;csiro.au;csiro.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;1;1", + "aff_unique_norm": "Amazon.com, Inc.;Commonwealth Scientific and Industrial Research Organisation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.amazon.com;https://www.csiro.au", + "aff_unique_abbr": "Amazon;CSIRO", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1;1;1", + "aff_country_unique": "United States;Australia" + }, + { + "id": "article-26178", + "title": "Mixture Manifold Networks: A Computationally Efficient Baseline for Inverse Modeling", + "track": "main", + "status": "Technical", + "abstract": "We propose and show the efficacy of a new method to address generic inverse problems. Inverse modeling is the task whereby one seeks to determine the hidden parameters of a natural system that produce a given set of observed measurements. Recent work has shown impressive results using deep learning, but we note that there is a trade-off between model performance and computational time. For some applications, the computational time at inference for the best performing inverse modeling method may be overly prohibitive to its use. In seeking a faster, high-performing model, we present a new method that leverages multiple manifolds as a mixture of backward (e.g., inverse) models in a forward-backward model architecture. These multiple backwards models all share a common forward model, and their training is mitigated by generating training examples from the forward model. The proposed method thus has two innovations: 1) the multiple Manifold Mixture Network (MMN) architecture, and 2) the training procedure involving augmenting backward model training data using the forward model. We demonstrate the advantages of our method by comparing to several baselines on four benchmark inverse problems, and we furthermore provide analysis to motivate its design.", + "primary_area": "machine learning iii", + "author": "Gregory P. Spell; Simiao Ren; Leslie M. Collins; Jordan M. 
Malof", + "authorids": "", + "aff": "Duke University, Department of Electrical & Computer Engineering, Durham, NC USA; Duke University, Department of Electrical & Computer Engineering, Durham, NC USA; Duke University, Department of Electrical & Computer Engineering, Durham, NC USA; University of Montana, Department of Computer Science, Missoula, MT USA", + "bibtex": "@article{Spell_Ren_Collins_Malof_2023, title={Mixture Manifold Networks: A Computationally Efficient Baseline for Inverse Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26178}, DOI={10.1609/aaai.v37i8.26178}, abstractNote={We propose and show the efficacy of a new method to address generic inverse problems. Inverse modeling is the task whereby one seeks to determine the hidden parameters of a natural system that produce a given set of observed measurements. Recent work has shown impressive results using deep learning, but we note that there is a trade-off between model performance and computational time. For some applications, the computational time at inference for the best performing inverse modeling method may be overly prohibitive to its use. In seeking a faster, high-performing model, we present a new method that leverages multiple manifolds as a mixture of backward (e.g., inverse) models in a forward-backward model architecture. These multiple backwards models all share a common forward model, and their training is mitigated by generating training examples from the forward model. The proposed method thus has two innovations: 1) the multiple Manifold Mixture Network (MMN) architecture, and 2) the training procedure involving augmenting backward model training data using the forward model. 
We demonstrate the advantages of our method by comparing to several baselines on four benchmark inverse problems, and we furthermore provide analysis to motivate its design.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Spell, Gregory P. and Ren, Simiao and Collins, Leslie M. and Malof, Jordan M.}, year={2023}, month={Jun.}, pages={9874-9881} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26178/25950", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26178", + "pdf_size": 1086887, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15957521208499900034&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "duke.edu;duke.edu;duke.edu;umontana.edu", + "email": "duke.edu;duke.edu;duke.edu;umontana.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Duke University;University of Montana", + "aff_unique_dep": "Department of Electrical & Computer Engineering;Department of Computer Science", + "aff_unique_url": "https://www.duke.edu;https://www.umt.edu", + "aff_unique_abbr": "Duke;UM", + "aff_campus_unique_index": "0;0;0;1", + "aff_campus_unique": "Durham;Missoula", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26137", + "title": "Mixture Uniform Distribution Modeling and Asymmetric Mix Distillation for Class Incremental Learning", + "track": "main", + "status": "Technical", + "abstract": "Exemplar rehearsal-based methods with knowledge distillation (KD) have been widely used in class incremental learning (CIL) scenarios. However, they still suffer from performance degradation because of severely distribution discrepancy between training and test set caused by the limited storage memory on previous classes. 
In this paper, we mathematically model the data distribution and the discrepancy at the incremental stages with mixture uniform distribution (MUD). Then, we propose the asymmetric mix distillation method to uniformly minimize the error of each class from distribution discrepancy perspective. Specifically, we firstly promote mixup in CIL scenarios with the incremental mix samplers and incremental mix factor to calibrate the raw training data distribution. Next, mix distillation label augmentation is incorporated into the data distribution to inherit the knowledge information from the previous models. Based on the above augmented data distribution, our trained model effectively alleviates the performance degradation and extensive experimental results validate that our method exhibits superior performance on CIL benchmarks.", + "primary_area": "machine learning iii", + "author": "Sunyuan Qiang; Jiayi Hou; Jun Wan; Yanyan Liang; Zhen Lei; Du Zhang", + "authorids": "", + "aff": "Macau University of Science and Technology; Lafayette College; Macau University of Science and Technology+National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences+School of Artificial Intelligence, University of Chinese Academy of Sciences; Macau University of Science and Technology; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences; Macau University of Science and Technology", + "bibtex": "@article{Qiang_Hou_Wan_Liang_Lei_Zhang_2023, title={Mixture Uniform Distribution Modeling and Asymmetric Mix Distillation for Class Incremental Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26137}, DOI={10.1609/aaai.v37i8.26137}, abstractNote={Exemplar rehearsal-based methods with knowledge distillation (KD) have been widely used in class incremental learning (CIL) scenarios. 
However, they still suffer from performance degradation because of severely distribution discrepancy between training and test set caused by the limited storage memory on previous classes. In this paper, we mathematically model the data distribution and the discrepancy at the incremental stages with mixture uniform distribution (MUD). Then, we propose the asymmetric mix distillation method to uniformly minimize the error of each class from distribution discrepancy perspective. Specifically, we firstly promote mixup in CIL scenarios with the incremental mix samplers and incremental mix factor to calibrate the raw training data distribution. Next, mix distillation label augmentation is incorporated into the data distribution to inherit the knowledge information from the previous models. Based on the above augmented data distribution, our trained model effectively alleviates the performance degradation and extensive experimental results validate that our method exhibits superior performance on CIL benchmarks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qiang, Sunyuan and Hou, Jiayi and Wan, Jun and Liang, Yanyan and Lei, Zhen and Zhang, Du}, year={2023}, month={Jun.}, pages={9498-9506} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26137/25909", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26137", + "pdf_size": 330717, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10709819283645733834&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ia.ac.cn; ;must.edu.mo; ; ;", + "email": "ia.ac.cn; ;must.edu.mo; ; ;", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0+2+3;0;2;0", + "aff_unique_norm": "Macau University of Science and Technology;Lafayette College;Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": ";;Institute of Automation;School of Artificial 
Intelligence", + "aff_unique_url": "https://www.must.edu.mo;https://www.lafayette.edu;http://www.ia.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "MUST;Lafayette;CAS;UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0+2+2;0;2;0", + "aff_country_unique": "Macau;United States;China" + }, + { + "id": "article-26617", + "title": "MoEC: Mixture of Expert Clusters", + "track": "main", + "status": "Technical", + "abstract": "Sparsely Mixture of Experts (MoE) has received great interest due to its promising scaling capability with affordable computational overhead. MoE models convert dense layers into sparse experts, and utilize a gated routing network to make experts conditionally activated. However, as the number of experts grows, MoE with outrageous parameters suffers from overfitting and sparse data allocation. Such problems are especially severe on tasks with limited data, thus hindering the progress towards improving performance by scaling up. We verify that there exists a performance upper bound of scaling up sparse MoE. In this work, we propose Mixture of Expert Clusters \u2014 a general approach to enable expert layers to learn more diverse and appropriate knowledge by imposing variance-based constraints on the routing stage. Given this, we could further propose a cluster-level expert dropout strategy specifically designed for the expert cluster structure. Our experiments reveal that MoEC could improve performance on machine translation and natural language understanding tasks. 
MoEC plays a positive role in mitigating overfitting and sparse data allocation problems, thus fully releasing the potential of large-scale sparse models.", + "primary_area": "speech natural language processing", + "author": "Yuan Xie; Shaohan Huang; Tianyu Chen; Furu Wei", + "authorids": "", + "aff": "Microsoft Research Asia, China; Microsoft Research Asia, China; Microsoft Research Asia, China; Microsoft Research Asia, China", + "bibtex": "@article{Xie_Huang_Chen_Wei_2023, title={MoEC: Mixture of Expert Clusters}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26617}, DOI={10.1609/aaai.v37i11.26617}, abstractNote={Sparsely Mixture of Experts (MoE) has received great interest due to its promising scaling capability with affordable computational overhead. MoE models convert dense layers into sparse experts, and utilize a gated routing network to make experts conditionally activated. However, as the number of experts grows, MoE with outrageous parameters suffers from overfitting and sparse data allocation. Such problems are especially severe on tasks with limited data, thus hindering the progress towards improving performance by scaling up. We verify that there exists a performance upper bound of scaling up sparse MoE. In this work, we propose Mixture of Expert Clusters \u2014 a general approach to enable expert layers to learn more diverse and appropriate knowledge by imposing variance-based constraints on the routing stage. Given this, we could further propose a cluster-level expert dropout strategy specifically designed for the expert cluster structure. Our experiments reveal that MoEC could improve performance on machine translation and natural language understanding tasks. 
MoEC plays a positive role in mitigating overfitting and sparse data allocation problems, thus fully releasing the potential of large-scale sparse models.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Yuan and Huang, Shaohan and Chen, Tianyu and Wei, Furu}, year={2023}, month={Jun.}, pages={13807-13815} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26617/26389", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26617", + "pdf_size": 1258886, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18201314464612608138&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/xy980523/MoEc", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Microsoft Research Asia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "MSRA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26907", + "title": "MoMusic: A Motion-Driven Human-AI Collaborative Music Composition and Performing System", + "track": "eaai symposium human aware ai in sound and music", + "status": "Technical", + "abstract": "The significant development of artificial neural network architectures has facilitated the increasing adoption of automated music composition models over the past few years. However, most existing systems feature algorithmic generative structures based on hard code and predefined rules, generally excluding interactive or improvised behaviors. We propose a motion based music system, MoMusic, as a AI real time music generation system. 
MoMusic features a partially randomized harmonic sequencing model based on a probabilistic analysis of tonal chord progressions, mathematically abstracted through musical set theory. This model is presented against a dual dimension grid that produces resulting sounds through a posture recognition mechanism. A camera captures the users' fingers' movement and trajectories, creating coherent, partially improvised harmonic progressions. MoMusic integrates several timbrical registers, from traditional classical instruments such as the piano to a new ''human voice instrument'' created using a voice conversion technique. Our research demonstrates MoMusic's interactiveness, ability to inspire musicians, and ability to generate coherent musical material with various timbrical registers. MoMusic's capabilities could be easily expanded to incorporate different forms of posture controlled timbrical transformation, rhythmic transformation, dynamic transformation, or even digital sound processing techniques.", + "primary_area": "", + "author": "Weizhen Bian; Yijin Song; Nianzhen Gu; Tin Yan Chan; Tsz To Lo; Tsun Sun Li; King Chak Wong; Wei Xue; Roberto Alonso Trillo", + "authorids": "", + "aff": "Department of Computer Science, Hong Kong Baptist University, Hong Kong + Academy of Music, Hong Kong Baptist University, Hong Kong; Department of Computer Science, Hong Kong Baptist University, Hong Kong + Academy of Music, Hong Kong Baptist University, Hong Kong; Department of Computer Science, Hong Kong Baptist University, Hong Kong + Academy of Music, Hong Kong Baptist University, Hong Kong; Academy of Music, Hong Kong Baptist University, Hong Kong; Academy of Music, Hong Kong Baptist University, Hong Kong; Academy of Music, Hong Kong Baptist University, Hong Kong; Academy of Music, Hong Kong Baptist University, Hong Kong; Department of Computer Science, Hong Kong Baptist University, Hong Kong; Academy of Music, Hong Kong Baptist University, Hong Kong", + "bibtex": 
"@article{Bian_Song_Gu_Chan_Lo_Li_Wong_Xue_Alonso Trillo_2024, title={MoMusic: A Motion-Driven Human-AI Collaborative Music Composition and Performing System}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26907}, DOI={10.1609/aaai.v37i13.26907}, abstractNote={The significant development of artificial neural network architectures has facilitated the increasing adoption of automated music composition models over the past few years. However, most existing systems feature algorithmic generative structures based on hard code and predefined rules, generally excluding interactive or improvised behaviors. We propose a motion based music system, MoMusic, as a AI real time music generation system. MoMusic features a partially randomized harmonic sequencing model based on a probabilistic analysis of tonal chord progressions, mathematically abstracted through musical set theory. This model is presented against a dual dimension grid that produces resulting sounds through a posture recognition mechanism. A camera captures the users\u2019 fingers\u2019 movement and trajectories, creating coherent, partially improvised harmonic progressions. MoMusic integrates several timbrical registers, from traditional classical instruments such as the piano to a new \u2019\u2019human voice instrument\u2019\u2019 created using a voice conversion technique. Our research demonstrates MoMusic\u2019s interactiveness, ability to inspire musicians, and ability to generate coherent musical material with various timbrical registers. 
MoMusic\u2019s capabilities could be easily expanded to incorporate different forms of posture controlled timbrical transformation, rhythmic transformation, dynamic transformation, or even digital sound processing techniques.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bian, Weizhen and Song, Yijin and Gu, Nianzhen and Chan, Tin Yan and Lo, Tsz To and Li, Tsun Sun and Wong, King Chak and Xue, Wei and Alonso Trillo, Roberto}, year={2024}, month={Jul.}, pages={16057-16062} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26907/26679", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26907", + "pdf_size": 1104189, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15792146833372357153&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;comp.hkbu.edu.hk;hkbu.edu.hk", + "email": "life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;life.hkbu.edu.hk;comp.hkbu.edu.hk;hkbu.edu.hk", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+0;0+0;0+0;0;0;0;0;0;0", + "aff_unique_norm": "Hong Kong Baptist University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.hkbu.edu.hk", + "aff_unique_abbr": "HKBU", + "aff_campus_unique_index": "0+0;0+0;0+0;0;0;0;0;0;0", + "aff_campus_unique": "Hong Kong", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26859", + "title": "MobilePTX: Sparse Coding for Pneumothorax Detection Given Limited Training Examples", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Point-of-Care Ultrasound (POCUS) refers to clinician-performed and interpreted ultrasonography at the patient's bedside. 
Interpreting these images requires a high level of expertise, which may not be available during emergencies. In this paper, we support POCUS by developing classifiers that can aid medical professionals by diagnosing whether or not a patient has pneumothorax. We decomposed the task into multiple steps, using YOLOv4 to extract relevant regions of the video and a 3D sparse coding model to represent video features. Given the difficulty in acquiring positive training videos, we trained a small-data classifier with a maximum of 15 positive and 32 negative examples. To counteract this limitation, we leveraged subject matter expert (SME) knowledge to limit the hypothesis space, thus reducing the cost of data collection. We present results using two lung ultrasound datasets and demonstrate that our model is capable of achieving performance on par with SMEs in pneumothorax identification. We then developed an iOS application that runs our full system in less than 4 seconds on an iPad Pro, and less than 8 seconds on an iPhone 13 Pro, labeling key regions in the lung sonogram to provide interpretable diagnoses.", + "primary_area": "emerging applications of ai", + "author": "Darryl Hannan; Steven C. Nesbit; Ximing Wen; Glen Smith; Qiao Zhang; Alberto Goffi; Vincent Chan; Michael J. Morris; John C. Hunninghake; Nicholas E. Villalobos; Edward Kim; Rosina O. Weber; Christopher J. 
MacLellan", + "authorids": "", + "aff": "Drexel University; Drexel University; Drexel University; Drexel University; Drexel University; University of Toronto; University of Toronto; Brooke Army Medical Center; Brooke Army Medical Center; Brooke Army Medical Center; Drexel University; Drexel University; Georgia Institute of Technology", + "bibtex": "@article{Hannan_Nesbit_Wen_Smith_Zhang_Goffi_Chan_Morris_Hunninghake_Villalobos_Kim_Weber_MacLellan_2024, title={MobilePTX: Sparse Coding for Pneumothorax Detection Given Limited Training Examples}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26859}, DOI={10.1609/aaai.v37i13.26859}, abstractNote={Point-of-Care Ultrasound (POCUS) refers to clinician-performed and interpreted ultrasonography at the patient\u2019s bedside. Interpreting these images requires a high level of expertise, which may not be available during emergencies. In this paper, we support POCUS by developing classifiers that can aid medical professionals by diagnosing whether or not a patient has pneumothorax. We decomposed the task into multiple steps, using YOLOv4 to extract relevant regions of the video and a 3D sparse coding model to represent video features. Given the difficulty in acquiring positive training videos, we trained a small-data classifier with a maximum of 15 positive and 32 negative examples. To counteract this limitation, we leveraged subject matter expert (SME) knowledge to limit the hypothesis space, thus reducing the cost of data collection. We present results using two lung ultrasound datasets and demonstrate that our model is capable of achieving performance on par with SMEs in pneumothorax identification. 
We then developed an iOS application that runs our full system in less than 4 seconds on an iPad Pro, and less than 8 seconds on an iPhone 13 Pro, labeling key regions in the lung sonogram to provide interpretable diagnoses.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hannan, Darryl and Nesbit, Steven C. and Wen, Ximing and Smith, Glen and Zhang, Qiao and Goffi, Alberto and Chan, Vincent and Morris, Michael J. and Hunninghake, John C. and Villalobos, Nicholas E. and Kim, Edward and Weber, Rosina O. and MacLellan, Christopher J.}, year={2024}, month={Jul.}, pages={15675-15681} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26859/26631", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26859", + "pdf_size": 3092651, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5776341851890491425&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "drexel.edu; ; ; ; ; ; ; ; ; ; ; ;gatech.edu", + "email": "drexel.edu; ; ; ; ; ; ; ; ; ; ; ;gatech.edu", + "github": "", + "project": "", + "author_num": 13, + "aff_unique_index": "0;0;0;0;0;1;1;2;2;2;0;0;3", + "aff_unique_norm": "Drexel University;University of Toronto;Brooke Army Medical Center;Georgia Institute of Technology", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.drexel.edu;https://www.utoronto.ca;https://www BAMC.amedd.army.mil/;https://www.gatech.edu", + "aff_unique_abbr": "Drexel;U of T;BAMC;Georgia Tech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1;1;0;0;0;0;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-25874", + "title": "MobileTL: On-Device Transfer Learning with Inverted Residual Blocks", + "track": "main", + "status": "Technical", + "abstract": "Transfer learning on edge is challenging due to on-device limited resources. 
Existing work addresses this issue by training a subset of parameters or adding model patches. Developed with inference in mind, Inverted Residual Blocks (IRBs) split a convolutional layer into depthwise and pointwise convolutions, leading to more stacking layers, e.g., convolution, normalization, and activation layers. Though they are efficient for inference, IRBs require that additional activation maps are stored in memory for training weights for convolution layers and scales for normalization layers. As a result, their high memory cost prohibits training IRBs on resource-limited edge devices, and making them unsuitable in the context of transfer learning. To address this issue, we present MobileTL, a memory and computationally efficient on-device transfer learning method for models built with IRBs. MobileTL trains the shifts for internal normalization layers to avoid storing activation maps for the backward pass. Also, MobileTL approximates the backward computation of the activation layer (e.g., Hard-Swish and ReLU6) as a signed function which enables storing a binary mask instead of activation maps for the backward pass. MobileTL fine-tunes a few top blocks (close to output) rather than propagating the gradient through the whole network to reduce the computation cost. Our method reduces memory usage by 46% and 53% for MobileNetV2 and V3 IRBs, respectively. For MobileNetV3, we observe a 36% reduction in floating-point operations (FLOPs) when fine-tuning 5 blocks, while only incurring a 0.6% accuracy reduction on CIFAR10. 
Extensive experiments on multiple datasets demonstrate that our method is Pareto-optimal (best accuracy under given hardware constraints) compared to prior work in transfer learning for edge devices.", + "primary_area": "machine learning i", + "author": "Hung-Yueh Chiang; Natalia Frumkin; Feng Liang; Diana Marculescu", + "authorids": "", + "aff": "The University of Texas at Austin; The University of Texas at Austin; The University of Texas at Austin; The University of Texas at Austin", + "bibtex": "@article{Chiang_Frumkin_Liang_Marculescu_2023, title={MobileTL: On-Device Transfer Learning with Inverted Residual Blocks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25874}, DOI={10.1609/aaai.v37i6.25874}, abstractNote={Transfer learning on edge is challenging due to on-device limited resources. Existing work addresses this issue by training a subset of parameters or adding model patches. Developed with inference in mind, Inverted Residual Blocks (IRBs) split a convolutional layer into depthwise and pointwise convolutions, leading to more stacking layers, e.g., convolution, normalization, and activation layers. Though they are efficient for inference, IRBs require that additional activation maps are stored in memory for training weights for convolution layers and scales for normalization layers. As a result, their high memory cost prohibits training IRBs on resource-limited edge devices, and making them unsuitable in the context of transfer learning. To address this issue, we present MobileTL, a memory and computationally efficient on-device transfer learning method for models built with IRBs. MobileTL trains the shifts for internal normalization layers to avoid storing activation maps for the backward pass. Also, MobileTL approximates the backward computation of the activation layer (e.g., Hard-Swish and ReLU6) as a signed function which enables storing a binary mask instead of activation maps for the backward pass. 
MobileTL fine-tunes a few top blocks (close to output) rather than propagating the gradient through the whole network to reduce the computation cost. Our method reduces memory usage by 46% and 53% for MobileNetV2 and V3 IRBs, respectively. For MobileNetV3, we observe a 36% reduction in floating-point operations (FLOPs) when fine-tuning 5 blocks, while only incurring a 0.6% accuracy reduction on CIFAR10. Extensive experiments on multiple datasets demonstrate that our method is Pareto-optimal (best accuracy under given hardware constraints) compared to prior work in transfer learning for edge devices.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chiang, Hung-Yueh and Frumkin, Natalia and Liang, Feng and Marculescu, Diana}, year={2023}, month={Jun.}, pages={7166-7174} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25874/25646", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25874", + "pdf_size": 4032211, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8285131663458349855&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "utexas.edu;utexas.edu;utexas.edu;utexas.edu", + "email": "utexas.edu;utexas.edu;utexas.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Texas at Austin", + "aff_unique_dep": "", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26975", + "title": "Mobility Prediction via Sequential Trajectory Disentanglement (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Accurately predicting human mobility is a critical task in location-based recommendation. 
Most prior approaches focus on fusing multiple semantics trajectories to forecast the future movement of people, and fail to consider the distinct relations in underlying context of human mobility, resulting in a narrow perspective to comprehend human motions. Inspired by recent advances in disentanglement learning, we propose a novel self-supervised method called SelfMove for next POI prediction. SelfMove seeks to disentangle the potential time-invariant and time-varying factors from massive trajectories, which provides an interpretable view to understand the complex semantics underlying human mobility representations. To address the data sparsity issue, we present two realistic trajectory augmentation approaches to help understand the intrinsic periodicity and constantly changing intents of humans. In addition, a POI-centric graph structure is proposed to explore both homogeneous and heterogeneous collaborative signals behind historical trajectories. Experiments on two real-world datasets demonstrate the superiority of SelfMove compared to the state-of-the-art baselines.", + "primary_area": "", + "author": "Jinyu Hong; Fan Zhou; Qiang Gao; Ping Kuang; Kunpeng Zhang", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China+Kash Institute of Electronics and Information Industry; Southwestern University of Finance and Economics+Kash Institute of Electronics and Information Industry; University of Electronic Science and Technology of China; University of Maryland, College park", + "bibtex": "@article{Hong_Zhou_Gao_Kuang_Zhang_2024, title={Mobility Prediction via Sequential Trajectory Disentanglement (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26975}, DOI={10.1609/aaai.v37i13.26975}, abstractNote={Accurately predicting human mobility is a critical task in location-based recommendation. 
Most prior approaches focus on fusing multiple semantics trajectories to forecast the future movement of people, and fail to consider the distinct relations in underlying context of human mobility, resulting in a narrow perspective to comprehend human motions. Inspired by recent advances in disentanglement learning, we propose a novel self-supervised method called SelfMove for next POI prediction. SelfMove seeks to disentangle the potential time-invariant and time-varying factors from massive trajectories, which provides an interpretable view to understand the complex semantics underlying human mobility representations. To address the data sparsity issue, we present two realistic trajectory augmentation approaches to help understand the intrinsic periodicity and constantly changing intents of humans. In addition, a POI-centric graph structure is proposed to explore both homogeneous and heterogeneous collaborative signals behind historical trajectories. Experiments on two real-world datasets demonstrate the superiority of SelfMove compared to the state-of-the-art baselines.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hong, Jinyu and Zhou, Fan and Gao, Qiang and Kuang, Ping and Zhang, Kunpeng}, year={2024}, month={Jul.}, pages={16230-16231} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26975/26747", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26975", + "pdf_size": 132089, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15380274773324389195&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "std.uestc.edu.cn;uestc.edu.cn;swufe.edu.cn;uestc.edu.cn;umd.edu", + "email": "std.uestc.edu.cn;uestc.edu.cn;swufe.edu.cn;uestc.edu.cn;umd.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2+1;0;3", + "aff_unique_norm": "University of Electronic Science and Technology of China;Kash Institute of Electronics 
and Information Industry;Southwestern University of Finance and Economics;University of Maryland", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.uestc.edu.cn;;https://www.swufe.edu.cn;https://www/umd.edu", + "aff_unique_abbr": "UESTC;;SWUFE;UMD", + "aff_campus_unique_index": ";;1", + "aff_campus_unique": ";College Park", + "aff_country_unique_index": "0;0;0;0;2", + "aff_country_unique": "China;;United States" + }, + { + "id": "article-26913", + "title": "Model AI Assignments 2023", + "track": "eaai symposium model ai assignment abstracts", + "status": "Technical", + "abstract": "The Model AI Assignments session seeks to gather and disseminate the best assignment designs of the Artificial Intelligence (AI) Education community. Recognizing that assignments form the core of student learning experience, we here present abstracts of six AI assignments from the 2023 session that are easily adoptable, playfully engaging, and flexible for a variety of instructor needs. Assignment specifications and supporting resources may be found at http://modelai.gettysburg.edu .", + "primary_area": "", + "author": "Todd W. Neller; Raechel Walker; Olivia Dias; Zeynep Yal\u00e7\u0131n; Cynthia Breazeal; Matt Taylor; Michele Donini; Erin J. 
Talvitie; Charlie Pilgrim; Paolo Turrini; James Maher; Matthew Boutell; Justin Wilson; Narges Norouzi; Jonathan Scott", + "authorids": "", + "aff": "Gettysburg College; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Wellesley College; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Amazon Web Services; Harvey Mudd College; The University of Warwick; The University of Warwick; United States Air Force Academy; United States Air Force Academy; United States Air Force Academy; University of California, Santa Cruz; University of California, Santa Cruz", + "bibtex": "@article{Neller_Walker_Dias_Yal\u00e7\u0131n_Breazeal_Taylor_Donini_Talvitie_Pilgrim_Turrini_Maher_Boutell_Wilson_Norouzi_Scott_2024, title={Model AI Assignments 2023}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26913}, DOI={10.1609/aaai.v37i13.26913}, abstractNote={The Model AI Assignments session seeks to gather and disseminate the best assignment designs of the Artificial Intelligence (AI) Education community. Recognizing that assignments form the core of student learning experience, we here present abstracts of six AI assignments from the 2023 session that are easily adoptable, playfully engaging, and flexible for a variety of instructor needs. Assignment specifications and supporting resources may be found at http://modelai.gettysburg.edu .}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Neller, Todd W. and Walker, Raechel and Dias, Olivia and Yal\u00e7\u0131n, Zeynep and Breazeal, Cynthia and Taylor, Matt and Donini, Michele and Talvitie, Erin J. 
and Pilgrim, Charlie and Turrini, Paolo and Maher, James and Boutell, Matthew and Wilson, Justin and Norouzi, Narges and Scott, Jonathan}, year={2024}, month={Jul.}, pages={16104-16105} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26913/26685", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26913", + "pdf_size": 50192, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff_domain": ";;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;", + "github": "", + "project": "http://modelai.gettysburg.edu", + "author_num": 15, + "aff_unique_index": "0;1;1;2;1;1;3;4;5;5;6;6;6;7;7", + "aff_unique_norm": "Gettysburg College;Massachusetts Institute of Technology;Wellesley College;Amazon Web Services;Harvey Mudd College;University of Warwick;United States Air Force Academy;University of California, Santa Cruz", + "aff_unique_dep": ";;;;;;;", + "aff_unique_url": "https://www.gettysburg.edu;https://web.mit.edu;https://www.wellesley.edu;https://aws.amazon.com;https://www.hmc.edu;https://warwick.ac.uk;https://www.usafa.edu/;https://www.ucsc.edu", + "aff_unique_abbr": "Gettysburg College;MIT;Wellesley;AWS;HMC;Warwick;USAFA;UCSC", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Santa Cruz", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;1;1;0;0;0;0;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "article-26944", + "title": "Model Selection of Graph Signage Models Using Maximum Likelihood (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Complex systems across various domains can be naturally modeled as signed networks with positive and negative edges. 
In this work, we design a new class of signage models and show how to select the model parameters that best fit real-world datasets using maximum likelihood.", + "primary_area": "", + "author": "Angelina Brilliantova; Ivona Bez\u00e1kov\u00e1", + "authorids": "", + "aff": "Rochester Institute of Technology; Rochester Institute of Technology", + "bibtex": "@article{Brilliantova_Bez\u00e1kov\u00e1_2024, title={Model Selection of Graph Signage Models Using Maximum Likelihood (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26944}, DOI={10.1609/aaai.v37i13.26944}, abstractNote={Complex systems across various domains can be naturally modeled as signed networks with positive and negative edges. In this work, we design a new class of signage models and show how to select the model parameters that best fit real-world datasets using maximum likelihood.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brilliantova, Angelina and Bez\u00e1kov\u00e1, Ivona}, year={2024}, month={Jul.}, pages={16168-16169} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26944/26716", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26944", + "pdf_size": 81225, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:lTlfVJelhjwJ:scholar.google.com/&scioq=Model+Selection+of+Graph+Signage+Models+Using+Maximum+Likelihood+(Student+Abstract)&hl=en&as_sdt=0,44", + "gs_version_total": 3, + "aff_domain": "rit.edu; ", + "email": "rit.edu; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Rochester Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rit.edu", + "aff_unique_abbr": "RIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25903", + "title": "Model-Based 
Offline Reinforcement Learning with Local Misspecification", + "track": "main", + "status": "Technical", + "abstract": "We present a model-based offline reinforcement learning policy performance lower bound that explicitly captures dynamics model misspecification and distribution mismatch and we propose an empirical algorithm for optimal offline policy selection. Theoretically, we prove a novel safe policy improvement theorem by establishing pessimism approximations to the value function. Our key insight is to jointly consider selecting over dynamics models and policies: as long as a dynamics model can accurately represent the dynamics of the state-action pairs visited by a given policy, it is possible to approximate the value of that particular policy. We analyze our lower bound in the LQR setting and also show competitive performance to previous lower bounds on policy selection across a set of D4RL tasks.", + "primary_area": "machine learning i", + "author": "Kefan Dong; Yannis Flet-Berliac; Allen Nie; Emma Brunskill", + "authorids": "", + "aff": "Stanford University; Stanford University; Stanford University; Stanford University", + "bibtex": "@article{Dong_Flet-Berliac_Nie_Brunskill_2023, title={Model-Based Offline Reinforcement Learning with Local Misspecification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25903}, DOI={10.1609/aaai.v37i6.25903}, abstractNote={We present a model-based offline reinforcement learning policy performance lower bound that explicitly captures dynamics model misspecification and distribution mismatch and we propose an empirical algorithm for optimal offline policy selection. Theoretically, we prove a novel safe policy improvement theorem by establishing pessimism approximations to the value function. 
Our key insight is to jointly consider selecting over dynamics models and policies: as long as a dynamics model can accurately represent the dynamics of the state-action pairs visited by a given policy, it is possible to approximate the value of that particular policy. We analyze our lower bound in the LQR setting and also show competitive performance to previous lower bounds on policy selection across a set of D4RL tasks.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Kefan and Flet-Berliac, Yannis and Nie, Allen and Brunskill, Emma}, year={2023}, month={Jun.}, pages={7423-7431} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25903/25675", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25903", + "pdf_size": 234608, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3130395229405060133&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "stanford.edu;stanford.edu;stanford.edu;stanford.edu", + "email": "stanford.edu;stanford.edu;stanford.edu;stanford.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27056", + "title": "Model-Based Offline Weighted Policy Optimization (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "A promising direction for applying reinforcement learning to the real world is learning from offline datasets. Offline reinforcement learning aims to learn policies from pre-collected datasets without online interaction with the environment. 
Due to the lack of further interaction, offline reinforcement learning faces severe extrapolation error, leading to policy learning failure. In this paper, we investigate the weighted Bellman update in model-based offline reinforcement learning. We explore uncertainty estimation in ensemble dynamics models, then use a variational autoencoder to fit the behavioral prior, and finally propose an algorithm called Model-Based Offline Weighted Policy Optimization (MOWPO), which uses a combination of model confidence and behavioral prior as weights to reduce the impact of inaccurate samples on policy optimization. Experiment results show that MOWPO achieves better performance than state-of-the-art algorithms, and both the model confidence weight and the behavioral prior weight can play an active role in offline policy optimization.", + "primary_area": "", + "author": "Renzhe Zhou; Zongzhang Zhang; Yang Yu", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Zhou_Zhang_Yu_2024, title={Model-Based Offline Weighted Policy Optimization (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27056}, DOI={10.1609/aaai.v37i13.27056}, abstractNote={A promising direction for applying reinforcement learning to the real world is learning from offline datasets. Offline reinforcement learning aims to learn policies from pre-collected datasets without online interaction with the environment. Due to the lack of further interaction, offline reinforcement learning faces severe extrapolation error, leading to policy learning failure. In this paper, we investigate the weighted Bellman update in model-based offline reinforcement learning. 
We explore uncertainty estimation in ensemble dynamics models, then use a variational autoencoder to fit the behavioral prior, and finally propose an algorithm called Model-Based Offline Weighted Policy Optimization (MOWPO), which uses a combination of model confidence and behavioral prior as weights to reduce the impact of inaccurate samples on policy optimization. Experiment results show that MOWPO achieves better performance than state-of-the-art algorithms, and both the model confidence weight and the behavioral prior weight can play an active role in offline policy optimization.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Renzhe and Zhang, Zongzhang and Yu, Yang}, year={2024}, month={Jul.}, pages={16392-16393} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27056/26828", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27056", + "pdf_size": 150325, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=771330097702750020&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25964", + "title": "Model-Based Reinforcement Learning with Multinomial Logistic Function Approximation", + "track": "main", + "status": "Technical", + "abstract": "We study model-based reinforcement learning (RL) for episodic Markov decision processes (MDP) whose transition probability is parametrized by an unknown transition 
core with features of state and action. Despite much recent progress in analyzing algorithms in the linear MDP setting, the understanding of more general transition models is very restrictive. In this paper, we propose a provably efficient RL algorithm for the MDP whose state transition is given by a multinomial logistic model. We show that our proposed algorithm based on the upper confidence bounds achieves O(d\u221a(H^3 T)) regret bound where d is the dimension of the transition core, H is the horizon, and T is the total number of steps. To the best of our knowledge, this is the first model-based RL algorithm with multinomial logistic function approximation with provable guarantees. We also comprehensively evaluate our proposed algorithm numerically and show that it consistently outperforms the existing methods, hence achieving both provable efficiency and practical superior performance.", + "primary_area": "machine learning ii", + "author": "Taehyun Hwang; Min-hwan Oh", + "authorids": "", + "aff": "Seoul National University, Seoul, Republic of Korea; Seoul National University, Seoul, Republic of Korea", + "bibtex": "@article{Hwang_Oh_2023, title={Model-Based Reinforcement Learning with Multinomial Logistic Function Approximation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25964}, DOI={10.1609/aaai.v37i7.25964}, abstractNote={We study model-based reinforcement learning (RL) for episodic Markov decision processes (MDP) whose transition probability is parametrized by an unknown transition core with features of state and action. Despite much recent progress in analyzing algorithms in the linear MDP setting, the understanding of more general transition models is very restrictive. In this paper, we propose a provably efficient RL algorithm for the MDP whose state transition is given by a multinomial logistic model. 
We show that our proposed algorithm based on the upper confidence bounds achieves O(d\u221a(H^3 T)) regret bound where d is the dimension of the transition core, H is the horizon, and T is the total number of steps. To the best of our knowledge, this is the first model-based RL algorithm with multinomial logistic function approximation with provable guarantees. We also comprehensively evaluate our proposed algorithm numerically and show that it consistently outperforms the existing methods, hence achieving both provable efficiency and practical superior performance.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hwang, Taehyun and Oh, Min-hwan}, year={2023}, month={Jun.}, pages={7971-7979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25964/25736", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25964", + "pdf_size": 255563, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15138724746592989171&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Seoul National University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.snu.ac.kr", + "aff_unique_abbr": "SNU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-25776", + "title": "Model-Checking for Ability-Based Logics with Constrained Plans", + "track": "main", + "status": "Technical", + "abstract": "We investigate the complexity of the model-checking problem for a family of modal logics capturing the notion of \u201cknowing how\u201d. We consider the most standard ability-based knowing how logic, for which we show that model-checking is PSpace-complete. 
By contrast, a multi-agent variant based on an uncertainty relation between plans in which uncertainty is encoded by a regular language, is shown to admit a PTime model-checking problem. We extend with budgets the above-mentioned ability-logics, as done for ATL-like logics. We show that for the former logic enriched with budgets, the complexity increases to at least ExpSpace-hardness, whereas for the latter, the PTime bound is preserved. Other variant logics are discussed along the paper.", + "primary_area": "knowledge representation and reasoning", + "author": "St\u00e9phane Demri; Raul Fervari", + "authorids": "", + "aff": "Universit \u00b4e Paris-Saclay, CNRS, ENS Paris-Saclay, Laboratoire M \u00b4ethodes Formelles, 91190, Gif-Sur-Yvette, France; FAMAF, Universidad Nacional de C \u00b4ordoba & CONICET, Argentina + Guangdong Technion - Israel Institute of Technology, China", + "bibtex": "@article{Demri_Fervari_2023, title={Model-Checking for Ability-Based Logics with Constrained Plans}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25776}, DOI={10.1609/aaai.v37i5.25776}, abstractNote={We investigate the complexity of the model-checking problem for a family of modal logics capturing the notion of \u201cknowing how\u201d. We consider the most standard ability-based knowing how logic, for which we show that model-checking is PSpace-complete. By contrast, a multi-agent variant based on an uncertainty relation between plans in which uncertainty is encoded by a regular language, is shown to admit a PTime model-checking problem. We extend with budgets the above-mentioned ability-logics, as done for ATL-like logics. We show that for the former logic enriched with budgets, the complexity increases to at least ExpSpace-hardness, whereas for the latter, the PTime bound is preserved. 
Other variant logics are discussed along the paper.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Demri, St\u00e9phane and Fervari, Raul}, year={2023}, month={Jun.}, pages={6305-6312} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25776/25548", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25776", + "pdf_size": 213670, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18174447040391397310&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+2", + "aff_unique_norm": "Universit\u00e9 Paris-Saclay;Universidad Nacional de Cordoba;Guangdong Technion - Israel Institute of Technology", + "aff_unique_dep": "Laboratoire M\u00e9thodes Formelles;FAMAF;", + "aff_unique_url": "https://www.universite-paris-saclay.fr;;", + "aff_unique_abbr": "UPS;;", + "aff_campus_unique_index": "0;", + "aff_campus_unique": "Gif-Sur-Yvette;", + "aff_country_unique_index": "0;1+2", + "aff_country_unique": "France;Argentina;China" + }, + { + "id": "article-25748", + "title": "Modeling Human Trust and Reliance in AI-Assisted Decision Making: A Markovian Approach", + "track": "main", + "status": "Technical", + "abstract": "The increased integration of artificial intelligence (AI) technologies in human workflows has resulted in a new paradigm of AI-assisted decision making,\nin which an AI model provides decision recommendations while humans make the final decisions. To best support humans in decision making, it is critical to obtain a quantitative understanding of how humans interact with and rely on AI. Previous studies often model humans' reliance on AI as an analytical process, i.e., reliance decisions are made based on cost-benefit analysis. 
However, theoretical models in psychology suggest that the reliance decisions can often be driven by emotions like humans' trust in AI models. In this paper, we propose a hidden Markov model to capture the affective process underlying the human-AI interaction in AI-assisted decision making, by characterizing how decision makers adjust their trust in AI over time and make reliance decisions based on their trust. Evaluations on real human behavior data collected from human-subject experiments show that the proposed model outperforms various baselines in accurately predicting humans' reliance behavior in AI-assisted decision making. Based on the proposed model, we further provide insights into how humans' trust and reliance dynamics in AI-assisted decision making is influenced by contextual factors like decision stakes and their interaction experiences.", + "primary_area": "humans and ai", + "author": "Zhuoyan Li; Zhuoran Lu; Ming Yin", + "authorids": "", + "aff": "Purdue University; Purdue University; Purdue University", + "bibtex": "@article{Li_Lu_Yin_2023, title={Modeling Human Trust and Reliance in AI-Assisted Decision Making: A Markovian Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25748}, DOI={10.1609/aaai.v37i5.25748}, abstractNote={The increased integration of artificial intelligence (AI) technologies in human workflows has resulted in a new paradigm of AI-assisted decision making,\nin which an AI model provides decision recommendations while humans make the final decisions. To best support humans in decision making, it is critical to obtain a quantitative understanding of how humans interact with and rely on AI. Previous studies often model humans\u2019 reliance on AI as an analytical process, i.e., reliance decisions are made based on cost-benefit analysis. However, theoretical models in psychology suggest that the reliance decisions can often be driven by emotions like humans\u2019 trust in AI models. 
In this paper, we propose a hidden Markov model to capture the affective process underlying the human-AI interaction in AI-assisted decision making, by characterizing how decision makers adjust their trust in AI over time and make reliance decisions based on their trust. Evaluations on real human behavior data collected from human-subject experiments show that the proposed model outperforms various baselines in accurately predicting humans\u2019 reliance behavior in AI-assisted decision making. Based on the proposed model, we further provide insights into how humans\u2019 trust and reliance dynamics in AI-assisted decision making is influenced by contextual factors like decision stakes and their interaction experiences.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zhuoyan and Lu, Zhuoran and Yin, Ming}, year={2023}, month={Jun.}, pages={6056-6064} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25748/25520", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25748", + "pdf_size": 2203449, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10306798818998345095&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "purdue.edu;purdue.edu;purdue.edu", + "email": "purdue.edu;purdue.edu;purdue.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Purdue University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.purdue.edu", + "aff_unique_abbr": "Purdue", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26936", + "title": "Modeling Metacognitive and Cognitive Processes in Data Science Problem Solving (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Data Science (DS) is an interdisciplinary topic 
that is applicable to many domains. In this preliminary investigation, we use caselet, a mini-version of a case study, as a learning tool to allow students to practice data science problem solving (DSPS). Using a dataset collected from a real-world classroom, we performed correlation analysis to reveal the structure of cognition and metacognition processes. We also explored the similarity of different DS knowledge components based on students\u2019 performance. In addition, we built a predictive model to characterize the relationship between metacognition, cognition, and learning gain.", + "primary_area": "", + "author": "Maryam Alomair; Shimei Pan; Lujie Karen Chen", + "authorids": "", + "aff": "University of Maryland, Baltimore County+King Faisal University; University of Maryland, Baltimore County; University of Maryland, Baltimore County", + "bibtex": "@article{Alomair_Pan_Chen_2024, title={Modeling Metacognitive and Cognitive Processes in Data Science Problem Solving (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26936}, DOI={10.1609/aaai.v37i13.26936}, abstractNote={Data Science (DS) is an interdisciplinary topic that is applicable to many domains. In this preliminary investigation, we use caselet, a mini-version of a case study, as a learning tool to allow students to practice data science problem solving (DSPS). Using a dataset collected from a real-world classroom, we performed correlation analysis to reveal the structure of cognition and metacognition processes. We also explored the similarity of different DS knowledge components based on students\u2019 performance. 
In addition, we built a predictive model to characterize the relationship between metacognition, cognition, and learning gain.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Alomair, Maryam and Pan, Shimei and Chen, Lujie Karen}, year={2024}, month={Jul.}, pages={16152-16153} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26936/26708", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26936", + "pdf_size": 3507811, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:lPXnqeVukUYJ:scholar.google.com/&scioq=Modeling+Metacognitive+and+Cognitive+Processes+in+Data+Science+Problem+Solving+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff_domain": "umbc.edu;umbc.edu;umbc.edu", + "email": "umbc.edu;umbc.edu;umbc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "University of Maryland, Baltimore County;King Faisal University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.umbc.edu;https://www.kfu.edu.sa", + "aff_unique_abbr": "UMBC;KFU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Baltimore County;", + "aff_country_unique_index": "0+1;0;0", + "aff_country_unique": "United States;Saudi Arabia" + }, + { + "id": "article-26915", + "title": "Modeling Strategies as Programs: How to Study Strategy Differences in Intelligent Systems with Program Synthesis", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "When faced with novel tasks, humans have the ability to form successful strategies, seemingly without much effort. Artificial systems, on the other, hand cannot, at least when the flexibility at which humans perform is considered. For my dissertation, I am using program synthesis as a tool to study the factors that affect strategy choices in intelligent systems. 
I am evaluating my work through agents that reason through problems from the Abstract Reasoning Corpus and The Block Design Task.", + "primary_area": "", + "author": "James Ainooson", + "authorids": "", + "aff": "Department of Computer Science, Vanderbilt University", + "bibtex": "@article{Ainooson_2024, title={Modeling Strategies as Programs: How to Study Strategy Differences in Intelligent Systems with Program Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26915}, DOI={10.1609/aaai.v37i13.26915}, abstractNote={When faced with novel tasks, humans have the ability to form successful strategies, seemingly without much effort. Artificial systems, on the other, hand cannot, at least when the flexibility at which humans perform is considered. For my dissertation, I am using program synthesis as a tool to study the factors that affect strategy choices in intelligent systems. I am evaluating my work through agents that reason through problems from the Abstract Reasoning Corpus and The Block Design Task.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ainooson, James}, year={2024}, month={Jul.}, pages={16109-16110} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26915/26687", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26915", + "pdf_size": 57599, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:u7nAhiytEkcJ:scholar.google.com/&scioq=Modeling+Strategies+as+Programs:+How+to+Study+Strategy+Differences+in+Intelligent+Systems+with+Program+Synthesis&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "vanderbilt.edu", + "email": "vanderbilt.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Vanderbilt University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.vanderbilt.edu", + "aff_unique_abbr": "Vanderbilt", + 
"aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26241", + "title": "Models as Agents: Optimizing Multi-Step Predictions of Interactive Local Models in Model-Based Multi-Agent Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Research in model-based reinforcement learning has made significant progress in recent years. Compared to single-agent settings, the exponential dimension growth of the joint state-action space in multi-agent systems dramatically increases the complexity of the environment dynamics, which makes it infeasible to learn an accurate global model and thus necessitates the use of agent-wise local models. However, during multi-step model rollouts, the prediction of one local model can affect the predictions of other local models in the next step. As a result, local prediction errors can be propagated to other localities and eventually give rise to considerably large global errors. Furthermore, since the models are generally used to predict for multiple steps, simply minimizing one-step prediction errors regardless of their long-term effect on other models may further aggravate the propagation of local errors. To this end, we propose Models as AGents (MAG), a multi-agent model optimization framework that reversely treats the local models as multi-step decision making agents and the current policies as the dynamics during the model rollout process. In this way, the local models are able to consider the multi-step mutual affect between each other before making predictions. Theoretically, we show that the objective of MAG is approximately equivalent to maximizing a lower bound of the true environment return. 
Experiments on the challenging StarCraft II benchmark demonstrate the effectiveness of MAG.", + "primary_area": "machine learning iv", + "author": "Zifan Wu; Chao Yu; Chen Chen; Jianye Hao; Hankz Hankui Zhuo", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, Guangdong, China+Pengcheng Laboratory, Shenzhen, Guangdong, China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, Guangdong, China+Pengcheng Laboratory, Shenzhen, Guangdong, China; Huawei Noah\u2019s Ark Lab, Beijing, China; Huawei Noah\u2019s Ark Lab, Beijing, China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, Guangdong, China", + "bibtex": "@article{Wu_Yu_Chen_Hao_Zhuo_2023, title={Models as Agents: Optimizing Multi-Step Predictions of Interactive Local Models in Model-Based Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26241}, DOI={10.1609/aaai.v37i9.26241}, abstractNote={Research in model-based reinforcement learning has made significant progress in recent years. Compared to single-agent settings, the exponential dimension growth of the joint state-action space in multi-agent systems dramatically increases the complexity of the environment dynamics, which makes it infeasible to learn an accurate global model and thus necessitates the use of agent-wise local models. However, during multi-step model rollouts, the prediction of one local model can affect the predictions of other local models in the next step. As a result, local prediction errors can be propagated to other localities and eventually give rise to considerably large global errors. Furthermore, since the models are generally used to predict for multiple steps, simply minimizing one-step prediction errors regardless of their long-term effect on other models may further aggravate the propagation of local errors. 
To this end, we propose Models as AGents (MAG), a multi-agent model optimization framework that reversely treats the local models as multi-step decision making agents and the current policies as the dynamics during the model rollout process. In this way, the local models are able to consider the multi-step mutual affect between each other before making predictions. Theoretically, we show that the objective of MAG is approximately equivalent to maximizing a lower bound of the true environment return. Experiments on the challenging StarCraft II benchmark demonstrate the effectiveness of MAG.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Zifan and Yu, Chao and Chen, Chen and Hao, Jianye and Zhuo, Hankz Hankui}, year={2023}, month={Jun.}, pages={10435-10443} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26241/26013", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26241", + "pdf_size": 697706, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12894730231218263772&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn;163.com;huawei.com", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn;163.com;huawei.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;2;0", + "aff_unique_norm": "Sun Yat-sen University;Pengcheng Laboratory;Huawei Noah\u2019s Ark Lab", + "aff_unique_dep": "School of Computer Science and Engineering;;", + "aff_unique_url": "http://www.sysu.edu.cn;;https://www.huawei.com/en/ai/noahs-ark-lab", + "aff_unique_abbr": "SYSU;;HNA Lab", + "aff_campus_unique_index": "0+1;0+1;2;2;0", + "aff_campus_unique": "Guangzhou;Shenzhen;Beijing", + "aff_country_unique_index": "0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25662", + "title": "Molformer: Motif-Based Transformer on 3D Heterogeneous Molecular 
Graphs", + "track": "main", + "status": "Technical", + "abstract": "Procuring expressive molecular representations underpins AI-driven molecule design and scientific discovery. The research mainly focuses on atom-level homogeneous molecular graphs, ignoring the rich information in subgraphs or motifs. However, it has been widely accepted that substructures play a dominant role in identifying and determining molecular properties. To address such issues, we formulate heterogeneous molecular graphs (HMGs) and introduce a novel architecture to exploit both molecular motifs and 3D geometry. Precisely, we extract functional groups as motifs for small molecules and employ reinforcement learning to adaptively select quaternary amino acids as motif candidates for proteins. Then HMGs are constructed with both atom-level and motif-level nodes. To better accommodate those HMGs, we introduce a variant of the Transformer named Molformer, which adopts a heterogeneous self-attention layer to distinguish the interactions between multi-level nodes. Besides, it is also coupled with a multi-scale mechanism to capture fine-grained local patterns with increasing contextual scales. An attentive farthest point sampling algorithm is also proposed to obtain the molecular representations. We validate Molformer across a broad range of domains, including quantum chemistry, physiology, and biophysics. Extensive experiments show that Molformer outperforms or achieves the comparable performance of several state-of-the-art baselines. Our work provides a promising way to utilize informative motifs from the perspective of multi-level graph construction. The code is available at https://github.com/smiles724/Molformer.", + "primary_area": "domain s of application", + "author": "Fang Wu; Dragomir Radev; Stan Z. 
Li", + "authorids": "", + "aff": "School of Engineering, Westlake University + Institute of AI Industry Research, Tsinghua University; Department of Computer Science, Yale University; School of Engineering, Westlake University", + "bibtex": "@article{Wu_Radev_Li_2023, title={Molformer: Motif-Based Transformer on 3D Heterogeneous Molecular Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25662}, DOI={10.1609/aaai.v37i4.25662}, abstractNote={Procuring expressive molecular representations underpins AI-driven molecule design and scientific discovery. The research mainly focuses on atom-level homogeneous molecular graphs, ignoring the rich information in subgraphs or motifs. However, it has been widely accepted that substructures play a dominant role in identifying and determining molecular properties. To address such issues, we formulate heterogeneous molecular graphs (HMGs) and introduce a novel architecture to exploit both molecular motifs and 3D geometry. Precisely, we extract functional groups as motifs for small molecules and employ reinforcement learning to adaptively select quaternary amino acids as motif candidates for proteins. Then HMGs are constructed with both atom-level and motif-level nodes. To better accommodate those HMGs, we introduce a variant of the Transformer named Molformer, which adopts a heterogeneous self-attention layer to distinguish the interactions between multi-level nodes. Besides, it is also coupled with a multi-scale mechanism to capture fine-grained local patterns with increasing contextual scales. An attentive farthest point sampling algorithm is also proposed to obtain the molecular representations. We validate Molformer across a broad range of domains, including quantum chemistry, physiology, and biophysics. Extensive experiments show that Molformer outperforms or achieves the comparable performance of several state-of-the-art baselines. 
Our work provides a promising way to utilize informative motifs from the perspective of multi-level graph construction. The code is available at https://github.com/smiles724/Molformer.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Fang and Radev, Dragomir and Li, Stan Z.}, year={2023}, month={Jun.}, pages={5312-5320} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25662/25434", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25662", + "pdf_size": 314923, + "gs_citation": 66, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13177322193979222715&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "columbia.edu;yale.edu;westlake.edu.cn", + "email": "columbia.edu;yale.edu;westlake.edu.cn", + "github": "https://github.com/smiles724/Molformer", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0", + "aff_unique_norm": "Westlake University;Tsinghua University;Yale University", + "aff_unique_dep": "School of Engineering;Institute of AI Industry Research;Department of Computer Science", + "aff_unique_url": "https://www.westlake.edu.cn;https://www.tsinghua.edu.cn;https://www.yale.edu", + "aff_unique_abbr": ";THU;Yale", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25781", + "title": "Monitoring Arithmetic Temporal Properties on Finite Traces", + "track": "main", + "status": "Technical", + "abstract": "We study monitoring of linear-time arithmetic properties against finite traces generated by an unknown dynamic system. The monitoring state is determined by considering at once the trace prefix seen so far, and all its possible finite-length, future continuations. This makes monitoring at least as hard as satisfiability and validity. 
Traces consist of finite sequences of assignments of a fixed set of variables to numerical values. Properties are specified in a logic we call ALTLf, combining LTLf (LTL on finite traces) with linear arithmetic constraints that may carry lookahead, i.e., variables may be compared over multiple instants of the trace. While the monitoring problem for this setting is undecidable in general, we show decidability for (a) properties without lookahead, and (b) properties with lookahead that satisfy the abstract, semantic condition of finite summary, studied before in the context of model checking. We then single out concrete, practically relevant classes of constraints guaranteeing finite summary. Feasibility is witnessed by a prototype implementation.", + "primary_area": "knowledge representation and reasoning", + "author": "Paolo Felli; Marco Montali; Fabio Patrizi; Sarah Winkler", + "authorids": "", + "aff": "Universit `a di Bologna \u2013 Bologna \u2013 Italy; Free University of Bozen-Bolzano \u2013 Bolzano \u2013 Italy; Sapienza Universit `a di Roma \u2013 Rome \u2013 Italy; Free University of Bozen-Bolzano \u2013 Bolzano \u2013 Italy", + "bibtex": "@article{Felli_Montali_Patrizi_Winkler_2023, title={Monitoring Arithmetic Temporal Properties on Finite Traces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25781}, DOI={10.1609/aaai.v37i5.25781}, abstractNote={We study monitoring of linear-time arithmetic properties against finite traces generated by an unknown dynamic system. The monitoring state is determined by considering at once the trace prefix seen so far, and all its possible finite-length, future continuations. This makes monitoring at least as hard as satisfiability and validity. Traces consist of finite sequences of assignments of a fixed set of variables to numerical values. 
Properties are specified in a logic we call ALTLf, combining LTLf (LTL on finite traces) with linear arithmetic constraints that may carry lookahead, i.e., variables may be compared over multiple instants of the trace. While the monitoring problem for this setting is undecidable in general, we show decidability for (a) properties without lookahead, and (b) properties with lookahead that satisfy the abstract, semantic condition of finite summary, studied before in the context of model checking. We then single out concrete, practically relevant classes of constraints guaranteeing finite summary. Feasibility is witnessed by a prototype implementation.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Felli, Paolo and Montali, Marco and Patrizi, Fabio and Winkler, Sarah}, year={2023}, month={Jun.}, pages={6346-6354} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25781/25553", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25781", + "pdf_size": 264183, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10685541502242359969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 15, + "aff_domain": "unibo.it;inf.unibz.it;diag.uniroma1.it;inf.unibz.it", + "email": "unibo.it;inf.unibz.it;diag.uniroma1.it;inf.unibz.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Universit\u00e0 di Bologna;Free University of Bozen-Bolzano;Sapienza University of Rome", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.unibo.it;https://www.unibz.it;https://www.uniroma1.it", + "aff_unique_abbr": "Unibo;UNIBZ;Sapienza", + "aff_campus_unique_index": "0;1;2;1", + "aff_campus_unique": "Bologna;Bolzano;Rome", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26755", + "title": "Monitoring Model Deterioration with Explainable Uncertainty Estimation via Non-parametric 
Bootstrap", + "track": "aaai special track", + "status": "Technical", + "abstract": "Monitoring machine learning models once they are deployed\nis challenging. It is even more challenging to decide when\nto retrain models in real-case scenarios when labeled data is\nbeyond reach, and monitoring performance metrics becomes\nunfeasible. In this work, we use non-parametric bootstrapped\nuncertainty estimates and SHAP values to provide explainable\nuncertainty estimation as a technique that aims to monitor\nthe deterioration of machine learning models in deployment\nenvironments, as well as determine the source of model deteri-\noration when target labels are not available. Classical methods\nare purely aimed at detecting distribution shift, which can lead\nto false positives in the sense that the model has not deterio-\nrated despite a shift in the data distribution. To estimate model\nuncertainty we construct prediction intervals using a novel\nbootstrap method, which improves previous state-of-the-art\nwork. We show that both our model deterioration detection\nsystem as well as our uncertainty estimation method achieve\nbetter performance than the current state-of-the-art. Finally,\nwe use explainable AI techniques to gain an understanding\nof the drivers of model deterioration. We release an open\nsource Python package, doubt, which implements our pro-\nposed methods, as well as the code used to reproduce our\nexperiments.", + "primary_area": "safe and robust ai", + "author": "Carlos Mougan; Dan Saattrup Nielsen", + "authorids": "", + "aff": "University of Southampton, United Kingdom; The Alexandra Institute, Denmark", + "bibtex": "@article{Mougan_Nielsen_2023, title={Monitoring Model Deterioration with Explainable Uncertainty Estimation via Non-parametric Bootstrap}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26755}, DOI={10.1609/aaai.v37i12.26755}, abstractNote={Monitoring machine learning models once they are deployed\nis challenging. 
It is even more challenging to decide when\nto retrain models in real-case scenarios when labeled data is\nbeyond reach, and monitoring performance metrics becomes\nunfeasible. In this work, we use non-parametric bootstrapped\nuncertainty estimates and SHAP values to provide explainable\nuncertainty estimation as a technique that aims to monitor\nthe deterioration of machine learning models in deployment\nenvironments, as well as determine the source of model deteri-\noration when target labels are not available. Classical methods\nare purely aimed at detecting distribution shift, which can lead\nto false positives in the sense that the model has not deterio-\nrated despite a shift in the data distribution. To estimate model\nuncertainty we construct prediction intervals using a novel\nbootstrap method, which improves previous state-of-the-art\nwork. We show that both our model deterioration detection\nsystem as well as our uncertainty estimation method achieve\nbetter performance than the current state-of-the-art. Finally,\nwe use explainable AI techniques to gain an understanding\nof the drivers of model deterioration. 
We release an open\nsource Python package, doubt, which implements our pro-\nposed methods, as well as the code used to reproduce our\nexperiments.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mougan, Carlos and Nielsen, Dan Saattrup}, year={2023}, month={Jun.}, pages={15037-15045} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26755/26527", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26755", + "pdf_size": 387397, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15302043297049039968&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "southampton.ac.uk;alexandra.dk", + "email": "southampton.ac.uk;alexandra.dk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Southampton;The Alexandra Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.southampton.ac.uk;https://www.alexandra.dk", + "aff_unique_abbr": "Southampton;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United Kingdom;Denmark" + }, + { + "id": "article-26817", + "title": "Monitoring and Intervening on Large Populations of Weakly Coupled Processes with Social Impact Applications", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Many real-world sequential decision problems can be decomposed into processes with independent dynamics that are coupled via the action structure. 
We discuss recent work on such problems and future directions.", + "primary_area": "", + "author": "Andrew Perrault", + "authorids": "", + "aff": "The Ohio State University", + "bibtex": "@article{Perrault_2024, title={Monitoring and Intervening on Large Populations of Weakly Coupled Processes with Social Impact Applications}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26817}, DOI={10.1609/aaai.v37i13.26817}, abstractNote={Many real-world sequential decision problems can be decomposed into processes with independent dynamics that are coupled via the action structure. We discuss recent work on such problems and future directions.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Perrault, Andrew}, year={2024}, month={Jul.}, pages={15450-15450} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26817/26589", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26817", + "pdf_size": 48322, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:SHEIieTWSKMJ:scholar.google.com/&scioq=Monitoring+and+Intervening+on+Large+Populations+of+Weakly+Coupled+Processes+with+Social+Impact+Applications&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "osu.edu", + "email": "osu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "The Ohio State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.osu.edu", + "aff_unique_abbr": "OSU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25739", + "title": "Moral Machine or Tyranny of the Majority?", + "track": "main", + "status": "Technical", + "abstract": "With artificial intelligence systems increasingly applied in consequential domains, researchers have begun to ask how AI systems ought to act in ethically charged situations where even humans lack consensus. 
In the Moral Machine project, researchers crowdsourced answers to \"Trolley Problems\" concerning autonomous vehicles. Subsequently, Noothigattu et al. (2018) proposed inferring linear functions that approximate each individual's preferences and aggregating these linear models by averaging parameters across the population. In this paper, we examine this averaging mechanism, focusing on fairness concerns and strategic effects. We investigate a simple setting where the population consists of two groups, the minority constitutes an \u03b1 < 0.5 share of the population, and within-group preferences are homogeneous. Focusing on the fraction of contested cases where the minority group prevails, we make the following observations: (a) even when all parties report their preferences truthfully, the fraction of disputes where the minority prevails is less than proportionate in \u03b1; (b) the degree of sub-proportionality grows more severe as the level of disagreement between the groups increases; (c) when parties report preferences strategically, pure strategy equilibria do not always exist; and (d) whenever a pure strategy equilibrium exists, the majority group prevails 100% of the time. These findings raise concerns about stability and fairness of averaging as a mechanism for aggregating diverging voices. Finally, we discuss alternatives, including randomized dictatorship and median-based mechanisms.", + "primary_area": "humans and ai", + "author": "Michael Feffer; Hoda Heidari; Zachary C. 
Lipton", + "authorids": "", + "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", + "bibtex": "@article{Feffer_Heidari_Lipton_2023, title={Moral Machine or Tyranny of the Majority?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25739}, DOI={10.1609/aaai.v37i5.25739}, abstractNote={With artificial intelligence systems increasingly applied in consequential domains, researchers have begun to ask how AI systems ought to act in ethically charged situations where even humans lack consensus. In the Moral Machine project, researchers crowdsourced answers to "Trolley Problems" concerning autonomous vehicles. Subsequently, Noothigattu et al. (2018) proposed inferring linear functions that approximate each individual\u2019s preferences and aggregating these linear models by averaging parameters across the population. In this paper, we examine this averaging mechanism, focusing on fairness concerns and strategic effects. We investigate a simple setting where the population consists of two groups, the minority constitutes an \u03b1 < 0.5 share of the population, and within-group preferences are homogeneous. Focusing on the fraction of contested cases where the minority group prevails, we make the following observations: (a) even when all parties report their preferences truthfully, the fraction of disputes where the minority prevails is less than proportionate in \u03b1; (b) the degree of sub-proportionality grows more severe as the level of disagreement between the groups increases; (c) when parties report preferences strategically, pure strategy equilibria do not always exist; and (d) whenever a pure strategy equilibrium exists, the majority group prevails 100% of the time. These findings raise concerns about stability and fairness of averaging as a mechanism for aggregating diverging voices. 
Finally, we discuss alternatives, including randomized dictatorship and median-based mechanisms.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Feffer, Michael and Heidari, Hoda and Lipton, Zachary C.}, year={2023}, month={Jun.}, pages={5974-5982} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25739/25511", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25739", + "pdf_size": 237306, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2632540756335760201&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25761", + "title": "Moving-Landmark Assisted Distributed Learning Based Decentralized Cooperative Localization (DL-DCL) with Fault Tolerance", + "track": "main", + "status": "Technical", + "abstract": "This paper considers the problem of cooperative localization of multiple robots under uncertainty, communicating over a partially connected, dynamic communication network and assisted by an agile landmark. Each robot owns an IMU and a relative pose sensing suite, which can get faulty due to system or environmental uncertainty, and therefore exhibit large bias in their estimation output. 
For the robots to localize accurately under sensor failure and system or environmental uncertainty, a novel Distributed Learning based Decentralized Cooperative Localization (DL-DCL) algorithm is proposed that involves real-time learning of an information fusion strategy by each robot for combining pose estimates from its own sensors as well as from those of its neighboring robots, and utilizing the moving landmark's pose information as a feedback to the learning process. Convergence analysis shows that the learning process converges exponentially under certain reasonable assumptions. Simulations involving sensor failures inducing around 40-60 times increase in the nominal bias show DL-DCL's estimation performance to be approximately 40% better than the well-known covariance-based estimate fusion methods. For the evaluation of DL-DCL's implementability and fault-tolerance capability in practice, a high-fidelity simulation is carried out in Gazebo with ROS2.", + "primary_area": "intelligent robotics", + "author": "Shubhankar Gupta; Suresh Sundaram", + "authorids": "", + "aff": "Artificial Intelligence and Robotics Lab (AIRL), Department of Aerospace Engineering, Indian Institute of Science, Bengaluru, Karnataka, India; Artificial Intelligence and Robotics Lab (AIRL), Department of Aerospace Engineering, Indian Institute of Science, Bengaluru, Karnataka, India", + "bibtex": "@article{Gupta_Sundaram_2023, title={Moving-Landmark Assisted Distributed Learning Based Decentralized Cooperative Localization (DL-DCL) with Fault Tolerance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25761}, DOI={10.1609/aaai.v37i5.25761}, abstractNote={This paper considers the problem of cooperative localization of multiple robots under uncertainty, communicating over a partially connected, dynamic communication network and assisted by an agile landmark. 
Each robot owns an IMU and a relative pose sensing suite, which can get faulty due to system or environmental uncertainty, and therefore exhibit large bias in their estimation output. For the robots to localize accurately under sensor failure and system or environmental uncertainty, a novel Distributed Learning based Decentralized Cooperative Localization (DL-DCL) algorithm is proposed that involves real-time learning of an information fusion strategy by each robot for combining pose estimates from its own sensors as well as from those of its neighboring robots, and utilizing the moving landmark\u2019s pose information as a feedback to the learning process. Convergence analysis shows that the learning process converges exponentially under certain reasonable assumptions. Simulations involving sensor failures inducing around 40-60 times increase in the nominal bias show DL-DCL\u2019s estimation performance to be approximately 40% better than the well-known covariance-based estimate fusion methods. 
For the evaluation of DL-DCL\u2019s implementability and fault-tolerance capability in practice, a high-fidelity simulation is carried out in Gazebo with ROS2.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gupta, Shubhankar and Sundaram, Suresh}, year={2023}, month={Jun.}, pages={6175-6182} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25761/25533", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25761", + "pdf_size": 511154, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9310795845927867319&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "iisc.ac.in;iisc.ac.in", + "email": "iisc.ac.in;iisc.ac.in", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Indian Institute of Science", + "aff_unique_dep": "Department of Aerospace Engineering", + "aff_unique_url": "https://www.iisc.ac.in", + "aff_unique_abbr": "IISc", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Bengaluru", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26850", + "title": "MuMIC \u2013 Multimodal Embedding for Multi-Label Image Classification with Tempered Sigmoid", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Multi-label image classification is a foundational topic in various domains. Multimodal learning approaches have recently achieved outstanding results in image representation and single-label image classification. For instance, Contrastive Language-Image Pretraining (CLIP) demonstrates impressive image-text representation learning abilities and is robust to natural distribution shifts. 
This success inspires us to leverage multimodal learning for multi-label classification tasks, and benefit from contrastively learnt pretrained models.\n\nWe propose the Multimodal Multi-label Image Classification (MuMIC) framework, which utilizes a hardness-aware tempered sigmoid based Binary Cross Entropy loss function, thus enables the optimization on multi-label objectives and transfer learning on CLIP. MuMIC is capable of providing high classification performance, handling real-world noisy data, supporting zero-shot predictions, and producing domain-specific image embeddings.\n\nIn this study, a total of 120 image classes are defined, and more than 140K positive annotations are collected on approximately 60K Booking.com images. The final MuMIC model is deployed on Booking.com Content Intelligence Platform, and it outperforms other state-of-the-art models with 85.6% GAP@10 and 83.8% GAP on all 120 classes, as well as a 90.1% macro mAP score across 32 majority classes. We summarize the modelling choices which are extensively tested through ablation studies. 
To the best of our knowledge, we are the first to adapt contrastively learnt multimodal pretraining for real-world multi-label image classification problems, and the innovation can be transferred to other domains.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Fengjun Wang; Sarai Mizrachi; Moran Beladev; Guy Nadav; Gil Amsalem; Karen Lastmann Assaraf; Hadas Harush Boker", + "authorids": "", + "aff": "Booking.com; Booking.com; Booking.com; Booking.com; Booking.com; Booking.com; Booking.com", + "bibtex": "@article{Wang_Mizrachi_Beladev_Nadav_Amsalem_Lastmann Assaraf_Harush Boker_2024, title={MuMIC \u2013 Multimodal Embedding for Multi-Label Image Classification with Tempered Sigmoid}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26850}, DOI={10.1609/aaai.v37i13.26850}, abstractNote={Multi-label image classification is a foundational topic in various domains. Multimodal learning approaches have recently achieved outstanding results in image representation and single-label image classification. For instance, Contrastive Language-Image Pretraining (CLIP) demonstrates impressive image-text representation learning abilities and is robust to natural distribution shifts. This success inspires us to leverage multimodal learning for multi-label classification tasks, and benefit from contrastively learnt pretrained models. We propose the Multimodal Multi-label Image Classification (MuMIC) framework, which utilizes a hardness-aware tempered sigmoid based Binary Cross Entropy loss function, thus enables the optimization on multi-label objectives and transfer learning on CLIP. MuMIC is capable of providing high classification performance, handling real-world noisy data, supporting zero-shot predictions, and producing domain-specific image embeddings. In this study, a total of 120 image classes are defined, and more than 140K positive annotations are collected on approximately 60K Booking.com images. 
The final MuMIC model is deployed on Booking.com Content Intelligence Platform, and it outperforms other state-of-the-art models with 85.6% GAP@10 and 83.8% GAP on all 120 classes, as well as a 90.1% macro mAP score across 32 majority classes. We summarize the modelling choices which are extensively tested through ablation studies. To the best of our knowledge, we are the first to adapt contrastively learnt multimodal pretraining for real-world multi-label image classification problems, and the innovation can be transferred to other domains.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Fengjun and Mizrachi, Sarai and Beladev, Moran and Nadav, Guy and Amsalem, Gil and Lastmann Assaraf, Karen and Harush Boker, Hadas}, year={2024}, month={Jul.}, pages={15603-15611} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26850/26622", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26850", + "pdf_size": 5814372, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12351949621702152595&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "booking.com;booking.com;booking.com;booking.com;booking.com;booking.com;booking.com", + "email": "booking.com;booking.com;booking.com;booking.com;booking.com;booking.com;booking.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Booking.com", + "aff_unique_dep": "", + "aff_unique_url": "https://www.booking.com", + "aff_unique_abbr": "Booking", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "article-25471", + "title": "MulGT: Multi-Task Graph-Transformer with Task-Aware Knowledge Injection and Domain Knowledge-Driven Pooling for Whole Slide Image Analysis", + "track": "main", + "status": "Technical", + "abstract": "Whole slide image 
(WSI) has been widely used to assist automated diagnosis under the deep learning fields. However, most previous works only discuss the SINGLE task setting which is not aligned with real clinical setting, where pathologists often conduct multiple diagnosis tasks simultaneously. Also, it is commonly recognized that the multi-task learning paradigm can improve learning efficiency by exploiting commonalities and differences across multiple tasks. To this end, we present a novel multi-task framework (i.e., MulGT) for WSI analysis by the specially designed Graph-Transformer equipped with Task-aware Knowledge Injection and Domain Knowledge-driven Graph Pooling modules. Basically, with the Graph Neural Network and Transformer as the building commons, our framework is able to learn task-agnostic low-level local information as well as task-specific high-level global representation. Considering that different tasks in WSI analysis depend on different features and properties, we also design a novel Task-aware Knowledge Injection module to transfer the task-shared graph embedding into task-specific feature spaces to learn more accurate representation for different tasks. Further, we elaborately design a novel Domain Knowledge-driven Graph Pooling module for each task to improve both the accuracy and robustness of different tasks by leveraging different diagnosis patterns of multiple tasks. We evaluated our method on two public WSI datasets from TCGA projects, i.e., esophageal carcinoma and kidney carcinoma. 
Experimental results show that our method outperforms single-task counterparts and the state-of-theart methods on both tumor typing and staging tasks.", + "primary_area": "computer vision iii", + "author": "Weiqin Zhao; Shujun Wang; Maximus Yeung; Tianye Niu; Lequan Yu", + "authorids": "", + "aff": "The University of Hong Kong, Hong Kong SAR, China; University of Cambridge, Cambridge, UK; The University of Hong Kong, Hong Kong SAR, China + Shenzhen Bay Laboratory, Shenzhen, China; Shenzhen Bay Laboratory, Shenzhen, China; The University of Hong Kong, Hong Kong SAR, China", + "bibtex": "@article{Zhao_Wang_Yeung_Niu_Yu_2023, title={MulGT: Multi-Task Graph-Transformer with Task-Aware Knowledge Injection and Domain Knowledge-Driven Pooling for Whole Slide Image Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25471}, DOI={10.1609/aaai.v37i3.25471}, abstractNote={Whole slide image (WSI) has been widely used to assist automated diagnosis under the deep learning fields. However, most previous works only discuss the SINGLE task setting which is not aligned with real clinical setting, where pathologists often conduct multiple diagnosis tasks simultaneously. Also, it is commonly recognized that the multi-task learning paradigm can improve learning efficiency by exploiting commonalities and differences across multiple tasks. To this end, we present a novel multi-task framework (i.e., MulGT) for WSI analysis by the specially designed Graph-Transformer equipped with Task-aware Knowledge Injection and Domain Knowledge-driven Graph Pooling modules. Basically, with the Graph Neural Network and Transformer as the building commons, our framework is able to learn task-agnostic low-level local information as well as task-specific high-level global representation. 
Considering that different tasks in WSI analysis depend on different features and properties, we also design a novel Task-aware Knowledge Injection module to transfer the task-shared graph embedding into task-specific feature spaces to learn more accurate representation for different tasks. Further, we elaborately design a novel Domain Knowledge-driven Graph Pooling module for each task to improve both the accuracy and robustness of different tasks by leveraging different diagnosis patterns of multiple tasks. We evaluated our method on two public WSI datasets from TCGA projects, i.e., esophageal carcinoma and kidney carcinoma. Experimental results show that our method outperforms single-task counterparts and the state-of-theart methods on both tumor typing and staging tasks.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Weiqin and Wang, Shujun and Yeung, Maximus and Niu, Tianye and Yu, Lequan}, year={2023}, month={Jun.}, pages={3606-3614} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25471/25243", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25471", + "pdf_size": 1196540, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12163985597463587960&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 9, + "aff_domain": "connect.hku.hk;cam.ac.uk;pathology.hku.hk;szbl.ac.cn;hku.hk", + "email": "connect.hku.hk;cam.ac.uk;pathology.hku.hk;szbl.ac.cn;hku.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0+2;2;0", + "aff_unique_norm": "The University of Hong Kong;University of Cambridge;Shenzhen Bay Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.hku.hk;https://www.cam.ac.uk;", + "aff_unique_abbr": "HKU;Cambridge;", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;1;0+0;0;0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": 
"article-26636", + "title": "Multi-Action Dialog Policy Learning from Logged User Feedback", + "track": "main", + "status": "Technical", + "abstract": "Multi-action dialog policy (MADP), which generates multiple atomic dialog actions per turn, has been widely applied in task-oriented dialog systems to provide expressive and efficient system responses. Existing MADP models usually imitate action combinations from the labeled multi-action dialog samples. Due to data limitations, they generalize poorly toward unseen dialog flows. While reinforcement learning-based methods are proposed to incorporate the service ratings from real users and user simulators as external supervision signals, they suffer from sparse and less credible dialog-level rewards. To cope with this problem, we explore to improve MADPL with explicit and implicit turn-level user feedback received for historical predictions (i.e., logged user feedback) that are cost-efficient to collect and faithful to real-world scenarios. The task is challenging since the logged user feedback provides only partial label feedback limited to the particular historical dialog actions predicted by the agent. To fully exploit such feedback information, we propose BanditMatch, which addresses the task from a feedback-enhanced semi-supervised learning perspective with a hybrid learning objective of SSL and bandit learning. BanditMatch integrates pseudo-labeling methods to better explore the action space through constructing full label feedback. Extensive experiments show that our BanditMatch improves MADPL over the state-of-the-art methods by generating more concise and informative responses. 
The source code and the appendix of this paper can be obtained from https://github.com/ShuoZhangXJTU/BanditMatch.", + "primary_area": "speech natural language processing", + "author": "Shuo Zhang; Junzhou Zhao; Pinghui Wang; Tianxiang Wang; Zi Liang; Jing Tao; Yi Huang; Junlan Feng", + "authorids": "", + "aff": "MOE KLINNS Lab, Xi\u2019an Jiaotong University; MOE KLINNS Lab, Xi\u2019an Jiaotong University; MOE KLINNS Lab, Xi\u2019an Jiaotong University; MOE KLINNS Lab, Xi\u2019an Jiaotong University; MOE KLINNS Lab, Xi\u2019an Jiaotong University; MOE KLINNS Lab, Xi\u2019an Jiaotong University; JIUTIAN Team, China Mobile Research; JIUTIAN Team, China Mobile Research", + "bibtex": "@article{Zhang_Zhao_Wang_Wang_Liang_Tao_Huang_Feng_2023, title={Multi-Action Dialog Policy Learning from Logged User Feedback}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26636}, DOI={10.1609/aaai.v37i11.26636}, abstractNote={Multi-action dialog policy (MADP), which generates multiple atomic dialog actions per turn, has been widely applied in task-oriented dialog systems to provide expressive and efficient system responses. Existing MADP models usually imitate action combinations from the labeled multi-action dialog samples. Due to data limitations, they generalize poorly toward unseen dialog flows. While reinforcement learning-based methods are proposed to incorporate the service ratings from real users and user simulators as external supervision signals, they suffer from sparse and less credible dialog-level rewards. To cope with this problem, we explore to improve MADPL with explicit and implicit turn-level user feedback received for historical predictions (i.e., logged user feedback) that are cost-efficient to collect and faithful to real-world scenarios. The task is challenging since the logged user feedback provides only partial label feedback limited to the particular historical dialog actions predicted by the agent. 
To fully exploit such feedback information, we propose BanditMatch, which addresses the task from a feedback-enhanced semi-supervised learning perspective with a hybrid learning objective of SSL and bandit learning. BanditMatch integrates pseudo-labeling methods to better explore the action space through constructing full label feedback. Extensive experiments show that our BanditMatch improves MADPL over the state-of-the-art methods by generating more concise and informative responses. The source code and the appendix of this paper can be obtained from https://github.com/ShuoZhangXJTU/BanditMatch.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Shuo and Zhao, Junzhou and Wang, Pinghui and Wang, Tianxiang and Liang, Zi and Tao, Jing and Huang, Yi and Feng, Junlan}, year={2023}, month={Jun.}, pages={13976-13983} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26636/26408", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26636", + "pdf_size": 656443, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=958239906853575077&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn;stu.xjtu.edu.cn;mail.xjtu.edu.cn;chinamobile.com;chinamobile.com", + "email": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn;mail.xjtu.edu.cn;stu.xjtu.edu.cn;mail.xjtu.edu.cn;chinamobile.com;chinamobile.com", + "github": "https://github.com/ShuoZhangXJTU/BanditMatch", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;1;1", + "aff_unique_norm": "Xi'an Jiaotong University;China Mobile Research Institute", + "aff_unique_dep": "MOE KLINNS Lab;JIUTIAN Team", + "aff_unique_url": "http://www.xjtu.edu.cn;https://www.chinamobile.com/", + "aff_unique_abbr": "XJTU;CMRI", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": 
"0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25803", + "title": "Multi-Aspect Explainable Inductive Relation Prediction by Sentence Transformer", + "track": "main", + "status": "Technical", + "abstract": "Recent studies on knowledge graphs (KGs) show that path-based methods empowered by pre-trained language models perform well in the provision of inductive and explainable relation predictions. In this paper, we introduce the concepts of relation path coverage and relation path confidence to filter out unreliable paths prior to model training to elevate the model performance. Moreover, we propose Knowledge Reasoning Sentence Transformer (KRST) to predict inductive relations in KGs. KRST is designed to encode the extracted reliable paths in KGs, allowing us to properly cluster paths and provide multi-aspect explanations. We conduct extensive experiments on three real-world datasets. The experimental results show that compared to SOTA models, KRST achieves the best performance in most transductive and inductive test cases (4 of 6), and in 11 of 12 few-shot test cases.", + "primary_area": "knowledge representation and reasoning", + "author": "Zhixiang Su; Di Wang; Chunyan Miao; Lizhen Cui", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanyang Technological University (NTU), Singapore + SDU-NTU Centre for Artiticial Intelligence Research (C-FAIR), Shandong University (SDU), China + Joint NTU-WeBank Research Centre on Fintech, NTU, Singapore; Joint NTU-UBC Research Centre of Excellence in Active Living for the Elderly (LILY), NTU, Singapore + School of Computer Science and Engineering, Nanyang Technological University (NTU), Singapore + SDU-NTU Centre for Artiticial Intelligence Research (C-FAIR), Shandong University (SDU), China + Joint NTU-WeBank Research Centre on Fintech, NTU, Singapore; School of Computer Science and Engineering, Nanyang Technological University (NTU), Singapore + SDU-NTU Centre for 
Artiticial Intelligence Research (C-FAIR), Shandong University (SDU), China + Joint NTU-WeBank Research Centre on Fintech, NTU, Singapore + Joint NTU-UBC Research Centre of Excellence in Active Living for the Elderly (LILY), NTU, Singapore; School of Software, SDU, China", + "bibtex": "@article{Su_Wang_Miao_Cui_2023, title={Multi-Aspect Explainable Inductive Relation Prediction by Sentence Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25803}, DOI={10.1609/aaai.v37i5.25803}, abstractNote={Recent studies on knowledge graphs (KGs) show that path-based methods empowered by pre-trained language models perform well in the provision of inductive and explainable relation predictions. In this paper, we introduce the concepts of relation path coverage and relation path confidence to filter out unreliable paths prior to model training to elevate the model performance. Moreover, we propose Knowledge Reasoning Sentence Transformer (KRST) to predict inductive relations in KGs. KRST is designed to encode the extracted reliable paths in KGs, allowing us to properly cluster paths and provide multi-aspect explanations. We conduct extensive experiments on three real-world datasets. 
The experimental results show that compared to SOTA models, KRST achieves the best performance in most transductive and inductive test cases (4 of 6), and in 11 of 12 few-shot test cases.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Su, Zhixiang and Wang, Di and Miao, Chunyan and Cui, Lizhen}, year={2023}, month={Jun.}, pages={6533-6540} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25803/25575", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25803", + "pdf_size": 362597, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7083521304707222518&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg;sdu.edu.cn", + "email": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+0;0+0+1+0;0+1+0+0;1", + "aff_unique_norm": "Nanyang Technological University;Shandong University", + "aff_unique_dep": "School of Computer Science and Engineering;SDU-NTU Centre for Artiticial Intelligence Research (C-FAIR)", + "aff_unique_url": "https://www.ntu.edu.sg;http://www.sdu.edu.cn", + "aff_unique_abbr": "NTU;SDU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Singapore;", + "aff_country_unique_index": "0+1+0;0+0+1+0;0+1+0+0;1", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25932", + "title": "Multi-Classifier Adversarial Optimization for Active Learning", + "track": "main", + "status": "Technical", + "abstract": "Active learning (AL) aims to find a better trade-off between labeling costs and model performance by consciously selecting more informative samples to label. Recently, adversarial approaches have emerged as effective solutions. 
Most of them leverage generative adversarial networks to align feature distributions of labeled and unlabeled data, upon which discriminators are trained to better distinguish between them. However, these methods fail to consider the relationship between unlabeled samples and decision boundaries, and their training processes are often complex and unstable. To this end, this paper proposes a novel adversarial AL method, namely multi-classifier adversarial optimization for active learning (MAOAL). MAOAL employs task-specific decision boundaries for data alignment while selecting the most informative samples to label. To fulfill this, we introduce a novel classifier class confusion (C3) metric, which represents the classifier discrepancy as the inter-class correlation of classifier outputs. Without any additional hyper-parameters, the C3 metric further reduces the negative impacts of ambiguous samples in the process of distribution alignment and sample selection. More concretely, the network is trained adversarially by adding two auxiliary classifiers, reducing the distribution bias of labeled and unlabeled samples by minimizing the C3 loss between classifiers, while learning tighter decision boundaries and highlighting hard samples by maximizing the C3 loss. Finally, the unlabeled samples with the highest C3 loss are selected to label. 
Extensive experiments demonstrate the superiority of our approach over state-of-the-art AL methods in terms of image classification and object detection.", + "primary_area": "machine learning i", + "author": "Lin Geng; Ningzhong Liu; Jie Qin", + "authorids": "", + "aff": "School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China; School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China; School of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China", + "bibtex": "@article{Geng_Liu_Qin_2023, title={Multi-Classifier Adversarial Optimization for Active Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25932}, DOI={10.1609/aaai.v37i6.25932}, abstractNote={Active learning (AL) aims to find a better trade-off between labeling costs and model performance by consciously selecting more informative samples to label. Recently, adversarial approaches have emerged as effective solutions. Most of them leverage generative adversarial networks to align feature distributions of labeled and unlabeled data, upon which discriminators are trained to better distinguish between them. However, these methods fail to consider the relationship between unlabeled samples and decision boundaries, and their training processes are often complex and unstable. To this end, this paper proposes a novel adversarial AL method, namely multi-classifier adversarial optimization for active learning (MAOAL). MAOAL employs task-specific decision boundaries for data alignment while selecting the most informative samples to label. To fulfill this, we introduce a novel classifier class confusion (C3) metric, which represents the classifier discrepancy as the inter-class correlation of classifier outputs. 
Without any additional hyper-parameters, the C3 metric further reduces the negative impacts of ambiguous samples in the process of distribution alignment and sample selection. More concretely, the network is trained adversarially by adding two auxiliary classifiers, reducing the distribution bias of labeled and unlabeled samples by minimizing the C3 loss between classifiers, while learning tighter decision boundaries and highlighting hard samples by maximizing the C3 loss. Finally, the unlabeled samples with the highest C3 loss are selected to label. Extensive experiments demonstrate the superiority of our approach over state-of-the-art AL methods in terms of image classification and object detection.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Geng, Lin and Liu, Ningzhong and Qin, Jie}, year={2023}, month={Jun.}, pages={7687-7695} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25932/25704", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25932", + "pdf_size": 2389376, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3666086327218964275&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff_domain": "nuaa.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "email": "nuaa.edu.cn;nuaa.edu.cn;nuaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University of Aeronautics and Astronautics", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.nuaa.edu.cn", + "aff_unique_abbr": "NUAA", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25569", + "title": "Multi-Domain Generalized Graph Meta Learning", + "track": "main", + "status": "Technical", + "abstract": "Graph meta learning aims to learn historical knowledge from training graph 
neural networks (GNNs) models and adapt it to downstream learning tasks in a target graph, which has drawn increasing attention due to its ability of knowledge transfer and fast adaptation. While existing graph meta learning approaches assume the learning tasks are from the same graph domain but lack the solution for multi-domain adaptation. In this paper, we address the multi-domain generalized graph meta learning problem, which is challenging due to non-Euclidean data, inequivalent feature spaces, and heterogeneous distributions. To this end, we propose a novel solution called MD-Gram for multi-domain graph generalization. It introduces an empirical graph generalization method that uses empirical vectors to form a unified expression of non-Euclidean graph data. Then it proposes a multi-domain graphs transformation approach to transform the learning tasks from multiple source-domain graphs with inequivalent feature spaces into a common domain, where graph meta learning is conducted to learn generalized knowledge. It further adopts a domain-specific GNN enhancement method to learn a customized GNN model to achieve fast adaptation in the unseen target domain. 
Extensive experiments based on four real-world graph domain datasets show that the proposed method significantly outperforms the state-of-the-art in multi-domain graph meta learning tasks.", + "primary_area": "data mining and knowledge management", + "author": "Mingkai Lin; Wenzhong Li; Ding Li; Yizhou Chen; Guohao Li; Sanglu Lu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University", + "bibtex": "@article{Lin_Li_Li_Chen_Li_Lu_2023, title={Multi-Domain Generalized Graph Meta Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25569}, DOI={10.1609/aaai.v37i4.25569}, abstractNote={Graph meta learning aims to learn historical knowledge from training graph neural networks (GNNs) models and adapt it to downstream learning tasks in a target graph, which has drawn increasing attention due to its ability of knowledge transfer and fast adaptation. While existing graph meta learning approaches assume the learning tasks are from the same graph domain but lack the solution for multi-domain adaptation. In this paper, we address the multi-domain generalized graph meta learning problem, which is challenging due to non-Euclidean data, inequivalent feature spaces, and heterogeneous distributions. To this end, we propose a novel solution called MD-Gram for multi-domain graph generalization. It introduces an empirical graph generalization method that uses empirical vectors to form a unified expression of non-Euclidean graph data. 
Then it proposes a multi-domain graphs transformation approach to transform the learning tasks from multiple source-domain graphs with inequivalent feature spaces into a common domain, where graph meta learning is conducted to learn generalized knowledge. It further adopts a domain-specific GNN enhancement method to learn a customized GNN model to achieve fast adaptation in the unseen target domain. Extensive experiments based on four real-world graph domain datasets show that the proposed method significantly outperforms the state-of-the-art in multi-domain graph meta learning tasks.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Mingkai and Li, Wenzhong and Li, Ding and Chen, Yizhou and Li, Guohao and Lu, Sanglu}, year={2023}, month={Jun.}, pages={4479-4487} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25569/25341", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25569", + "pdf_size": 752581, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=675303130188662233&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "smail.nju.edu.cn;nju.edu.cn; ; ; ; ", + "email": "smail.nju.edu.cn;nju.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing University", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26935", + "title": "Multi-Horizon Learning in Procedurally-Generated Environments for Off-Policy Reinforcement Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Value estimates at multiple timescales can help 
create advanced discounting functions and allow agents to form more effective predictive models of their environment. In this work, we investigate learning over multiple horizons concurrently for off-policy reinforcement learning by using an advantage-based action selection method and introducing architectural improvements. Our proposed agent learns over multiple horizons simultaneously, while using either exponential or hyperbolic discounting functions. We implement our approach on Rainbow, a value-based off-policy algorithm, and test on Procgen, a collection of procedurally-generated environments, to demonstrate the effectiveness of this approach, specifically to evaluate the agent's performance in previously unseen scenarios.", + "primary_area": "", + "author": "Raja Farrukh Ali; Kevin Duong; Nasik Muhammad Nafi; William Hsu", + "authorids": "", + "aff": "Department of Computer Science, Kansas State University; Department of Computer Science, Kansas State University; Department of Computer Science, Kansas State University; Department of Computer Science, Kansas State University", + "bibtex": "@article{Ali_Duong_Nafi_Hsu_2024, title={Multi-Horizon Learning in Procedurally-Generated Environments for Off-Policy Reinforcement Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26935}, DOI={10.1609/aaai.v37i13.26935}, abstractNote={Value estimates at multiple timescales can help create advanced discounting functions and allow agents to form more effective predictive models of their environment. In this work, we investigate learning over multiple horizons concurrently for off-policy reinforcement learning by using an advantage-based action selection method and introducing architectural improvements. Our proposed agent learns over multiple horizons simultaneously, while using either exponential or hyperbolic discounting functions. 
We implement our approach on Rainbow, a value-based off-policy algorithm, and test on Procgen, a collection of procedurally-generated environments, to demonstrate the effectiveness of this approach, specifically to evaluate the agent\u2019s performance in previously unseen scenarios.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ali, Raja Farrukh and Duong, Kevin and Nafi, Nasik Muhammad and Hsu, William}, year={2024}, month={Jul.}, pages={16150-16151} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26935/26707", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26935", + "pdf_size": 984222, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2066230157851022491&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ksu.edu;ksu.edu;ksu.edu;ksu.edu", + "email": "ksu.edu;ksu.edu;ksu.edu;ksu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Kansas State University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.k-state.edu", + "aff_unique_abbr": "K-State", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25668", + "title": "Multi-Label Few-Shot ICD Coding as Autoregressive Generation with Prompt", + "track": "main", + "status": "Technical", + "abstract": "Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with an average of 3,000+ tokens. This task is challenging due to the high-dimensional space of multi-label assignment (155,000+ ICD code candidates) and the long-tail challenge - Many ICD codes are infrequently assigned yet infrequent ICD codes are important clinically. 
This study addresses the long-tail challenge by transforming this multi-label classification task into an autoregressive generation task. Specifically, we first introduce a novel pretraining objective to generate free text diagnosis and procedure descriptions using the SOAP structure, the medical logic physicians use for note documentation. Second, instead of directly predicting the high dimensional space of ICD codes, our model generates the lower dimension of text descriptions, which then infer ICD codes. Third, we designed a novel prompt template for multi-label classification. We evaluate our Generation with Prompt (GP) model with the benchmark of all code assignment (MIMIC-III-full) and few shot ICD code assignment evaluation benchmark (MIMIC-III-few). Experiments on MIMIC-III-few show that our model performs with a marco F1 30.2, which substantially outperforms the previous MIMIC-III-full SOTA model (marco F1 4.3) and the model specifically designed for few/zero shot setting (marco F1 18.7). Finally, we design a novel ensemble learner, a cross attention reranker with prompts, to integrate previous SOTA and our best few-shot coding predictions. 
Experiments on MIMIC-III-full show that our ensemble learner substantially improves both macro and micro F1, from 10.4 to 14.6 and from 58.2 to 59.1, respectively.", + "primary_area": "domain s of application", + "author": "Zhichao Yang; Sunjae Kwon; Zonghai Yao; Hong Yu", + "authorids": "", + "aff": "College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst + Department of Computer Science, University of Massachusetts Lowell + Center for Healthcare Organization and Implementation Research, Veterans Affairs Bedford Healthcare System", + "bibtex": "@article{Yang_Kwon_Yao_Yu_2023, title={Multi-Label Few-Shot ICD Coding as Autoregressive Generation with Prompt}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25668}, DOI={10.1609/aaai.v37i4.25668}, abstractNote={Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with an average of 3,000+ tokens. This task is challenging due to the high-dimensional space of multi-label assignment (155,000+ ICD code candidates) and the long-tail challenge - Many ICD codes are infrequently assigned yet infrequent ICD codes are important clinically. This study addresses the long-tail challenge by transforming this multi-label classification task into an autoregressive generation task. Specifically, we first introduce a novel pretraining objective to generate free text diagnosis and procedure descriptions using the SOAP structure, the medical logic physicians use for note documentation. Second, instead of directly predicting the high dimensional space of ICD codes, our model generates the lower dimension of text descriptions, which then infer ICD codes. 
Third, we designed a novel prompt template for multi-label classification. We evaluate our Generation with Prompt (GP) model with the benchmark of all code assignment (MIMIC-III-full) and few shot ICD code assignment evaluation benchmark (MIMIC-III-few). Experiments on MIMIC-III-few show that our model performs with a marco F1 30.2, which substantially outperforms the previous MIMIC-III-full SOTA model (marco F1 4.3) and the model specifically designed for few/zero shot setting (marco F1 18.7). Finally, we design a novel ensemble learner, a cross attention reranker with prompts, to integrate previous SOTA and our best few-shot coding predictions. Experiments on MIMIC-III-full show that our ensemble learner substantially improves both macro and micro F1, from 10.4 to 14.6 and from 58.2 to 59.1, respectively.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Zhichao and Kwon, Sunjae and Yao, Zonghai and Yu, Hong}, year={2023}, month={Jun.}, pages={5366-5374} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25668/25440", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25668", + "pdf_size": 1977212, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10193861941553436244&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "umass.edu; ; ; ", + "email": "umass.edu; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1+2", + "aff_unique_norm": "University of Massachusetts Amherst;University of Massachusetts Lowell;Veterans Affairs Bedford Healthcare System", + "aff_unique_dep": "College of Information and Computer Sciences;Department of Computer Science;Center for Healthcare Organization and Implementation Research", + "aff_unique_url": "https://www.umass.edu;https://www.uml.edu;https://www.bedford.va.gov/", + "aff_unique_abbr": "UMass Amherst;UMass Lowell;VA Bedford", + "aff_campus_unique_index": 
"0;0;0;0+1+2", + "aff_campus_unique": "Amherst;Lowell;Bedford", + "aff_country_unique_index": "0;0;0;0+0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25094", + "title": "Multi-Level Compositional Reasoning for Interactive Instruction Following", + "track": "main", + "status": "Technical", + "abstract": "Robotic agents performing domestic chores by natural language directives are required to master the complex job of navigating environment and interacting with objects in the environments. The tasks given to the agents are often composite thus are challenging as completing them require to reason about multiple subtasks, e.g., bring a cup of coffee. To address the challenge, we propose to divide and conquer it by breaking the task into multiple subgoals and attend to them individually for better navigation and interaction. We call it Multi-level Compositional Reasoning Agent (MCR-Agent). Specifically, we learn a three-level action policy. At the highest level, we infer a sequence of human-interpretable subgoals to be executed based on language instructions by a high-level policy composition controller. At the middle level, we discriminatively control the agent\u2019s navigation by a master policy by alternating between a navigation policy and various independent interaction policies. Finally, at the lowest level, we infer manipulation actions with the corresponding object masks using the appropriate interaction policy. Our approach not only generates human interpretable subgoals but also achieves 2.03% absolute gain to comparable state of the arts in the efficiency metric (PLWSR in unseen set) without using rule-based planning or a semantic spatial memory. 
The\ncode is available at https://github.com/yonseivnl/mcr-agent.", + "primary_area": "computer vision i", + "author": "Suvaansh Bhambri; Byeonghwi Kim; Jonghyun Choi", + "authorids": "", + "aff": "Yonsei University; Yonsei University; Yonsei University", + "bibtex": "@article{Bhambri_Kim_Choi_2023, title={Multi-Level Compositional Reasoning for Interactive Instruction Following}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25094}, DOI={10.1609/aaai.v37i1.25094}, abstractNote={Robotic agents performing domestic chores by natural language directives are required to master the complex job of navigating environment and interacting with objects in the environments. The tasks given to the agents are often composite thus are challenging as completing them require to reason about multiple subtasks, e.g., bring a cup of coffee. To address the challenge, we propose to divide and conquer it by breaking the task into multiple subgoals and attend to them individually for better navigation and interaction. We call it Multi-level Compositional Reasoning Agent (MCR-Agent). Specifically, we learn a three-level action policy. At the highest level, we infer a sequence of human-interpretable subgoals to be executed based on language instructions by a high-level policy composition controller. At the middle level, we discriminatively control the agent\u2019s navigation by a master policy by alternating between a navigation policy and various independent interaction policies. Finally, at the lowest level, we infer manipulation actions with the corresponding object masks using the appropriate interaction policy. Our approach not only generates human interpretable subgoals but also achieves 2.03% absolute gain to comparable state of the arts in the efficiency metric (PLWSR in unseen set) without using rule-based planning or a semantic spatial memory. 
The\ncode is available at https://github.com/yonseivnl/mcr-agent.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bhambri, Suvaansh and Kim, Byeonghwi and Choi, Jonghyun}, year={2023}, month={Jun.}, pages={223-231} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25094/24866", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25094", + "pdf_size": 3167375, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17923106845558345645&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;yonsei.ac.kr;yonsei.ac.kr", + "email": "gmail.com;yonsei.ac.kr;yonsei.ac.kr", + "github": "https://github.com/yonseivnl/mcr-agent", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Yonsei University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.yonsei.ac.kr", + "aff_unique_abbr": "Yonsei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26346", + "title": "Multi-Level Confidence Learning for Trustworthy Multimodal Classification", + "track": "main", + "status": "Technical", + "abstract": "With the rapid development of various data acquisition technologies, more and more multimodal data come into being. It is important to integrate different modalities which are with high-dimensional features for boosting final multimodal data classification task. However, existing multimodal classification methods mainly focus on exploiting the complementary information of different modalities, while ignoring the learning confidence during information fusion. In this paper, we propose a trustworthy multimodal classification network via multi-level confidence learning, referred to as MLCLNet. 
Considering that a large number of feature dimensions could not contribute to final classification performance but disturb the discriminability of different samples, we propose a feature confidence learning mechanism to suppress some redundant features, as well as enhancing the expression of discriminative feature dimensions in each modality. In order to capture the inherent sample structure information implied in each modality, we design a graph convolutional network branch to learn the corresponding structure preserved feature representation and generate modal-specific initial classification labels. Since samples from different modalities should share consistent labels, a cross-modal label fusion module is deployed to capture the label correlations of different modalities. In addition, motivated the ideally orthogonality of final fused label matrix, we design a label confidence loss to supervise the network for learning more separable data representations. To the best of our knowledge, MLCLNet is the first work which integrates both feature and label-level confidence learning for multimodal classification. 
Extensive experiments on four multimodal medical datasets are conducted to validate superior performance of MLCLNet when compared to other state-of-the-art methods.", + "primary_area": "machine learning iv", + "author": "Xiao Zheng; Chang Tang; Zhiguo Wan; Chengyu Hu; Wei Zhang", + "authorids": "", + "aff": "School of Computer, National University of Defense Technology, Changsha 410073, China; School of Computer Science, China University of Geosciences, Wuhan 430074, China; Zhejiang Lab, Hangzhou 311121, China; School of Computer Science, China University of Geosciences, Wuhan 430074, China; Shandong Computer Science Center (National Supercomputing Center in Jinan), Jinan 250000, China", + "bibtex": "@article{Zheng_Tang_Wan_Hu_Zhang_2023, title={Multi-Level Confidence Learning for Trustworthy Multimodal Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26346}, DOI={10.1609/aaai.v37i9.26346}, abstractNote={With the rapid development of various data acquisition technologies, more and more multimodal data come into being. It is important to integrate different modalities which are with high-dimensional features for boosting final multimodal data classification task. However, existing multimodal classification methods mainly focus on exploiting the complementary information of different modalities, while ignoring the learning confidence during information fusion. In this paper, we propose a trustworthy multimodal classification network via multi-level confidence learning, referred to as MLCLNet. Considering that a large number of feature dimensions could not contribute to final classification performance but disturb the discriminability of different samples, we propose a feature confidence learning mechanism to suppress some redundant features, as well as enhancing the expression of discriminative feature dimensions in each modality. 
In order to capture the inherent sample structure information implied in each modality, we design a graph convolutional network branch to learn the corresponding structure preserved feature representation and generate modal-specific initial classification labels. Since samples from different modalities should share consistent labels, a cross-modal label fusion module is deployed to capture the label correlations of different modalities. In addition, motivated the ideally orthogonality of final fused label matrix, we design a label confidence loss to supervise the network for learning more separable data representations. To the best of our knowledge, MLCLNet is the first work which integrates both feature and label-level confidence learning for multimodal classification. Extensive experiments on four multimodal medical datasets are conducted to validate superior performance of MLCLNet when compared to other state-of-the-art methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Xiao and Tang, Chang and Wan, Zhiguo and Hu, Chengyu and Zhang, Wei}, year={2023}, month={Jun.}, pages={11381-11389} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26346/26118", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26346", + "pdf_size": 1147837, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5549441073860752196&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;3", + "aff_unique_norm": "National University of Defense Technology;China University of Geosciences;Zhejiang Lab;Shandong Computer Science Center", + "aff_unique_dep": "School of Computer;School of Computer Science;;Computer Science Center", + "aff_unique_url": ";http://www.cug.edu.cn;;", + "aff_unique_abbr": ";CUG;;", + "aff_campus_unique_index": "0;1;2;1;3", + 
"aff_campus_unique": "Changsha;Wuhan;Hangzhou;Jinan", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25799", + "title": "Multi-Level Wavelet Mapping Correlation for Statistical Dependence Measurement: Methodology and Performance", + "track": "main", + "status": "Technical", + "abstract": "We propose a new criterion for measuring dependence between two real variables, namely, Multi-level Wavelet Mapping Correlation (MWMC). MWMC can capture the nonlinear dependencies between variables by measuring their correlation under different levels of wavelet mappings. We show that the empirical estimate of MWMC converges exponentially to its population quantity. To support independence test better with MWMC, we further design a permutation test based on MWMC and prove that our test can not only control the type I error rate (the rate of false positives) well but also ensure that the type II error rate (the rate of false negatives) is upper bounded by O(1/n) (n is the sample size) with finite permutations. 
By extensive experiments on (conditional) independence tests and causal discovery, we show that our method outperforms existing independence test methods.", + "primary_area": "knowledge representation and reasoning", + "author": "Yixin Ren; Hao Zhang; Yewei Xia; Jihong Guan; Shuigeng Zhou", + "authorids": "", + "aff": "Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China; Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China; Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China; Department of Computer Science & Technology, Tongji University, China; Shanghai Key Lab of Intelligent Information Processing, and School of Computer Science, Fudan University, China", + "bibtex": "@article{Ren_Zhang_Xia_Guan_Zhou_2023, title={Multi-Level Wavelet Mapping Correlation for Statistical Dependence Measurement: Methodology and Performance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25799}, DOI={10.1609/aaai.v37i5.25799}, abstractNote={We propose a new criterion for measuring dependence between two real variables, namely, Multi-level Wavelet Mapping Correlation (MWMC). MWMC can capture the nonlinear dependencies between variables by measuring their correlation under different levels of wavelet mappings. We show that the empirical estimate of MWMC converges exponentially to its population quantity. To support independence test better with MWMC, we further design a permutation test based on MWMC and prove that our test can not only control the type I error rate (the rate of false positives) well but also ensure that the type II error rate (the rate of false negatives) is upper bounded by O(1/n) (n is the sample size) with finite permutations. 
By extensive experiments on (conditional) independence tests and causal discovery, we show that our method outperforms existing independence test methods.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ren, Yixin and Zhang, Hao and Xia, Yewei and Guan, Jihong and Zhou, Shuigeng}, year={2023}, month={Jun.}, pages={6499-6506} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25799/25571", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25799", + "pdf_size": 139440, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17608536990409909102&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "m.fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;tongji.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;tongji.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Fudan University;Tongji University", + "aff_unique_dep": "School of Computer Science;Department of Computer Science & Technology", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.tongji.edu.cn", + "aff_unique_abbr": "Fudan;Tongji", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26579", + "title": "Multi-Mask Label Mapping for Prompt-Based Learning", + "track": "main", + "status": "Technical", + "abstract": "Prompt-based Learning has shown significant success in few-shot classification. The mainstream approach is to concatenate a template for the input text to transform the classification task into a cloze-type task where label mapping plays an important role in finding the ground-truth labels. While current label mapping methods only use the contexts in one single input, it could be crucial if wrong information is contained in the text. 
Specifically, it is proved in recent work that even the large language models like BERT/RoBERTa make classification decisions heavily dependent on a specific keyword regardless of the task or the context. Such a word is referred to as a lexical cue and if a misleading lexical cue is included in the instance it will lead the model to make a wrong prediction. We propose a multi-mask prompt-based approach with Multi-Mask Label Mapping (MMLM) to reduce the impact of misleading lexical cues by allowing the model to exploit multiple lexical cues. To satisfy the conditions of few-shot learning, an instance augmentation approach for the cloze-type model is proposed and the misleading cues are gradually excluded through training. We demonstrate the effectiveness of MMLM by both theoretical analysis and empirical studies, and show that MMLM outperforms other existing label mapping approaches.", + "primary_area": "speech natural language processing", + "author": "Jirui Qi; Richong Zhang; Jaein Kim; Junfan Chen; Wenyi Qin; Yongyi Mao", + "authorids": "", + "aff": "SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China; SKLSDE, School of Computer Science and Engineering, Beihang University, Beijing, China; School of Electrical Engineering and Computer Science, University of Ottawa, Ottawa, Canada", + "bibtex": "@article{Qi_Zhang_Kim_Chen_Qin_Mao_2023, title={Multi-Mask Label Mapping for Prompt-Based Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26579}, DOI={10.1609/aaai.v37i11.26579}, abstractNote={Prompt-based Learning has shown significant success in few-shot classification. 
The mainstream approach is to concatenate a template for the input text to transform the classification task into a cloze-type task where label mapping plays an important role in finding the ground-truth labels. While current label mapping methods only use the contexts in one single input, it could be crucial if wrong information is contained in the text. Specifically, it is proved in recent work that even the large language models like BERT/RoBERTa make classification decisions heavily dependent on a specific keyword regardless of the task or the context. Such a word is referred to as a lexical cue and if a misleading lexical cue is included in the instance it will lead the model to make a wrong prediction. We propose a multi-mask prompt-based approach with Multi-Mask Label Mapping (MMLM) to reduce the impact of misleading lexical cues by allowing the model to exploit multiple lexical cues. To satisfy the conditions of few-shot learning, an instance augmentation approach for the cloze-type model is proposed and the misleading cues are gradually excluded through training. 
We demonstrate the effectiveness of MMLM by both theoretical analysis and empirical studies, and show that MMLM outperforms other existing label mapping approaches.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qi, Jirui and Zhang, Richong and Kim, Jaein and Chen, Junfan and Qin, Wenyi and Mao, Yongyi}, year={2023}, month={Jun.}, pages={13465-13473} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26579/26351", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26579", + "pdf_size": 1403427, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18024065485964911010&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "email": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0;2", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory;University of Ottawa", + "aff_unique_dep": "School of Computer Science and Engineering;;School of Electrical Engineering and Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.uottawa.ca", + "aff_unique_abbr": "BUAA;;U Ottawa", + "aff_campus_unique_index": "0;0;0;0;0;2", + "aff_campus_unique": "Beijing;;Ottawa", + "aff_country_unique_index": "0;0+0;0;0;0;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-25445", + "title": "Multi-Modal Knowledge Hypergraph for Diverse Image Retrieval", + "track": "main", + "status": "Technical", + "abstract": "The task of keyword-based diverse image retrieval has received considerable attention due to its wide demand in real-world scenarios. 
Existing methods either rely on a multi-stage re-ranking strategy based on human design to diversify results, or extend sub-semantics via an implicit generator, which either relies on manual labor or lacks explainability. To learn more diverse and explainable representations, we capture sub-semantics in an explicit manner by leveraging the multi-modal knowledge graph (MMKG) that contains richer entities and relations. However, the huge domain gap between the off-the-shelf MMKG and retrieval datasets, as well as the semantic gap between images and texts, make the fusion of MMKG difficult. In this paper, we pioneer a degree-free hypergraph solution that models many-to-many relations to address the challenge of heterogeneous sources and heterogeneous modalities. Specifically, a hyperlink-based solution, Multi-Modal Knowledge Hyper Graph (MKHG) is proposed, which bridges heterogeneous data via various hyperlinks to diversify sub-semantics. Among them, a hypergraph construction module first customizes various hyperedges to link the heterogeneous MMKG and retrieval databases. A multi-modal instance bagging module then explicitly selects instances to diversify the semantics. Meanwhile, a diverse concept aggregator flexibly adapts key sub-semantics. Finally, several losses are adopted to optimize the semantic space. 
Extensive experiments on two real-world datasets have well verified the effectiveness and explainability of our proposed method.", + "primary_area": "computer vision iii", + "author": "Yawen Zeng; Qin Jin; Tengfei Bao; Wenfeng Li", + "authorids": "", + "aff": "ByteDance AI Lab; School of Information, Renmin University of China; ByteDance AI Lab; ByteDance AI Lab", + "bibtex": "@article{Zeng_Jin_Bao_Li_2023, title={Multi-Modal Knowledge Hypergraph for Diverse Image Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25445}, DOI={10.1609/aaai.v37i3.25445}, abstractNote={The task of keyword-based diverse image retrieval has received considerable attention due to its wide demand in real-world scenarios. Existing methods either rely on a multi-stage re-ranking strategy based on human design to diversify results, or extend sub-semantics via an implicit generator, which either relies on manual labor or lacks explainability. To learn more diverse and explainable representations, we capture sub-semantics in an explicit manner by leveraging the multi-modal knowledge graph (MMKG) that contains richer entities and relations. However, the huge domain gap between the off-the-shelf MMKG and retrieval datasets, as well as the semantic gap between images and texts, make the fusion of MMKG difficult. In this paper, we pioneer a degree-free hypergraph solution that models many-to-many relations to address the challenge of heterogeneous sources and heterogeneous modalities. Specifically, a hyperlink-based solution, Multi-Modal Knowledge Hyper Graph (MKHG) is proposed, which bridges heterogeneous data via various hyperlinks to diversify sub-semantics. Among them, a hypergraph construction module first customizes various hyperedges to link the heterogeneous MMKG and retrieval databases. A multi-modal instance bagging module then explicitly selects instances to diversify the semantics. Meanwhile, a diverse concept aggregator flexibly adapts key sub-semantics. 
Finally, several losses are adopted to optimize the semantic space. Extensive experiments on two real-world datasets have well verified the effectiveness and explainability of our proposed method.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Yawen and Jin, Qin and Bao, Tengfei and Li, Wenfeng}, year={2023}, month={Jun.}, pages={3376-3383} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25445/25217", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25445", + "pdf_size": 1515808, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12680481293976207167&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;ruc.edu.cn;bytedance.com;bytedance.com", + "email": "gmail.com;ruc.edu.cn;bytedance.com;bytedance.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "ByteDance;Renmin University of China", + "aff_unique_dep": "AI Lab;School of Information", + "aff_unique_url": "https://www.bytedance.com;http://www.ruc.edu.cn", + "aff_unique_abbr": "ByteDance;RUC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26955", + "title": "Multi-Modal Protein Knowledge Graph Construction and Applications (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Existing data-centric methods for protein science generally cannot sufficiently capture and leverage biology knowledge, which may be crucial for many protein tasks. To facilitate research in this field, we create ProteinKG65, a knowledge graph for protein science. 
Using gene ontology and Uniprot knowledge base as a basis, we transform and integrate various kinds of knowledge with aligned descriptions and protein sequences, respectively, to GO terms and protein entities. ProteinKG65 is mainly dedicated to providing a specialized protein knowledge graph, bringing the knowledge of Gene Ontology to protein function and structure prediction. We also illustrate the potential applications of ProteinKG65 with a prototype. Our dataset can be downloaded at https://w3id.org/proteinkg65.", + "primary_area": "", + "author": "Siyuan Cheng; Xiaozhuan Liang; Zhen Bi; Huajun Chen; Ningyu Zhang", + "authorids": "", + "aff": "School of Software Technology, Zhejiang University, Hangzhou, China+Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; School of Software Technology, Zhejiang University, Hangzhou, China+Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; School of Software Technology, Zhejiang University, Hangzhou, China+Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; College of Computer Science and Technology, Zhejiang University, Hangzhou, China+Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; School of Software Technology, Zhejiang University, Hangzhou, China+College of Computer Science and Technology, Zhejiang University, Hangzhou, China+Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies", + "bibtex": "@article{Cheng_Liang_Bi_Chen_Zhang_2024, title={Multi-Modal Protein Knowledge Graph Construction and Applications (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26955}, DOI={10.1609/aaai.v37i13.26955}, abstractNote={Existing data-centric methods for protein science generally cannot sufficiently capture and leverage biology knowledge, which may be crucial for many protein tasks. 
To facilitate research in this field, we create ProteinKG65, a knowledge graph for protein science. Using gene ontology and Uniprot knowledge base as a basis, we transform and integrate various kinds of knowledge with aligned descriptions and protein sequences, respectively, to GO terms and protein entities. ProteinKG65 is mainly dedicated to providing a specialized protein knowledge graph, bringing the knowledge of Gene Ontology to protein function and structure prediction. We also illustrate the potential applications of ProteinKG65 with a prototype. Our dataset can be downloaded at https://w3id.org/proteinkg65.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Siyuan and Liang, Xiaozhuan and Bi, Zhen and Chen, Huajun and Zhang, Ningyu}, year={2024}, month={Jul.}, pages={16190-16191} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26955/26727", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26955", + "pdf_size": 340751, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2467629764783993495&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "https://w3id.org/proteinkg65", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0+0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "School of Software Technology", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "0;0;0;0;0+0", + "aff_campus_unique": "Hangzhou;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25184", + "title": "Multi-Modality Deep Network for Extreme Learned Image Compression", + "track": "main", + "status": "Technical", + "abstract": "Image-based single-modality compression 
learning approaches have demonstrated exceptionally powerful encoding and decoding capabilities in the past few years, but suffer from blur and severe semantics loss at extremely low bitrates. To address this issue, we propose a multimodal machine learning method for text-guided image compression, in which the semantic information of text is used as prior information to guide image compression for better compression performance. We fully study the role of text description in different components of the codec, and demonstrate its effectiveness. In addition, we adopt the image-text attention module and image-request complement module to better fuse image and text features, and propose an improved multimodal semantic-consistent loss to produce semantically complete reconstructions. Extensive experiments, including a user study, prove that our method can obtain visually pleasing results at extremely low bitrates, and achieves a comparable or even better performance than state-of-the-art methods, even though these methods are at 2x to 4x bitrates of ours.", + "primary_area": "computer vision i", + "author": "Xuhao Jiang; Weimin Tan; Tian Tan; Bo Yan; Liquan Shen", + "authorids": "", + "aff": "School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing, Fudan University, Shanghai, China; School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing, Fudan University, Shanghai, China; School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing, Fudan University, Shanghai, China; School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing, Shanghai Collaborative Innovation Center of Intelligent Visual Computing, Fudan University, 
Shanghai, China; School of Communication, Shanghai University, Shanghai, China", + "bibtex": "@article{Jiang_Tan_Tan_Yan_Shen_2023, title={Multi-Modality Deep Network for Extreme Learned Image Compression}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25184}, DOI={10.1609/aaai.v37i1.25184}, abstractNote={Image-based single-modality compression learning approaches have demonstrated exceptionally powerful encoding and decoding capabilities in the past few years, but suffer from blur and severe semantics loss at extremely low bitrates. To address this issue, we propose a multimodal machine learning method for text-guided image compression, in which the semantic information of text is used as prior information to guide image compression for better compression performance. We fully study the role of text description in different components of the codec, and demonstrate its effectiveness. In addition, we adopt the image-text attention module and image-request complement module to better fuse image and text features, and propose an improved multimodal semantic-consistent loss to produce semantically complete reconstructions. 
Extensive experiments, including a user study, prove that our method can obtain visually pleasing results at extremely low bitrates, and achieves a comparable or even better performance than state-of-the-art methods, even though these methods are at 2x to 4x bitrates of ours.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Xuhao and Tan, Weimin and Tan, Tian and Yan, Bo and Shen, Liquan}, year={2023}, month={Jun.}, pages={1033-1041} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25184/24956", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25184", + "pdf_size": 12532399, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12505395926411202986&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;163.com", + "email": "fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Fudan University;Shanghai University", + "aff_unique_dep": "School of Computer Science;School of Communication", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.shu.edu.cn", + "aff_unique_abbr": "Fudan;SHU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25665", + "title": "Multi-Relational Contrastive Learning Graph Neural Network for Drug-Drug Interaction Event Prediction", + "track": "main", + "status": "Technical", + "abstract": "Drug-drug interactions (DDIs) could lead to various unexpected adverse consequences, so-called DDI events. Predicting DDI events can reduce the potential risk of combinatorial therapy and improve the safety of medication use, and has attracted much attention in the deep learning community. 
Recently, graph neural network (GNN)-based models have aroused broad interest and achieved satisfactory results in the DDI event prediction. Most existing GNN-based models ignore either drug structural information or drug interactive information, but both aspects of information are important for DDI event prediction. Furthermore, accurately predicting rare DDI events is hindered by their inadequate labeled instances. In this paper, we propose a new method, Multi-Relational Contrastive learning Graph Neural Network, MRCGNN for brevity, to predict DDI events. Specifically, MRCGNN integrates the two aspects of information by deploying a GNN on the multi-relational DDI event graph attributed with the drug features extracted from drug molecular graphs. Moreover, we implement a multi-relational graph contrastive learning with a designed dual-view negative counterpart augmentation strategy, to capture implicit information about rare DDI events. Extensive experiments on two datasets show that MRCGNN outperforms the state-of-the-art methods. 
Besides, we observe that MRCGNN achieves satisfactory performance when predicting rare DDI events.", + "primary_area": "domain s of application", + "author": "Zhankun Xiong; Shichao Liu; Feng Huang; Ziyan Wang; Xuan Liu; Zhongfei Zhang; Wen Zhang", + "authorids": "", + "aff": "College of Informatics, Huazhong Agricultural University; College of Informatics, Huazhong Agricultural University; College of Informatics, Huazhong Agricultural University; College of Informatics, Huazhong Agricultural University; College of Informatics, Huazhong Agricultural University; Computer Science Department, Binghamton University; College of Informatics, Huazhong Agricultural University", + "bibtex": "@article{Xiong_Liu_Huang_Wang_Liu_Zhang_Zhang_2023, title={Multi-Relational Contrastive Learning Graph Neural Network for Drug-Drug Interaction Event Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25665}, DOI={10.1609/aaai.v37i4.25665}, abstractNote={Drug-drug interactions (DDIs) could lead to various unexpected adverse consequences, so-called DDI events. Predicting DDI events can reduce the potential risk of combinatorial therapy and improve the safety of medication use, and has attracted much attention in the deep learning community. Recently, graph neural network (GNN)-based models have aroused broad interest and achieved satisfactory results in the DDI event prediction. Most existing GNN-based models ignore either drug structural information or drug interactive information, but both aspects of information are important for DDI event prediction. Furthermore, accurately predicting rare DDI events is hindered by their inadequate labeled instances. In this paper, we propose a new method, Multi-Relational Contrastive learning Graph Neural Network, MRCGNN for brevity, to predict DDI events. 
Specifically, MRCGNN integrates the two aspects of information by deploying a GNN on the multi-relational DDI event graph attributed with the drug features extracted from drug molecular graphs. Moreover, we implement a multi-relational graph contrastive learning with a designed dual-view negative counterpart augmentation strategy, to capture implicit information about rare DDI events. Extensive experiments on two datasets show that MRCGNN outperforms the state-of-the-art methods. Besides, we observe that MRCGNN achieves satisfactory performance when predicting rare DDI events.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiong, Zhankun and Liu, Shichao and Huang, Feng and Wang, Ziyan and Liu, Xuan and Zhang, Zhongfei and Zhang, Wen}, year={2023}, month={Jun.}, pages={5339-5347} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25665/25437", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25665", + "pdf_size": 19604961, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13161396752673908871&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "webmail.hzau.edu.cn;mail.hzau.edu.cn;webmail.hzau.edu.cn;webmail.hzau.edu.cn;webmail.hzau.edu.cn;binghamton.edu;mail.hzau.edu.cn", + "email": "webmail.hzau.edu.cn;mail.hzau.edu.cn;webmail.hzau.edu.cn;webmail.hzau.edu.cn;webmail.hzau.edu.cn;binghamton.edu;mail.hzau.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;0", + "aff_unique_norm": "Huazhong Agricultural University;Binghamton University", + "aff_unique_dep": "College of Informatics;Computer Science Department", + "aff_unique_url": "http://www.hzau.edu.cn/;https://www.binghamton.edu", + "aff_unique_abbr": ";Binghamton", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Binghamton", + "aff_country_unique_index": "0;0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": 
"article-25123", + "title": "Multi-Resolution Monocular Depth Map Fusion by Self-Supervised Gradient-Based Composition", + "track": "main", + "status": "Technical", + "abstract": "Monocular depth estimation is a challenging problem on which deep neural networks have demonstrated great potential. However, depth maps predicted by existing deep models usually lack fine-grained details due to convolution operations and down-samplings in networks. We find that increasing input resolution is helpful to preserve more local details while the estimation at low resolution is more accurate globally. Therefore, we propose a novel depth map fusion module to combine the advantages of estimations with multi-resolution inputs. Instead of merging the low- and high-resolution estimations equally, we adopt the core idea of Poisson fusion, trying to implant the gradient domain of high-resolution depth into the low-resolution depth. While classic Poisson fusion requires a fusion mask as supervision, we propose a self-supervised framework based on guided image filtering. We demonstrate that this gradient-based composition performs much better at noisy immunity, compared with the state-of-the-art depth map fusion method. Our lightweight depth fusion is one-shot and runs in real-time, making it 80X faster than a state-of-the-art depth fusion method. Quantitative evaluations demonstrate that the proposed method can be integrated into many fully convolutional monocular depth estimation backbones with a significant performance boost, leading to state-of-the-art results of detail enhancement on depth maps. 
Codes are released at https://github.com/yuinsky/gradient-based-depth-map-fusion.", + "primary_area": "computer vision i", + "author": "Yaqiao Dai; Renjiao Yi; Chenyang Zhu; Hongjun He; Kai Xu", + "authorids": "", + "aff": "National University of Defense Technology; National University of Defense Technology; National University of Defense Technology; National University of Defense Technology; National University of Defense Technology", + "bibtex": "@article{Dai_Yi_Zhu_He_Xu_2023, title={Multi-Resolution Monocular Depth Map Fusion by Self-Supervised Gradient-Based Composition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25123}, DOI={10.1609/aaai.v37i1.25123}, abstractNote={Monocular depth estimation is a challenging problem on which deep neural networks have demonstrated great potential. However, depth maps predicted by existing deep models usually lack fine-grained details due to convolution operations and down-samplings in networks. We find that increasing input resolution is helpful to preserve more local details while the estimation at low resolution is more accurate globally. Therefore, we propose a novel depth map fusion module to combine the advantages of estimations with multi-resolution inputs. Instead of merging the low- and high-resolution estimations equally, we adopt the core idea of Poisson fusion, trying to implant the gradient domain of high-resolution depth into the low-resolution depth. While classic Poisson fusion requires a fusion mask as supervision, we propose a self-supervised framework based on guided image filtering. We demonstrate that this gradient-based composition performs much better at noisy immunity, compared with the state-of-the-art depth map fusion method. Our lightweight depth fusion is one-shot and runs in real-time, making it 80X faster than a state-of-the-art depth fusion method. 
Quantitative evaluations demonstrate that the proposed method can be integrated into many fully convolutional monocular depth estimation backbones with a significant performance boost, leading to state-of-the-art results of detail enhancement on depth maps. Codes are released at https://github.com/yuinsky/gradient-based-depth-map-fusion.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dai, Yaqiao and Yi, Renjiao and Zhu, Chenyang and He, Hongjun and Xu, Kai}, year={2023}, month={Jun.}, pages={488-496} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25123/24895", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25123", + "pdf_size": 4696217, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4458606209213041691&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com; ; ; ", + "email": "gmail.com;gmail.com; ; ; ", + "github": "https://github.com/yuinsky/gradient-based-depth-map-fusion", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "National University of Defense Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://www.nudt.edu.cn/", + "aff_unique_abbr": "NUDT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25752", + "title": "Multi-Scale Control Signal-Aware Transformer for Motion Synthesis without Phase", + "track": "main", + "status": "Technical", + "abstract": "Synthesizing controllable motion for a character using deep learning has been a promising approach due to its potential to learn a compact model without laborious feature engineering. 
To produce dynamic motion from weak control signals such as desired paths, existing methods often require auxiliary information such as phases for alleviating motion ambiguity, which limits their generalisation capability. As past poses often contain useful auxiliary hints, in this paper, we propose a task-agnostic deep learning method, namely Multi-scale Control Signal-aware Transformer (MCS-T), with an attention based encoder-decoder architecture to discover the auxiliary information implicitly for synthesizing controllable motion without explicitly requiring auxiliary information such as phase. Specifically, an encoder is devised to adaptively formulate the motion patterns of a character's past poses with multi-scale skeletons, and a decoder driven by control signals to further synthesize and predict the character's state by paying context-specialised attention to the encoded past motion patterns. As a result, it helps alleviate the issues of low responsiveness and slow transition which often happen in conventional methods not using auxiliary information. Both qualitative and quantitative experimental results on an existing biped locomotion dataset, which involves diverse types of motion transitions, demonstrate the effectiveness of our method. 
In particular, MCS-T is able to successfully generate motions comparable to those generated by the methods using auxiliary information.", + "primary_area": "humans and ai", + "author": "Lintao Wang; Kun Hu; Lei Bai; Yu Ding; Wanli Ouyang; Zhiyong Wang", + "authorids": "", + "aff": "School of Computer Science, The University of Sydney, Australia; School of Computer Science, The University of Sydney, Australia; Shanghai AI Laboratory, China; Netease Fuxi AI Lab, China; School of Computer Science, The University of Sydney, Australia; School of Computer Science, The University of Sydney, Australia", + "bibtex": "@article{Wang_Hu_Bai_Ding_Ouyang_Wang_2023, title={Multi-Scale Control Signal-Aware Transformer for Motion Synthesis without Phase}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25752}, DOI={10.1609/aaai.v37i5.25752}, abstractNote={Synthesizing controllable motion for a character using deep learning has been a promising approach due to its potential to learn a compact model without laborious feature engineering. To produce dynamic motion from weak control signals such as desired paths, existing methods often require auxiliary information such as phases for alleviating motion ambiguity, which limits their generalisation capability. As past poses often contain useful auxiliary hints, in this paper, we propose a task-agnostic deep learning method, namely Multi-scale Control Signal-aware Transformer (MCS-T), with an attention based encoder-decoder architecture to discover the auxiliary information implicitly for synthesizing controllable motion without explicitly requiring auxiliary information such as phase. Specifically, an encoder is devised to adaptively formulate the motion patterns of a character\u2019s past poses with multi-scale skeletons, and a decoder driven by control signals to further synthesize and predict the character\u2019s state by paying context-specialised attention to the encoded past motion patterns. 
As a result, it helps alleviate the issues of low responsiveness and slow transition which often happen in conventional methods not using auxiliary information. Both qualitative and quantitative experimental results on an existing biped locomotion dataset, which involves diverse types of motion transitions, demonstrate the effectiveness of our method. In particular, MCS-T is able to successfully generate motions comparable to those generated by the methods using auxiliary information.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Lintao and Hu, Kun and Bai, Lei and Ding, Yu and Ouyang, Wanli and Wang, Zhiyong}, year={2023}, month={Jun.}, pages={6092-6100} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25752/25524", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25752", + "pdf_size": 7747043, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8686182199389507757&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "uni.sydney.edu.au;sydney.edu.au;gmail.com;corp.netease.com;sydney.edu.au;sydney.edu.au", + "email": "uni.sydney.edu.au;sydney.edu.au;gmail.com;corp.netease.com;sydney.edu.au;sydney.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;0;0", + "aff_unique_norm": "The University of Sydney;Shanghai AI Laboratory;Netease Fuxi AI Lab", + "aff_unique_dep": "School of Computer Science;;AI Lab", + "aff_unique_url": "https://www.sydney.edu.au;;https://www.netease.com", + "aff_unique_abbr": "USYD;;Netease Fuxi", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Sydney;", + "aff_country_unique_index": "0;0;1;1;0;0", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-26165", + "title": "Multi-Source Survival Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Survival analysis is the branch of statistics that studies the relation between 
the characteristics of living entities and their respective survival times, taking into account the partial information held by censored cases. A good analysis can, for example, determine whether one medical treatment for a group of patients is better than another. With the rise of machine learning, survival analysis can be modeled as learning a function that maps studied patients to their survival times. To succeed with that, there are three crucial issues to be tackled. \n First, some patient data is censored: we do not know the true survival times for all patients. Second, data is scarce, which led past research to treat different illness types as domains in a multi-task setup. Third, there is the need for adaptation to new or extremely rare illness types, where little or no labels are available. In contrast to previous multi-task setups, we want to investigate how to efficiently adapt to a new survival target domain from multiple survival source domains. \n For this, we introduce a new survival metric and the corresponding discrepancy measure between survival distributions. These allow us to define domain adaptation for survival analysis while incorporating censored data, which would otherwise have to be dropped. 
Our experiments on two cancer data sets reveal a superb performance on target domains, a better treatment recommendation, and a weight matrix with a plausible explanation.", + "primary_area": "machine learning iii", + "author": "Ammar Shaker; Carolin Lawrence", + "authorids": "", + "aff": "NEC Laboratories Europe GmbH, Heidelberg, Germany; NEC Laboratories Europe GmbH, Heidelberg, Germany", + "bibtex": "@article{Shaker_Lawrence_2023, title={Multi-Source Survival Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26165}, DOI={10.1609/aaai.v37i8.26165}, abstractNote={Survival analysis is the branch of statistics that studies the relation between the characteristics of living entities and their respective survival times, taking into account the partial information held by censored cases. A good analysis can, for example, determine whether one medical treatment for a group of patients is better than another. With the rise of machine learning, survival analysis can be modeled as learning a function that maps studied patients to their survival times. To succeed with that, there are three crucial issues to be tackled. First, some patient data is censored: we do not know the true survival times for all patients. Second, data is scarce, which led past research to treat different illness types as domains in a multi-task setup. Third, there is the need for adaptation to new or extremely rare illness types, where little or no labels are available. In contrast to previous multi-task setups, we want to investigate how to efficiently adapt to a new survival target domain from multiple survival source domains. For this, we introduce a new survival metric and the corresponding discrepancy measure between survival distributions. These allow us to define domain adaptation for survival analysis while incorporating censored data, which would otherwise have to be dropped. 
Our experiments on two cancer data sets reveal a superb performance on target domains, a better treatment recommendation, and a weight matrix with a plausible explanation.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shaker, Ammar and Lawrence, Carolin}, year={2023}, month={Jun.}, pages={9752-9762} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26165/25937", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26165", + "pdf_size": 330155, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5737417388599192617&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "neclab.eu;neclab.eu", + "email": "neclab.eu;neclab.eu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "NEC Laboratories Europe", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nec-labs.eu", + "aff_unique_abbr": "NEC LE", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Heidelberg", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25725", + "title": "Multi-Stage Facility Location Problems with Transient Agents", + "track": "main", + "status": "Technical", + "abstract": "We study various models for the one-dimensional multi-stage facility location problems with transient agents, where a transient agent arrives in some stage and stays for a number of consecutive stages. In the problems, we need to serve each agent in one of their stages by determining the location of the facility at each stage. In the first model, we assume there is no cost for moving the facility across the stages. We focus on optimal algorithms to minimize both the social cost objective, defined as the total distance of all agents to the facility over all stages, and the maximum cost objective, defined as the max distance of any agent to the facility over all stages. 
For each objective, we give a slice-wise polynomial (XP) algorithm (i.e., solvable in m^f(k) for some fixed parameter k and computable function f, where m is the input size) and show that there is a polynomial-time algorithm when a natural first-come-first-serve (FCFS) order of agent serving is enforced. We then consider the mechanism design problem, where the agents' locations and arrival stages are private, and design a group strategy-proof mechanism that achieves good approximation ratios for both objectives and settings with and without FCFS ordering. In the second model, we consider the facility's moving cost between adjacent stages under the social cost objective, which accounts for the total moving distance of the facility. Correspondingly, we design XP (and polynomial time) algorithms and a group strategy-proof mechanism for settings with or without the FCFS ordering.", + "primary_area": "game theory and economic paradigms", + "author": "Xuezhen Wang; Vincent Chau; Hau Chan; Ken C.K. Fong; Minming Li", + "authorids": "", + "aff": "Department of Computer Science, City University of Hong Kong, HKSAR China; School of Computer Science and Engineering, Southeast University, China; Department of Computer Science and Engineering, University of Nebraska-Lincoln, USA; Department of Computing and Decision Sciences, Lingnan University, HKSAR China; Department of Computer Science, City University of Hong Kong, HKSAR China", + "bibtex": "@article{Wang_Chau_Chan_Fong_Li_2023, title={Multi-Stage Facility Location Problems with Transient Agents}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25725}, DOI={10.1609/aaai.v37i5.25725}, abstractNote={We study various models for the one-dimensional multi-stage facility location problems with transient agents, where a transient agent arrives in some stage and stays for a number of consecutive stages. 
In the problems, we need to serve each agent in one of their stages by determining the location of the facility at each stage. In the first model, we assume there is no cost for moving the facility across the stages. We focus on optimal algorithms to minimize both the social cost objective, defined as the total distance of all agents to the facility over all stages, and the maximum cost objective, defined as the max distance of any agent to the facility over all stages. For each objective, we give a slice-wise polynomial (XP) algorithm (i.e., solvable in m^f(k) for some fixed parameter k and computable function f, where m is the input size) and show that there is a polynomial-time algorithm when a natural first-come-first-serve (FCFS) order of agent serving is enforced. We then consider the mechanism design problem, where the agents\u2019 locations and arrival stages are private, and design a group strategy-proof mechanism that achieves good approximation ratios for both objectives and settings with and without FCFS ordering. In the second model, we consider the facility\u2019s moving cost between adjacent stages under the social cost objective, which accounts for the total moving distance of the facility. Correspondingly, we design XP (and polynomial time) algorithms and a group strategy-proof mechanism for settings with or without the FCFS ordering.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Xuezhen and Chau, Vincent and Chan, Hau and Fong, Ken C.K. 
and Li, Minming}, year={2023}, month={Jun.}, pages={5850-5857} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25725/25497", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25725", + "pdf_size": 518695, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2131165611085724342&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;seu.edu.cn;unl.edu;ln.edu.hk;cityu.edu.hk", + "email": "gmail.com;seu.edu.cn;unl.edu;ln.edu.hk;cityu.edu.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "City University of Hong Kong;Southeast University;University of Nebraska-Lincoln;Lingnan University", + "aff_unique_dep": "Department of Computer Science;School of Computer Science and Engineering;Department of Computer Science and Engineering;Department of Computing and Decision Sciences", + "aff_unique_url": "https://www.cityu.edu.hk;https://www.seu.edu.cn/;https://www.unl.edu;http://www.ln.edu.hk", + "aff_unique_abbr": "CityU;SEU;UNL;Lingnan", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Lincoln", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25389", + "title": "Multi-Stream Representation Learning for Pedestrian Trajectory Prediction", + "track": "main", + "status": "Technical", + "abstract": "Forecasting the future trajectory of pedestrians is an important task in computer vision with a range of applications, from security cameras to autonomous driving. It is very challenging because pedestrians not only move individually across time but also interact spatially, and the spatial and temporal information is deeply coupled with one another in a multi-agent scenario. Learning such complex spatio-temporal correlation is a fundamental issue in pedestrian trajectory prediction. 
Inspired by the procedure that the hippocampus processes and integrates spatio-temporal information to form memories, we propose a novel multi-stream representation learning module to learn complex spatio-temporal features of pedestrian trajectory. Specifically, we learn temporal, spatial and cross spatio-temporal correlation features in three respective pathways and then adaptively integrate these features with learnable weights by a gated network. Besides, we leverage the sparse attention gate to select informative interactions and correlations brought by complex spatio-temporal modeling and reduce complexity of our model. We evaluate our proposed method on two commonly used datasets, i.e. ETH-UCY and SDD, and the experimental results demonstrate our method achieves the state-of-the-art performance. Code: https://github.com/YuxuanIAIR/MSRL-master", + "primary_area": "computer vision iii", + "author": "Yuxuan Wu; Le Wang; Sanping Zhou; Jinghai Duan; Gang Hua; Wei Tang", + "authorids": "", + "aff": "Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University; Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University; Institute of Artificial Intelligence and Robotics, Xi\u2019an Jiaotong University; School of Software Engineering, Xi\u2019an Jiaotong University; Wormpex AI Research; University of Illinois at Chicago", + "bibtex": "@article{Wu_Wang_Zhou_Duan_Hua_Tang_2023, title={Multi-Stream Representation Learning for Pedestrian Trajectory Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25389}, DOI={10.1609/aaai.v37i3.25389}, abstractNote={Forecasting the future trajectory of pedestrians is an important task in computer vision with a range of applications, from security cameras to autonomous driving. 
It is very challenging because pedestrians not only move individually across time but also interact spatially, and the spatial and temporal information is deeply coupled with one another in a multi-agent scenario. Learning such complex spatio-temporal correlation is a fundamental issue in pedestrian trajectory prediction. Inspired by the procedure that the hippocampus processes and integrates spatio-temporal information to form memories, we propose a novel multi-stream representation learning module to learn complex spatio-temporal features of pedestrian trajectory. Specifically, we learn temporal, spatial and cross spatio-temporal correlation features in three respective pathways and then adaptively integrate these features with learnable weights by a gated network. Besides, we leverage the sparse attention gate to select informative interactions and correlations brought by complex spatio-temporal modeling and reduce complexity of our model. We evaluate our proposed method on two commonly used datasets, i.e. ETH-UCY and SDD, and the experimental results demonstrate our method achieves the state-of-the-art performance. 
Code: https://github.com/YuxuanIAIR/MSRL-master}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yuxuan and Wang, Le and Zhou, Sanping and Duan, Jinghai and Hua, Gang and Tang, Wei}, year={2023}, month={Jun.}, pages={2875-2882} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25389/25161", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25389", + "pdf_size": 652586, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8656701772163740764&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;xjtu.edu.cn;xjtu.edu.cn;stu.xjtu.edu.cn;gmail.com;uic.edu", + "email": "gmail.com;xjtu.edu.cn;xjtu.edu.cn;stu.xjtu.edu.cn;gmail.com;uic.edu", + "github": "https://github.com/YuxuanIAIR/MSRL-master", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;2", + "aff_unique_norm": "Xi'an Jiaotong University;Wormpex AI Research;University of Illinois at Chicago", + "aff_unique_dep": "Institute of Artificial Intelligence and Robotics;AI Research;", + "aff_unique_url": "http://www.xjtu.edu.cn;;https://www.uic.edu", + "aff_unique_abbr": "XJTU;Wormpex AI;UIC", + "aff_campus_unique_index": "0;0;0;0;2", + "aff_campus_unique": "Xi'an;;Chicago", + "aff_country_unique_index": "0;0;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26366", + "title": "Multi-Unit Auctions for Allocating Chance-Constrained Resources", + "track": "main", + "status": "Technical", + "abstract": "Sharing scarce resources is a key challenge in multi-agent interaction, especially when individual agents are uncertain about their future consumption. We present a new auction mechanism for preallocating multi-unit resources among agents, while limiting the chance of resource violations. 
By planning for a chance constraint, we strike a balance between worst-case approaches, which under-utilise resources, and expected-case approaches, which lack formal guarantees. We also present an algorithm that allows agents to generate bids via multi-objective reasoning, which are then submitted to the auction. We then discuss how the auction can be extended to non-cooperative scenarios. Finally, we demonstrate empirically that our auction outperforms state-of-the-art techniques for chance-constrained multi-agent resource allocation in complex settings with up to hundreds of agents.", + "primary_area": "multiagent systems", + "author": "Anna Gautier; Bruno Lacerda; Nick Hawes; Michael Wooldridge", + "authorids": "", + "aff": "University of Oxford; University of Oxford; University of Oxford; University of Oxford", + "bibtex": "@article{Gautier_Lacerda_Hawes_Wooldridge_2023, title={Multi-Unit Auctions for Allocating Chance-Constrained Resources}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26366}, DOI={10.1609/aaai.v37i10.26366}, abstractNote={Sharing scarce resources is a key challenge in multi-agent interaction, especially when individual agents are uncertain about their future consumption. We present a new auction mechanism for preallocating multi-unit resources among agents, while limiting the chance of resource violations. By planning for a chance constraint, we strike a balance between worst-case approaches, which under-utilise resources, and expected-case approaches, which lack formal guarantees. We also present an algorithm that allows agents to generate bids via multi-objective reasoning, which are then submitted to the auction. We then discuss how the auction can be extended to non-cooperative scenarios. 
Finally, we demonstrate empirically that our auction outperforms state-of-the-art techniques for chance-constrained multi-agent resource allocation in complex settings with up to hundreds of agents.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gautier, Anna and Lacerda, Bruno and Hawes, Nick and Wooldridge, Michael}, year={2023}, month={Jun.}, pages={11560-11568} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26366/26138", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26366", + "pdf_size": 570933, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14263402890981836701&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "eng.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk;cs.ox.ac.uk", + "email": "eng.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26077", + "title": "Multi-View Domain Adaptive Object Detection on Camera Networks", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we study a new domain adaptation setting on camera networks, namely Multi-View Domain Adaptive Object Detection (MVDA-OD), in which labeled source data is unavailable in the target adaptation process and target data is captured from multiple overlapping cameras. In such a challenging context, existing methods including adversarial training and self-training fall short due to multi-domain data shift and the lack of source data. To tackle this problem, we propose a novel training framework consisting of two stages. 
First, we pre-train the backbone using self-supervised learning, in which a multi-view association is developed to construct an effective pretext task. Second, we fine-tune the detection head using robust self-training, where a tracking-based single-view augmentation is introduced to achieve weak-hard consistency learning. By doing so, an object detection model can take advantage of informative samples generated by multi-view association and single-view augmentation to learn discriminative backbones as well as robust detection classifiers. Experiments on two real-world multi-camera datasets demonstrate significant advantages of our approach over the state-of-the-art domain adaptive object detection methods.", + "primary_area": "machine learning ii", + "author": "Yan Lu; Zhun Zhong; Yuanchao Shu", + "authorids": "", + "aff": "Department of Electrical Engineering and Computer Sciences, New York University; Department of Information Engineering and Computer Science, University of Trento; College of Control Science and Engineering, Zhejiang University", + "bibtex": "@article{Lu_Zhong_Shu_2023, title={Multi-View Domain Adaptive Object Detection on Camera Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26077}, DOI={10.1609/aaai.v37i7.26077}, abstractNote={In this paper, we study a new domain adaptation setting on camera networks, namely Multi-View Domain Adaptive Object Detection (MVDA-OD), in which labeled source data is unavailable in the target adaptation process and target data is captured from multiple overlapping cameras. In such a challenging context, existing methods including adversarial training and self-training fall short due to multi-domain data shift and the lack of source data. To tackle this problem, we propose a novel training framework consisting of two stages. First, we pre-train the backbone using self-supervised learning, in which a multi-view association is developed to construct an effective pretext task. 
Second, we fine-tune the detection head using robust self-training, where a tracking-based single-view augmentation is introduced to achieve weak-hard consistency learning. By doing so, an object detection model can take advantage of informative samples generated by multi-view association and single-view augmentation to learn discriminative backbones as well as robust detection classifiers. Experiments on two real-world multi-camera datasets demonstrate significant advantages of our approach over the state-of-the-art domain adaptive object detection methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Yan and Zhong, Zhun and Shu, Yuanchao}, year={2023}, month={Jun.}, pages={8966-8974} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26077/25849", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26077", + "pdf_size": 13355563, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17716312617460558732&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "hotmail.com;gmail.com;zju.edu.cn", + "email": "hotmail.com;gmail.com;zju.edu.cn", + "github": "", + "project": "https://jason-cs18.github.io", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "New York University;University of Trento;Zhejiang University", + "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences;Department of Information Engineering and Computer Science;College of Control Science and Engineering", + "aff_unique_url": "https://www.nyu.edu;https://www.unitn.it;http://www.zju.edu.cn", + "aff_unique_abbr": "NYU;UniTN;ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2", + "aff_country_unique": "United States;Italy;China" + }, + { + "id": "article-25975", + "title": "Multi-View MOOC Quality Evaluation via Information-Aware Graph Representation Learning", + "track": "main", + "status": 
"Technical", + "abstract": "In this paper, we study the problem of MOOC quality evaluation that is essential for improving the course materials, promoting students' learning efficiency, and benefiting user services. \nWhile achieving promising performances, current works still suffer from the complicated interactions and relationships of entities in MOOC platforms. \nTo tackle the challenges, we formulate the problem as a course representation learning task based, and develop an Information-aware Graph Representation Learning(IaGRL) for multi-view MOOC quality evaluation. \nSpecifically, We first build a MOOC Heterogeneous Network (HIN) to represent the interactions and relationships among entities in MOOC platforms. \nAnd then we decompose the MOOC HIN into multiple single-relation graphs based on meta-paths to depict multi-view semantics of courses. \nThe course representation learning can be further converted to a multi-view graph representation task. \nDifferent from traditional graph representation learning, the learned course representations are expected to match the following three types of validity: \n(1) the agreement on expressiveness between the raw course portfolio and the learned course representations; \n(2) the consistency between the representations in each view and the unified representations; \n(3) the alignment between the course and MOOC platform representations. \nTherefore, we propose to exploit mutual information for preserving the validity of course representations. 
\nWe conduct extensive experiments over real-world MOOC datasets to demonstrate the effectiveness of our proposed method.", + "primary_area": "machine learning ii", + "author": "Lu Jiang; Yibin Wang; Jianan Wang; Pengyang Wang; Minghao Yin", + "authorids": "", + "aff": "School of Computer Science and Information Technology, Northeast Normal University, China; School of Computer Science and Information Technology, Northeast Normal University, China; School of Computer Science and Information Technology, Northeast Normal University, China; Department of Computer and Information Science, University of Macau, China; School of Computer Science and Information Technology, Northeast Normal University, China + Key Laboratory of Applied Statistics of MOE, Northeast Normal University, Changchun, China", + "bibtex": "@article{Jiang_Wang_Wang_Wang_Yin_2023, title={Multi-View MOOC Quality Evaluation via Information-Aware Graph Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25975}, DOI={10.1609/aaai.v37i7.25975}, abstractNote={In this paper, we study the problem of MOOC quality evaluation that is essential for improving the course materials, promoting students\u2019 learning efficiency, and benefiting user services. While achieving promising performances, current works still suffer from the complicated interactions and relationships of entities in MOOC platforms. To tackle the challenges, we formulate the problem as a course representation learning task based, and develop an Information-aware Graph Representation Learning(IaGRL) for multi-view MOOC quality evaluation. Specifically, We first build a MOOC Heterogeneous Network (HIN) to represent the interactions and relationships among entities in MOOC platforms. And then we decompose the MOOC HIN into multiple single-relation graphs based on meta-paths to depict multi-view semantics of courses. 
The course representation learning can be further converted to a multi-view graph representation task. Different from traditional graph representation learning, the learned course representations are expected to match the following three types of validity: (1) the agreement on expressiveness between the raw course portfolio and the learned course representations; (2) the consistency between the representations in each view and the unified representations; (3) the alignment between the course and MOOC platform representations. Therefore, we propose to exploit mutual information for preserving the validity of course representations. We conduct extensive experiments over real-world MOOC datasets to demonstrate the effectiveness of our proposed method.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Lu and Wang, Yibin and Wang, Jianan and Wang, Pengyang and Yin, Minghao}, year={2023}, month={Jun.}, pages={8070-8077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25975/25747", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25975", + "pdf_size": 939743, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11905313054260919273&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "nenu.edu.cn;nenu.edu.cn;nenu.edu.cn;um.edu.mo;nenu.edu.cn", + "email": "nenu.edu.cn;nenu.edu.cn;nenu.edu.cn;um.edu.mo;nenu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0+0", + "aff_unique_norm": "Northeast Normal University;University of Macau", + "aff_unique_dep": "School of Computer Science and Information Technology;Department of Computer and Information Science", + "aff_unique_url": "http://www.nenu.edu.cn;https://www.um.edu.mo", + "aff_unique_abbr": "NENU;UM", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Changchun", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { 
+ "id": "article-25206", + "title": "MultiAct: Long-Term 3D Human Motion Generation from Multiple Action Labels", + "track": "main", + "status": "Technical", + "abstract": "We tackle the problem of generating long-term 3D human motion from multiple action labels. Two main previous approaches, such as action- and motion-conditioned methods, have limitations to solve this problem. The action-conditioned methods generate a sequence of motion from a single action. Hence, it cannot generate long-term motions composed of multiple actions and transitions between actions. Meanwhile, the motion-conditioned methods generate future motions from initial motion. The generated future motions only depend on the past, so they are not controllable by the user's desired actions. We present MultiAct, the first framework to generate long-term 3D human motion from multiple action labels. MultiAct takes account of both action and motion conditions with a unified recurrent generation system. It repetitively takes the previous motion and action label; then, it generates a smooth transition and the motion of the given action. As a result, MultiAct produces realistic long-term motion controlled by the given sequence of multiple action labels. The code is publicly available in https://github.com/TaeryungLee/MultiAct RELEASE.", + "primary_area": "computer vision i", + "author": "Taeryung Lee; Gyeongsik Moon; Kyoung Mu Lee", + "authorids": "", + "aff": "IPAI, Seoul National University, Korea; Meta Reality Labs Research; IPAI, Seoul National University, Korea+Dept. of ECE & ASRI, Seoul National University, Korea", + "bibtex": "@article{Lee_Moon_Lee_2023, title={MultiAct: Long-Term 3D Human Motion Generation from Multiple Action Labels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25206}, DOI={10.1609/aaai.v37i1.25206}, abstractNote={We tackle the problem of generating long-term 3D human motion from multiple action labels. 
Two main previous approaches, such as action- and motion-conditioned methods, have limitations to solve this problem. The action-conditioned methods generate a sequence of motion from a single action. Hence, it cannot generate long-term motions composed of multiple actions and transitions between actions. Meanwhile, the motion-conditioned methods generate future motions from initial motion. The generated future motions only depend on the past, so they are not controllable by the user\u2019s desired actions. We present MultiAct, the first framework to generate long-term 3D human motion from multiple action labels. MultiAct takes account of both action and motion conditions with a unified recurrent generation system. It repetitively takes the previous motion and action label; then, it generates a smooth transition and the motion of the given action. As a result, MultiAct produces realistic long-term motion controlled by the given sequence of multiple action labels. The code is publicly available in https://github.com/TaeryungLee/MultiAct RELEASE.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Taeryung and Moon, Gyeongsik and Lee, Kyoung Mu}, year={2023}, month={Jun.}, pages={1231-1239} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25206/24978", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25206", + "pdf_size": 2418701, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11555892361554001818&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "snu.ac.kr;meta.com;snu.ac.kr", + "email": "snu.ac.kr;meta.com;snu.ac.kr", + "github": "https://github.com/TaeryungLee/MultiAct", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+0", + "aff_unique_norm": "Seoul National University;Meta Reality Labs", + "aff_unique_dep": "IPAI;Research", + "aff_unique_url": "https://www.snu.ac.kr;https://www.meta.com", + "aff_unique_abbr": 
"SNU;MRL", + "aff_campus_unique_index": "0;0+0", + "aff_campus_unique": "Seoul;", + "aff_country_unique_index": "0;1;0+0", + "aff_country_unique": "Korea;United States" + }, + { + "id": "article-26499", + "title": "MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing", + "track": "main", + "status": "Technical", + "abstract": "Text-to-SQL semantic parsing is an important NLP task, which facilitates the interaction between users and the database. Much recent progress in text-to-SQL has been driven by large-scale datasets, but most of them are centered on English. In this work, we present MultiSpider, the largest multilingual text-to-SQL semantic parsing dataset which covers seven languages (English, German, French, Spanish, Japanese, Chinese, and Vietnamese). Upon MultiSpider we further identify the lexical and structural challenges of text-to-SQL (caused by specific language properties and dialect sayings) and their intensity across different languages. Experimental results under various settings (zero-shot, monolingual and multilingual) reveal a 6.1% absolute drop in accuracy in non-English languages. Qualitative and quantitative analyses are conducted to understand the reason for the performance drop of each language. 
Besides the dataset, we also propose a simple schema augmentation framework SAVe (Schema-Augmentation-with-Verification), which significantly boosts the overall performance by about 1.8% and closes the 29.5% performance gap across languages.", + "primary_area": "speech natural language processing", + "author": "Longxu Dou; Yan Gao; Mingyang Pan; Dingzirui Wang; Wanxiang Che; Dechen Zhan; Jian-Guang Lou", + "authorids": "", + "aff": "Harbin Institute of Technology; Microsoft Research Asia; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology; Microsoft Research Asia", + "bibtex": "@article{Dou_Gao_Pan_Wang_Che_Zhan_Lou_2023, title={MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26499}, DOI={10.1609/aaai.v37i11.26499}, abstractNote={Text-to-SQL semantic parsing is an important NLP task, which facilitates the interaction between users and the database. Much recent progress in text-to-SQL has been driven by large-scale datasets, but most of them are centered on English. In this work, we present MultiSpider, the largest multilingual text-to-SQL semantic parsing dataset which covers seven languages (English, German, French, Spanish, Japanese, Chinese, and Vietnamese). Upon MultiSpider we further identify the lexical and structural challenges of text-to-SQL (caused by specific language properties and dialect sayings) and their intensity across different languages. Experimental results under various settings (zero-shot, monolingual and multilingual) reveal a 6.1% absolute drop in accuracy in non-English languages. Qualitative and quantitative analyses are conducted to understand the reason for the performance drop of each language. 
Besides the dataset, we also propose a simple schema augmentation framework SAVe (Schema-Augmentation-with-Verification), which significantly boosts the overall performance by about 1.8% and closes the 29.5% performance gap across languages.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dou, Longxu and Gao, Yan and Pan, Mingyang and Wang, Dingzirui and Che, Wanxiang and Zhan, Dechen and Lou, Jian-Guang}, year={2023}, month={Jun.}, pages={12745-12753} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26499/26271", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26499", + "pdf_size": 1039789, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10429790401648419835&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff_domain": "ir.hit.edu.cn;microsoft.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;microsoft.com", + "email": "ir.hit.edu.cn;microsoft.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;microsoft.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;0;0;1", + "aff_unique_norm": "Harbin Institute of Technology;Microsoft Research", + "aff_unique_dep": ";Research", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "HIT;MSR Asia", + "aff_campus_unique_index": "0;1;0;0;0;0;1", + "aff_campus_unique": "Harbin;Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25711", + "title": "Multiagent MST Cover: Pleasing All Optimally via a Simple Voting Rule", + "track": "main", + "status": "Technical", + "abstract": "Given a connected graph on whose edges we can build roads to connect the nodes, a number of agents hold possibly different perspectives on which edges should be selected by assigning different edge weights. 
Our task is to build a minimum number of roads so that every agent has a spanning tree in the built subgraph whose weight is the same as a minimum spanning tree in the original graph. We first show that this problem is NP-hard and does not admit better than ((1-o(1)) ln k)-approximation polynomial-time algorithms unless P = NP, where k is the number of agents. We then give a simple voting algorithm with an optimal approximation ratio. Moreover, our algorithm only needs to access the agents' rankings on the edges. Finally, we extend our problem to submodular objective functions and Matroid rank constraints.", + "primary_area": "game theory and economic paradigms", + "author": "Bo Li; Xiaowei Wu; Chenyang Xu; Ruilong Zhang", + "authorids": "", + "aff": "Department of Computing, The Hong Kong Polytechnic University; IOTSC, University of Macau; Software Engineering Institute, East China Normal University + College of Computer Science, Zhejiang University; Department of Computer Science, City University of Hong Kong", + "bibtex": "@article{Li_Wu_Xu_Zhang_2023, title={Multiagent MST Cover: Pleasing All Optimally via a Simple Voting Rule}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25711}, DOI={10.1609/aaai.v37i5.25711}, abstractNote={Given a connected graph on whose edges we can build roads to connect the nodes, a number of agents hold possibly different perspectives on which edges should be selected by assigning different edge weights. Our task is to build a minimum number of roads so that every agent has a spanning tree in the built subgraph whose weight is the same as a minimum spanning tree in the original graph. We first show that this problem is NP-hard and does not admit better than ((1-o(1)) ln k)-approximation polynomial-time algorithms unless P = NP, where k is the number of agents. We then give a simple voting algorithm with an optimal approximation ratio. 
Moreover, our algorithm only needs to access the agents\u2019 rankings on the edges. Finally, we extend our problem to submodular objective functions and Matroid rank constraints.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Bo and Wu, Xiaowei and Xu, Chenyang and Zhang, Ruilong}, year={2023}, month={Jun.}, pages={5730-5738} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25711/25483", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25711", + "pdf_size": 183296, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:2xcaSxpElwsJ:scholar.google.com/&scioq=Multiagent+MST+Cover:+Pleasing+All+Optimally+via+a+Simple+Voting+Rule&hl=en&as_sdt=0,5", + "gs_version_total": 5, + "aff_domain": "polyu.edu.hk;um.edu.mo;zju.edu.cn;my.cityu.edu.hk", + "email": "polyu.edu.hk;um.edu.mo;zju.edu.cn;my.cityu.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2+3;4", + "aff_unique_norm": "The Hong Kong Polytechnic University;University of Macau;East China Normal University;Zhejiang University;City University of Hong Kong", + "aff_unique_dep": "Department of Computing;IOTSC;Software Engineering Institute;College of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.polyu.edu.hk;https://www.um.edu.mo;http://www.ecnu.edu.cn;http://www.zju.edu.cn;https://www.cityu.edu.hk", + "aff_unique_abbr": "PolyU;UM;ECNU;ZJU;CityU", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "Hong Kong;;Shanghai", + "aff_country_unique_index": "0;1;0+0;0", + "aff_country_unique": "China;Macau" + }, + { + "id": "article-26924", + "title": "Multimodal Deep Generative Models for Remote Medical Applications", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Visible-to-Thermal (VT) face translation is an under-studied problem of image-to-image translation that offers an AI-enabled alternative 
to traditional thermal sensors. Over three phases, my Doctoral Proposal explores developing multimodal deep generative solutions that can be applied towards telemedicine applications. These include the contribution of a novel Thermal Face Contrastive GAN (TFC-GAN), exploration of hybridized diffusion-GAN models, application on real clinical thermal data at the National Institutes of Health, and exploration of strategies for Federated Learning (FL) in heterogenous data settings.", + "primary_area": "", + "author": "Catherine Ordun", + "authorids": "", + "aff": "University of Maryland Baltimore County, Dept. Information Systems", + "bibtex": "@article{Ordun_2024, title={Multimodal Deep Generative Models for Remote Medical Applications}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26924}, DOI={10.1609/aaai.v37i13.26924}, abstractNote={Visible-to-Thermal (VT) face translation is an under-studied problem of image-to-image translation that offers an AI-enabled alternative to traditional thermal sensors. Over three phases, my Doctoral Proposal explores developing multimodal deep generative solutions that can be applied towards telemedicine applications. 
These include the contribution of a novel Thermal Face Contrastive GAN (TFC-GAN), exploration of hybridized diffusion-GAN models, application on real clinical thermal data at the National Institutes of Health, and exploration of strategies for Federated Learning (FL) in heterogenous data settings.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ordun, Catherine}, year={2024}, month={Jul.}, pages={16127-16128} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26924/26696", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26924", + "pdf_size": 1168702, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11186861958056482205&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "umbc.edu", + "email": "umbc.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Maryland, Baltimore County", + "aff_unique_dep": "Department of Information Systems", + "aff_unique_url": "https://www.umbc.edu", + "aff_unique_abbr": "UMBC", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Baltimore County", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26792", + "title": "Multimodal Propaganda Processing", + "track": "senior member presentation blue sky papers", + "status": "Technical", + "abstract": "Propaganda campaigns have long been used to influence public opinion via disseminating biased and/or misleading information. Despite the increasing prevalence of propaganda content on the Internet, few attempts have been made by AI researchers to analyze such content. We introduce the task of multimodal propaganda processing, where the goal is to automatically analyze propaganda content. 
We believe that this task presents a long-term challenge to AI researchers and that successful processing of propaganda could bring machine understanding one important step closer to human understanding. We discuss the technical challenges associated with this task and outline the steps that need to be taken to address it.", + "primary_area": "", + "author": "Vincent Ng; Shengjie Li", + "authorids": "", + "aff": "Human Language Technology Research Institute, University of Texas at Dallas; Human Language Technology Research Institute, University of Texas at Dallas", + "bibtex": "@article{Ng_Li_2024, title={Multimodal Propaganda Processing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26792}, DOI={10.1609/aaai.v37i13.26792}, abstractNote={Propaganda campaigns have long been used to influence public opinion via disseminating biased and/or misleading information. Despite the increasing prevalence of propaganda content on the Internet, few attempts have been made by AI researchers to analyze such content. We introduce the task of multimodal propaganda processing, where the goal is to automatically analyze propaganda content. We believe that this task presents a long-term challenge to AI researchers and that successful processing of propaganda could bring machine understanding one important step closer to human understanding. 
We discuss the technical challenges associated with this task and outline the steps that need to be taken to address it.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ng, Vincent and Li, Shengjie}, year={2024}, month={Jul.}, pages={15368-15375} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26792/26564", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26792", + "pdf_size": 4431885, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3995787718597290100&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "hlt.utdallas.edu;hlt.utdallas.edu", + "email": "hlt.utdallas.edu;hlt.utdallas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Texas at Dallas", + "aff_unique_dep": "Human Language Technology Research Institute", + "aff_unique_url": "https://www.utdallas.edu", + "aff_unique_abbr": "UT Dallas", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Dallas", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25562", + "title": "Multiple Robust Learning for Recommendation", + "track": "main", + "status": "Technical", + "abstract": "In recommender systems, a common problem is the presence of various biases in the collected data, which deteriorates the generalization ability of the recommendation models and leads to inaccurate predictions. Doubly robust (DR) learning has been studied in many tasks in RS, with the advantage that unbiased learning can be achieved when either a single imputation or a single propensity model is accurate. In this paper, we propose a multiple robust (MR) estimator that can take the advantage of multiple candidate imputation and propensity models to achieve unbiasedness. 
Specifically, the MR estimator is unbiased when any of the imputation or propensity models, or a linear combination of these models is accurate. Theoretical analysis shows that the proposed MR is an enhanced version of DR when only having a single imputation and propensity model, and has a smaller bias. Inspired by the generalization error bound of MR, we further propose a novel multiple robust learning approach with stabilization. We conduct extensive experiments on real-world and semi-synthetic datasets, which demonstrates the superiority of the proposed approach over state-of-the-art methods.", + "primary_area": "data mining and knowledge management", + "author": "Haoxuan Li; Quanyu Dai; Yuru Li; Yan Lyu; Zhenhua Dong; Xiao-Hua Zhou; Peng Wu", + "authorids": "", + "aff": "Peking University; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Peking University; Huawei Noah\u2019s Ark Lab; Peking University+Beijing Technology and Business University; Beijing Technology and Business University", + "bibtex": "@article{Li_Dai_Li_Lyu_Dong_Zhou_Wu_2023, title={Multiple Robust Learning for Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25562}, DOI={10.1609/aaai.v37i4.25562}, abstractNote={In recommender systems, a common problem is the presence of various biases in the collected data, which deteriorates the generalization ability of the recommendation models and leads to inaccurate predictions. Doubly robust (DR) learning has been studied in many tasks in RS, with the advantage that unbiased learning can be achieved when either a single imputation or a single propensity model is accurate. In this paper, we propose a multiple robust (MR) estimator that can take the advantage of multiple candidate imputation and propensity models to achieve unbiasedness. Specifically, the MR estimator is unbiased when any of the imputation or propensity models, or a linear combination of these models is accurate. 
Theoretical analysis shows that the proposed MR is an enhanced version of DR when only having a single imputation and propensity model, and has a smaller bias. Inspired by the generalization error bound of MR, we further propose a novel multiple robust learning approach with stabilization. We conduct extensive experiments on real-world and semi-synthetic datasets, which demonstrates the superiority of the proposed approach over state-of-the-art methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Haoxuan and Dai, Quanyu and Li, Yuru and Lyu, Yan and Dong, Zhenhua and Zhou, Xiao-Hua and Wu, Peng}, year={2023}, month={Jun.}, pages={4417-4425} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25562/25334", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25562", + "pdf_size": 209243, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5861448839445232266&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.pku.edu.cn;huawei.com;huawei.com;stu.pku.edu.cn;huawei.com;bicmr.pku.edu.cn;btbu.edu.cn", + "email": "stu.pku.edu.cn;huawei.com;huawei.com;stu.pku.edu.cn;huawei.com;bicmr.pku.edu.cn;btbu.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;0;1;0+2;2", + "aff_unique_norm": "Peking University;Huawei;Beijing Technology and Business University", + "aff_unique_dep": ";Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.huawei.com;http://www.btbu.edu.cn", + "aff_unique_abbr": "Peking U;Huawei;BTBU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26105", + "title": "Multiplex Graph Representation Learning via Common and Private Information Mining", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised multiplex graph representation 
learning (SMGRL) has attracted increasing interest, but previous SMGRL methods still suffer from the following issues: (i) they focus on the common information only (but ignore the private information in graph structures) to lose some essential characteristics related to downstream tasks, and (ii) they ignore the redundant information in node representations of each graph. To solve these issues, this paper proposes a new SMGRL method by jointly mining the common information and the private information in the multiplex graph while minimizing the redundant information within node representations. Specifically, the proposed method investigates the decorrelation losses to extract the common information and minimize the redundant information, while investigating the reconstruction losses to maintain the private information. Comprehensive experimental results verify the superiority of the proposed method, on four public benchmark datasets.", + "primary_area": "machine learning iii", + "author": "Yujie Mo; Zongqian Wu; Yuhuan Chen; Xiaoshuang Shi; Heng Tao Shen; Xiaofeng Zhu", + "authorids": "", + "aff": "1School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China + 3Peng Cheng Laboratory, Shenzhen 518000, China; 4Guangxi Key Lab of Multi-Source Information Mining and Security, Guangxi Normal University, Guilin 541004, China; 1School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China; 1School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China; 1School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China + 2Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen 518000, China + 3Peng Cheng Laboratory, Shenzhen 518000, China; 1School of Computer Science and 
Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China + 2Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen 518000, China", + "bibtex": "@article{Mo_Wu_Chen_Shi_Shen_Zhu_2023, title={Multiplex Graph Representation Learning via Common and Private Information Mining}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26105}, DOI={10.1609/aaai.v37i8.26105}, abstractNote={Self-supervised multiplex graph representation learning (SMGRL) has attracted increasing interest, but previous SMGRL methods still suffer from the following issues: (i) they focus on the common information only (but ignore the private information in graph structures) to lose some essential characteristics related to downstream tasks, and (ii) they ignore the redundant information in node representations of each graph. To solve these issues, this paper proposes a new SMGRL method by jointly mining the common information and the private information in the multiplex graph while minimizing the redundant information within node representations. Specifically, the proposed method investigates the decorrelation losses to extract the common information and minimize the redundant information, while investigating the reconstruction losses to maintain the private information. 
Comprehensive experimental results verify the superiority of the proposed method, on four public benchmark datasets.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mo, Yujie and Wu, Zongqian and Chen, Yuhuan and Shi, Xiaoshuang and Shen, Heng Tao and Zhu, Xiaofeng}, year={2023}, month={Jun.}, pages={9217-9225} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26105/25877", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26105", + "pdf_size": 496324, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2866849059571047737&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com; ; ; ; ; ", + "email": "gmail.com; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;0;0;0+0+1;0+0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Peng Cheng Laboratory;Guangxi Normal University", + "aff_unique_dep": "School of Computer Science and Engineering;;Guangxi Key Lab of Multi-Source Information Mining and Security", + "aff_unique_url": "http://www.uestc.edu.cn;;", + "aff_unique_abbr": "UESTC;;", + "aff_campus_unique_index": "0+1;2;0;0;0+1+1;0+1", + "aff_campus_unique": "Chengdu;Shenzhen;Guilin", + "aff_country_unique_index": "0+0;0;0;0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25197", + "title": "Multispectral Invisible Coating: Laminated Visible-Thermal Physical Attack against Multispectral Object Detectors Using Transparent Low-E Films", + "track": "main", + "status": "Technical", + "abstract": "Multispectral object detection plays a vital role in safety-critical vision systems that require an around-the-clock operation and encounter dynamic real-world situations(e.g., self-driving cars and autonomous surveillance systems). Despite its crucial competence in safety-related applications, its security against physical attacks is severely understudied. 
We investigate the vulnerability of multispectral detectors against physical attacks by proposing a new physical method: Multispectral Invisible Coating. Utilizing transparent Low-e films, we realize a laminated visible-thermal physical attack by attaching Low-e films over a visible attack printing. Moreover, we apply our physical method to manufacture a Multispectral Invisible Suit that hides persons from the multiple view angles of Multispectral detectors. To simulate our attack under various surveillance scenes, we constructed a large-scale multispectral pedestrian dataset which we will release in public. Extensive experiments show that our proposed method effectively attacks the state-of-the-art multispectral detector both in the digital space and the physical world.", + "primary_area": "computer vision i", + "author": "Taeheon Kim; Youngjoon Yu; Yong Man Ro", + "authorids": "", + "aff": "Image and Video Systems Lab, KAIST, South Korea; Image and Video Systems Lab, KAIST, South Korea; Image and Video Systems Lab, KAIST, South Korea", + "bibtex": "@article{Kim_Yu_Ro_2023, title={Multispectral Invisible Coating: Laminated Visible-Thermal Physical Attack against Multispectral Object Detectors Using Transparent Low-E Films}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25197}, DOI={10.1609/aaai.v37i1.25197}, abstractNote={Multispectral object detection plays a vital role in safety-critical vision systems that require an around-the-clock operation and encounter dynamic real-world situations(e.g., self-driving cars and autonomous surveillance systems). Despite its crucial competence in safety-related applications, its security against physical attacks is severely understudied. We investigate the vulnerability of multispectral detectors against physical attacks by proposing a new physical method: Multispectral Invisible Coating. 
Utilizing transparent Low-e films, we realize a laminated visible-thermal physical attack by attaching Low-e films over a visible attack printing. Moreover, we apply our physical method to manufacture a Multispectral Invisible Suit that hides persons from the multiple view angles of Multispectral detectors. To simulate our attack under various surveillance scenes, we constructed a large-scale multispectral pedestrian dataset which we will release in public. Extensive experiments show that our proposed method effectively attacks the state-of-the-art multispectral detector both in the digital space and the physical world.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Taeheon and Yu, Youngjoon and Ro, Yong Man}, year={2023}, month={Jun.}, pages={1151-1159} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25197/24969", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25197", + "pdf_size": 2990731, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8811704811344420781&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "Image and Video Systems Lab", + "aff_unique_url": "https://www.kaist.edu", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25687", + "title": "Multiwinner Voting with Possibly Unavailable Candidates", + "track": "main", + "status": "Technical", + "abstract": "Selecting a committee that meets diversity and proportionality criteria is a challenging endeavor that has been studied extensively in recent years. 
This task becomes even more challenging when some of the selected candidates decline the invitation to join the committee. Since the unavailability of one candidate may impact the rest of the selection, inviting all candidates at the same time may lead to a suboptimal committee. Instead, invitations should be sequential and conditional on which candidates invited so far accepted the invitation: the solution to the committee selection problem is a query policy. If invitation queries are binding, they should be safe: one should not query a candidate without being sure that whatever the set of available candidates possible at that stage, her inclusion will not jeopardize committee optimality. Assuming approval-based inputs, we characterize the set of rules for which a safe query exists at every stage. In order to parallelize the invitation process, we investigate the computation of safe parallel queries, and show that it is often hard. We also study the existence of safe parallel queries with respect to proportionality axioms such as extended justified representation.", + "primary_area": "game theory and economic paradigms", + "author": "Markus Brill; Hayrullah Dindar; Jonas Israel; J\u00e9r\u00f4me Lang; Jannik Peters; Ulrike Schmidt-Kraepelin", + "authorids": "", + "aff": "Research Group Efficient Algorithms, Technische Universit\u00e4t Berlin, Germany+Department of Computer Science, University of Warwick, Coventry, UK; Research Group Efficient Algorithms, Technische Universit\u00e4t Berlin, Germany; Research Group Efficient Algorithms, Technische Universit\u00e4t Berlin, Germany; CNRS, Universit\u00e9 Paris-Dauphine, PSL, LAMSADE, France; Research Group Efficient Algorithms, Technische Universit\u00e4t Berlin, Germany; Research Group Efficient Algorithms, Technische Universit\u00e4t Berlin, Germany", + "bibtex": "@article{Brill_Dindar_Israel_Lang_Peters_Schmidt-Kraepelin_2023, title={Multiwinner Voting with Possibly Unavailable Candidates}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25687}, DOI={10.1609/aaai.v37i5.25687}, abstractNote={Selecting a committee that meets diversity and proportionality criteria is a challenging endeavor that has been studied extensively in recent years. This task becomes even more challenging when some of the selected candidates decline the invitation to join the committee. Since the unavailability of one candidate may impact the rest of the selection, inviting all candidates at the same time may lead to a suboptimal committee. Instead, invitations should be sequential and conditional on which candidates invited so far accepted the invitation: the solution to the committee selection problem is a query policy. If invitation queries are binding, they should be safe: one should not query a candidate without being sure that whatever the set of available candidates possible at that stage, her inclusion will not jeopardize committee optimality. Assuming approval-based inputs, we characterize the set of rules for which a safe query exists at every stage. In order to parallelize the invitation process, we investigate the computation of safe parallel queries, and show that it is often hard. 
We also study the existence of safe parallel queries with respect to proportionality axioms such as extended justified representation.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brill, Markus and Dindar, Hayrullah and Israel, Jonas and Lang, J\u00e9r\u00f4me and Peters, Jannik and Schmidt-Kraepelin, Ulrike}, year={2023}, month={Jun.}, pages={5532-5539} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25687/25459", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25687", + "pdf_size": 232291, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9860811781511226110&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "warwick.ac.uk;tu-berlin.de;tu-berlin.de;lamsade.dauphine.fr;tu-berlin.de;tu-berlin.de", + "email": "warwick.ac.uk;tu-berlin.de;tu-berlin.de;lamsade.dauphine.fr;tu-berlin.de;tu-berlin.de", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;2;0;0", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;University of Warwick;CNRS", + "aff_unique_dep": "Research Group Efficient Algorithms;Department of Computer Science;", + "aff_unique_url": "https://www.tu-berlin.de;https://warwick.ac.uk;https://www.cnrs.fr", + "aff_unique_abbr": "TU Berlin;Warwick;CNRS", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Coventry", + "aff_country_unique_index": "0+1;0;0;2;0;0", + "aff_country_unique": "Germany;United Kingdom;France" + }, + { + "id": "article-26912", + "title": "Music-to-Facial Expressions: Emotion-Based Music Visualization for the Hearing Impaired", + "track": "eaai symposium human aware ai in sound and music", + "status": "Technical", + "abstract": "While music is made to convey messages and emotions, auditory music is not equally accessible to everyone. 
Music visualization is a common approach to augment the listening experiences of the hearing users and to provide music experiences for the hearing-impaired. In this paper, we present a music visualization system that can turn the input of a piece of music into a series of facial expressions representative of the continuously changing sentiments in the music. The resulting facial expressions, recorded as action units, can later animate a static virtual avatar to be emotive synchronously with the music.", + "primary_area": "", + "author": "Yubo Wang; Fengzhou Pan; Danni Liu; Jiaxiong Hu", + "authorids": "", + "aff": "Washington University in St. Louis; Washington University in St. Louis; Washington University in St. Louis; Tsinghua University", + "bibtex": "@article{Wang_Pan_Liu_Hu_2024, title={Music-to-Facial Expressions: Emotion-Based Music Visualization for the Hearing Impaired}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26912}, DOI={10.1609/aaai.v37i13.26912}, abstractNote={While music is made to convey messages and emotions, auditory music is not equally accessible to everyone. Music visualization is a common approach to augment the listening experiences of the hearing users and to provide music experiences for the hearing-impaired. In this paper, we present a music visualization system that can turn the input of a piece of music into a series of facial expressions representative of the continuously changing sentiments in the music. 
The resulting facial expressions, recorded as action units, can later animate a static virtual avatar to be emotive synchronously with the music.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yubo and Pan, Fengzhou and Liu, Danni and Hu, Jiaxiong}, year={2024}, month={Jul.}, pages={16096-16102} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26912/26684", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26912", + "pdf_size": 800854, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6895912528877995715&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "wustl.edu;wustl.edu;wustl.edu;tsinghua.org.cn", + "email": "wustl.edu;wustl.edu;wustl.edu;tsinghua.org.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Washington University in St. Louis;Tsinghua University", + "aff_unique_dep": ";", + "aff_unique_url": "https://wustl.edu;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "WashU;THU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "St. Louis;", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26138", + "title": "Mutual-Enhanced Incongruity Learning Network for Multi-Modal Sarcasm Detection", + "track": "main", + "status": "Technical", + "abstract": "Sarcasm is a sophisticated linguistic phenomenon that is prevalent on today's social media platforms. Multi-modal sarcasm detection aims to identify whether a given sample with multi-modal information (i.e., text and image) is sarcastic. This task's key lies in capturing both inter- and intra-modal incongruities within the same context. 
Although existing methods have achieved compelling success, they are disturbed by irrelevant information extracted from the whole image and text, or overlooking some important information due to the incomplete input. To address these limitations, we propose a Mutual-enhanced Incongruity Learning Network for multi-modal sarcasm detection, named MILNet. In particular, we design a local semantic-guided incongruity learning module and a global incongruity learning module. Moreover, we introduce a mutual enhancement module to take advantage of the underlying consistency between the two modules to boost the performance. Extensive experiments on a widely-used dataset demonstrate the superiority of our model over cutting-edge methods.", + "primary_area": "machine learning iii", + "author": "Yang Qiao; Liqiang Jing; Xuemeng Song; Xiaolin Chen; Lei Zhu; Liqiang Nie", + "authorids": "", + "aff": "School of Computer Science and Technology, Shandong University, Qingdao, China; School of Computer Science and Technology, Shandong University, Qingdao, China; School of Computer Science and Technology, Shandong University, Qingdao, China + School of Software, Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China; School of Information Science and Engineering, Shandong Normal University, Jinan, China; School of Computer Science and Technology, Harbin Institute of Technology (Shenzhen campus), Shenzhen, China", + "bibtex": "@article{Qiao_Jing_Song_Chen_Zhu_Nie_2023, title={Mutual-Enhanced Incongruity Learning Network for Multi-Modal Sarcasm Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26138}, DOI={10.1609/aaai.v37i8.26138}, abstractNote={Sarcasm is a sophisticated linguistic phenomenon that is prevalent on today\u2019s social media platforms. Multi-modal sarcasm detection aims to identify whether a given sample with multi-modal information (i.e., text and image) is sarcastic. 
This task\u2019s key lies in capturing both inter- and intra-modal incongruities within the same context. Although existing methods have achieved compelling success, they are disturbed by irrelevant information extracted from the whole image and text, or overlooking some important information due to the incomplete input. To address these limitations, we propose a Mutual-enhanced Incongruity Learning Network for multi-modal sarcasm detection, named MILNet. In particular, we design a local semantic-guided incongruity learning module and a global incongruity learning module. Moreover, we introduce a mutual enhancement module to take advantage of the underlying consistency between the two modules to boost the performance. Extensive experiments on a widely-used dataset demonstrate the superiority of our model over cutting-edge methods.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qiao, Yang and Jing, Liqiang and Song, Xuemeng and Chen, Xiaolin and Zhu, Lei and Nie, Liqiang}, year={2023}, month={Jun.}, pages={9507-9515} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26138/25910", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26138", + "pdf_size": 3008206, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1474763979225404320&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "mail.sdu.edu.cn;gmial.com;gmial.com;gmial.com;gmial.com;gmial.com", + "email": "mail.sdu.edu.cn;gmial.com;gmial.com;gmial.com;gmial.com;gmial.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+0;0;1;2", + "aff_unique_norm": "Shandong University;Shandong Normal University;Harbin Institute of Technology", + "aff_unique_dep": "School of Computer Science and Technology;School of Information Science and Engineering;School of Computer Science and Technology", + "aff_unique_url": 
"http://www.sdu.edu.cn;http://www.sdu.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "SDU;SDU;HIT", + "aff_campus_unique_index": "0;0;0+1;1;1;2", + "aff_campus_unique": "Qingdao;Jinan;Shenzhen", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25448", + "title": "Mx2M: Masked Cross-Modality Modeling in Domain Adaptation for 3D Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Existing methods of cross-modal domain adaptation for 3D semantic segmentation predict results only via 2D-3D complementarity that is obtained by cross-modal feature matching. However, as lacking supervision in the target domain, the complementarity is not always reliable. The results are not ideal when the domain gap is large. To solve the problem of lacking supervision, we introduce masked modeling into this task and propose a method Mx2M, which utilizes masked cross-modality modeling to reduce the large domain gap. Our Mx2M contains two components. One is the core solution, cross-modal removal and prediction (xMRP), which makes the Mx2M adapt to various scenarios and provides cross-modal self-supervision. The other is a new way of cross-modal feature matching, the dynamic cross-modal filter (DxMF) that ensures the whole method dynamically uses more suitable 2D-3D complementarity. 
Evaluation of the Mx2M on three DA scenarios, including Day/Night, USA/Singapore, and A2D2/SemanticKITTI, brings large improvements over previous methods on many metrics.", + "primary_area": "computer vision iii", + "author": "Boxiang Zhang; Zunran Wang; Yonggen Ling; Yuanyuan Guan; Shenghao Zhang; Wenhui Li", + "authorids": "", + "aff": "College of Computer Science and Technology, Jilin University, Changchun, China+Key Laboratory of Symbolic Computation and Knowledge Engineer, Jilin University, Changchun, China; Robotics X, Tencent, Shenzhen, China; Robotics X, Tencent, Shenzhen, China; College of Computer Science and Technology, Jilin University, Changchun, China+Key Laboratory of Symbolic Computation and Knowledge Engineer, Jilin University, Changchun, China; Robotics X, Tencent, Shenzhen, China; College of Computer Science and Technology, Jilin University, Changchun, China+Key Laboratory of Symbolic Computation and Knowledge Engineer, Jilin University, Changchun, China", + "bibtex": "@article{Zhang_Wang_Ling_Guan_Zhang_Li_2023, title={Mx2M: Masked Cross-Modality Modeling in Domain Adaptation for 3D Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25448}, DOI={10.1609/aaai.v37i3.25448}, abstractNote={Existing methods of cross-modal domain adaptation for 3D semantic segmentation predict results only via 2D-3D complementarity that is obtained by cross-modal feature matching. However, as lacking supervision in the target domain, the complementarity is not always reliable. The results are not ideal when the domain gap is large. To solve the problem of lacking supervision, we introduce masked modeling into this task and propose a method Mx2M, which utilizes masked cross-modality modeling to reduce the large domain gap. Our Mx2M contains two components. One is the core solution, cross-modal removal and prediction (xMRP), which makes the Mx2M adapt to various scenarios and provides cross-modal self-supervision. 
The other is a new way of cross-modal feature matching, the dynamic cross-modal filter (DxMF) that ensures the whole method dynamically uses more suitable 2D-3D complementarity. Evaluation of the Mx2M on three DA scenarios, including Day/Night, USA/Singapore, and A2D2/SemanticKITTI, brings large improvements over previous methods on many metrics.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Boxiang and Wang, Zunran and Ling, Yonggen and Guan, Yuanyuan and Zhang, Shenghao and Li, Wenhui}, year={2023}, month={Jun.}, pages={3401-3409} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25448/25220", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25448", + "pdf_size": 2179791, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8083509389881730062&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mails.jlu.edu.cn;outlook.com; ;jlu.edu.cn; ;jlu.edu.cn", + "email": "mails.jlu.edu.cn;outlook.com; ;jlu.edu.cn; ;jlu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;1;1;0+0;1;0+0", + "aff_unique_norm": "Jilin University;Tencent", + "aff_unique_dep": "College of Computer Science and Technology;Robotics X", + "aff_unique_url": "http://www.jlu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "JLU;Tencent", + "aff_campus_unique_index": "0+0;1;1;0+0;1;0+0", + "aff_campus_unique": "Changchun;Shenzhen", + "aff_country_unique_index": "0+0;0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25949", + "title": "NAS-LID: Efficient Neural Architecture Search with Local Intrinsic Dimension", + "track": "main", + "status": "Technical", + "abstract": "One-shot neural architecture search (NAS) substantially improves the search efficiency by training one supernet to estimate the performance of every possible child architecture (i.e., subnet). 
However, the inconsistency of characteristics among subnets incurs serious interference in the optimization, resulting in poor performance ranking correlation of subnets. Subsequent explorations decompose supernet weights via a particular criterion, e.g., gradient matching, to reduce the interference; yet they suffer from huge computational cost and low space separability. In this work, we propose a lightweight and effective local intrinsic dimension (LID)-based method NAS-LID. NAS-LID evaluates the geometrical properties of architectures by calculating the low-cost LID features layer-by-layer, and the similarity characterized by LID enjoys better separability compared with gradients, which thus effectively reduces the interference among subnets. Extensive experiments on NASBench-201 indicate that NAS-LID achieves superior performance with better efficiency. Specifically, compared to the gradient-driven method, NAS-LID can save up to 86% of GPU memory overhead when searching on NASBench-201. We also demonstrate the effectiveness of NAS-LID on ProxylessNAS and OFA spaces. 
Source code:https://github.com/marsggbo/NAS-LID.", + "primary_area": "machine learning i", + "author": "Xin He; Jiangchao Yao; Yuxin Wang; Zhenheng Tang; Ka Chun Cheung; Simon See; Bo Han; Xiaowen Chu", + "authorids": "", + "aff": "Hong Kong Baptist University; Shanghai Jiao Tong University; Hong Kong Baptist University; Hong Kong Baptist University; Hong Kong Baptist University + NVIDIA AI Tech Center; Shanghai Jiao Tong University + Shanghai AI Laboratory + NVIDIA AI Tech Center + Mahindra University + Coventry University; Hong Kong Baptist University; The Hong Kong University of Science and Technology (Guangzhou) + Hong Kong Baptist University", + "bibtex": "@article{He_Yao_Wang_Tang_Cheung_See_Han_Chu_2023, title={NAS-LID: Efficient Neural Architecture Search with Local Intrinsic Dimension}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25949}, DOI={10.1609/aaai.v37i6.25949}, abstractNote={One-shot neural architecture search (NAS) substantially improves the search efficiency by training one supernet to estimate the performance of every possible child architecture (i.e., subnet). However, the inconsistency of characteristics among subnets incurs serious interference in the optimization, resulting in poor performance ranking correlation of subnets. Subsequent explorations decompose supernet weights via a particular criterion, e.g., gradient matching, to reduce the interference; yet they suffer from huge computational cost and low space separability. In this work, we propose a lightweight and effective local intrinsic dimension (LID)-based method NAS-LID. NAS-LID evaluates the geometrical properties of architectures by calculating the low-cost LID features layer-by-layer, and the similarity characterized by LID enjoys better separability compared with gradients, which thus effectively reduces the interference among subnets. Extensive experiments on NASBench-201 indicate that NAS-LID achieves superior performance with better efficiency. 
Specifically, compared to the gradient-driven method, NAS-LID can save up to 86% of GPU memory overhead when searching on NASBench-201. We also demonstrate the effectiveness of NAS-LID on ProxylessNAS and OFA spaces. Source code:https://github.com/marsggbo/NAS-LID.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Xin and Yao, Jiangchao and Wang, Yuxin and Tang, Zhenheng and Cheung, Ka Chun and See, Simon and Han, Bo and Chu, Xiaowen}, year={2023}, month={Jun.}, pages={7839-7847} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25949/25721", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25949", + "pdf_size": 355439, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12396151821604708808&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hkbu.edu.hk;sjtu.edu.cn;comp.hkbu.edu.hk;comp.hkbu.edu.hk;hkbu.edu.hk;nvidia.com;hkbu.edu.hk;ust.hk", + "email": "hkbu.edu.hk;sjtu.edu.cn;comp.hkbu.edu.hk;comp.hkbu.edu.hk;hkbu.edu.hk;nvidia.com;hkbu.edu.hk;ust.hk", + "github": "https://github.com/marsggbo/NAS-LID", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;0+2;1+3+2+4+5;0;6+0", + "aff_unique_norm": "Hong Kong Baptist University;Shanghai Jiao Tong University;NVIDIA;Shanghai AI Laboratory;Mahindra University;Coventry University;The Hong Kong University of Science and Technology", + "aff_unique_dep": ";;NVIDIA AI Tech Center;;;;", + "aff_unique_url": "https://www.hkbu.edu.hk;https://www.sjtu.edu.cn;https://www.nvidia.com;https://www.shanghai-ai-lab.com;https://www.mahindrauniversity.edu.in;https://www.coventry.ac.uk;https://www.ust.hk", + "aff_unique_abbr": "HKBU;SJTU;NVIDIA;SAIL;MU;CU;HKUST", + "aff_campus_unique_index": ";;1", + "aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0;0;0;0;0+1;0+0+1+2+3;0;0+0", + "aff_country_unique": "China;United States;India;United Kingdom" + }, + { + "id": "article-27074", + 
"title": "NCTV: Neural Clamping Toolkit and Visualization for Neural Network Calibration", + "track": "demonstrations", + "status": "Technical", + "abstract": "With the advancement of deep learning technology, neural networks have demonstrated their excellent ability to provide accurate predictions in many tasks. However, a lack of consideration for neural network calibration will not gain trust from humans, even for high-accuracy models. In this regard, the gap between the confidence of the model's predictions and the actual correctness likelihood must be bridged to derive a well-calibrated model. In this paper, we introduce the Neural Clamping Toolkit, the first open-source framework designed to help developers employ state-of-the-art model-agnostic calibrated models. Furthermore, we provide animations and interactive sections in the demonstration to familiarize researchers with calibration in neural networks. A Colab tutorial on utilizing our toolkit is also introduced.", + "primary_area": "", + "author": "Lei Hsiung; Yung-Chen Tang; Pin-Yu Chen; Tsung-Yi Ho", + "authorids": "", + "aff": "National Tsing Hua University + IBM Research; National Tsing Hua University + MediaTek Inc.; IBM Research; National Tsing Hua University + The Chinese University of Hong Kong", + "bibtex": "@article{Hsiung_Tang_Chen_Ho_2024, title={NCTV: Neural Clamping Toolkit and Visualization for Neural Network Calibration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27074}, DOI={10.1609/aaai.v37i13.27074}, abstractNote={With the advancement of deep learning technology, neural networks have demonstrated their excellent ability to provide accurate predictions in many tasks. However, a lack of consideration for neural network calibration will not gain trust from humans, even for high-accuracy models. In this regard, the gap between the confidence of the model\u2019s predictions and the actual correctness likelihood must be bridged to derive a well-calibrated model. 
In this paper, we introduce the Neural Clamping Toolkit, the first open-source framework designed to help developers employ state-of-the-art model-agnostic calibrated models. Furthermore, we provide animations and interactive sections in the demonstration to familiarize researchers with calibration in neural networks. A Colab tutorial on utilizing our toolkit is also introduced.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hsiung, Lei and Tang, Yung-Chen and Chen, Pin-Yu and Ho, Tsung-Yi}, year={2024}, month={Jul.}, pages={16446-16448} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27074/26846", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27074", + "pdf_size": 846795, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17517431763237191409&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "m109.nthu.edu.tw;m109.nthu.edu.tw;ibm.com;cse.cuhk.edu.hk", + "email": "m109.nthu.edu.tw;m109.nthu.edu.tw;ibm.com;cse.cuhk.edu.hk", + "github": "", + "project": "hsiung.cc/NCTV", + "author_num": 4, + "aff_unique_index": "0+1;0+2;1;0+3", + "aff_unique_norm": "National Tsing Hua University;IBM;MediaTek Inc.;The Chinese University of Hong Kong", + "aff_unique_dep": ";IBM Research;;", + "aff_unique_url": "https://www.nthu.edu.tw;https://www.ibm.com/research;https://www.mediatek.com/;https://www.cuhk.edu.hk", + "aff_unique_abbr": "NTHU;IBM;MediaTek;CUHK", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+0;1;0+2", + "aff_country_unique": "Taiwan, China;United States;China" + }, + { + "id": "article-25854", + "title": "NHITS: Neural Hierarchical Interpolation for Time Series Forecasting", + "track": "main", + "status": "Technical", + "abstract": "Recent progress in neural forecasting accelerated improvements in the performance of large-scale forecasting systems. 
Yet, long-horizon forecasting remains a very difficult task. Two common challenges afflicting the task are the volatility of the predictions and their computational complexity. We introduce NHITS, a model which addresses both challenges by incorporating novel hierarchical interpolation and multi-rate data sampling techniques. These techniques enable the proposed method to assemble its predictions sequentially, emphasizing components with different frequencies and scales while decomposing the input signal and synthesizing the forecast. We prove that the hierarchical interpolation technique can efficiently approximate arbitrarily long horizons in the presence of smoothness. Additionally, we conduct extensive large-scale dataset experiments from the long-horizon forecasting literature, demonstrating the advantages of our method over the state-of-the-art methods, where NHITS provides an average accuracy improvement of almost 20% over the latest Transformer architectures while reducing the computation time by an order of magnitude (50 times). Our code is available at https://github.com/Nixtla/neuralforecast.", + "primary_area": "machine learning i", + "author": "Cristian Challu; Kin G. Olivares; Boris N. 
Oreshkin; Federico Garza Ramirez; Max Mergenthaler Canseco; Artur Dubrawski", + "authorids": "", + "aff": "Auton Lab, School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, USA; Auton Lab, School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, USA; Unity Technologies, Labs, Montreal, QC, Canada; Nixtla, Pittsburgh, PA, USA; Nixtla, Pittsburgh, PA, USA; Auton Lab, School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, USA", + "bibtex": "@article{Challu_Olivares_Oreshkin_Garza Ramirez_Mergenthaler Canseco_Dubrawski_2023, title={NHITS: Neural Hierarchical Interpolation for Time Series Forecasting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25854}, DOI={10.1609/aaai.v37i6.25854}, abstractNote={Recent progress in neural forecasting accelerated improvements in the performance of large-scale forecasting systems. Yet, long-horizon forecasting remains a very difficult task. Two common challenges afflicting the task are the volatility of the predictions and their computational complexity. We introduce NHITS, a model which addresses both challenges by incorporating novel hierarchical interpolation and multi-rate data sampling techniques. These techniques enable the proposed method to assemble its predictions sequentially, emphasizing components with different frequencies and scales while decomposing the input signal and synthesizing the forecast. We prove that the hierarchical interpolation technique can efficiently approximate arbitrarily long horizons in the presence of smoothness. Additionally, we conduct extensive large-scale dataset experiments from the long-horizon forecasting literature, demonstrating the advantages of our method over the state-of-the-art methods, where NHITS provides an average accuracy improvement of almost 20% over the latest Transformer architectures while reducing the computation time by an order of magnitude (50 times). 
Our code is available at https://github.com/Nixtla/neuralforecast.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Challu, Cristian and Olivares, Kin G. and Oreshkin, Boris N. and Garza Ramirez, Federico and Mergenthaler Canseco, Max and Dubrawski, Artur}, year={2023}, month={Jun.}, pages={6989-6997} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25854/25626", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25854", + "pdf_size": 480195, + "gs_citation": 547, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13628949333790989474&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.cmu.edu;cs.cmu.edu;unity3d.com;nixtla.io;nixtla.io;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;unity3d.com;nixtla.io;nixtla.io;cs.cmu.edu", + "github": "https://github.com/Nixtla/neuralforecast", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;2;0", + "aff_unique_norm": "Carnegie Mellon University;Unity Technologies;Nixtla", + "aff_unique_dep": "School of Computer Science;Labs;", + "aff_unique_url": "https://www.cmu.edu;https://unity.com;", + "aff_unique_abbr": "CMU;Unity;", + "aff_campus_unique_index": "0;0;1;0;0;0", + "aff_campus_unique": "Pittsburgh;Montreal", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-27068", + "title": "NL2LTL \u2013 a Python Package for Converting Natural Language (NL) Instructions to Linear Temporal Logic (LTL) Formulas", + "track": "demonstrations", + "status": "Technical", + "abstract": "This is a demonstration of our newly released Python package NL2LTL which leverages the latest in natural language understanding (NLU) and large language models (LLMs) to translate natural language instructions to linear temporal logic (LTL) formulas. 
This allows direct translation to formal languages that a reasoning system can use, while at the same time, allowing the end-user to provide inputs in natural language without having to understand any details of an underlying \nformal language. The package comes with support for a set of default LTL patterns, corresponding to popular DECLARE templates, but is also fully extensible to new formulas and user inputs. The package is open-source and is free to use for the AI community under the MIT license. Open Source: https://github.com/IBM/nl2ltl. Video Link: https://bit.ly/3dHW5b1", + "primary_area": "", + "author": "Francesco Fuggitti; Tathagata Chakraborti", + "authorids": "", + "aff": "Sapienza University, Rome (Italy) + York University, Toronto (Canada); IBM Research, Cambridge (USA)", + "bibtex": "@article{Fuggitti_Chakraborti_2024, title={NL2LTL \u2013 a Python Package for Converting Natural Language (NL) Instructions to Linear Temporal Logic (LTL) Formulas}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27068}, DOI={10.1609/aaai.v37i13.27068}, abstractNote={This is a demonstration of our newly released Python package NL2LTL which leverages the latest in natural language understanding (NLU) and large language models (LLMs) to translate natural language instructions to linear temporal logic (LTL) formulas. This allows direct translation to formal languages that a reasoning system can use, while at the same time, allowing the end-user to provide inputs in natural language without having to understand any details of an underlying formal language. The package comes with support for a set of default LTL patterns, corresponding to popular DECLARE templates, but is also fully extensible to new formulas and user inputs. The package is open-source and is free to use for the AI community under the MIT license. Open Source: https://github.com/IBM/nl2ltl. 
Video Link: https://bit.ly/3dHW5b1}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fuggitti, Francesco and Chakraborti, Tathagata}, year={2024}, month={Jul.}, pages={16428-16430} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27068/26840", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27068", + "pdf_size": 228383, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12645166953733088987&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 7, + "aff_domain": "diag.uniroma1.it;ibm.com", + "email": "diag.uniroma1.it;ibm.com", + "github": "https://github.com/IBM/nl2ltl", + "project": "https://bit.ly/3dHW5b1", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "Sapienza University;York University;IBM Research", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uniroma1.it;https://yorku.ca;https://www.ibm.com/research", + "aff_unique_abbr": "Sapienza;York U;IBM", + "aff_campus_unique_index": "0+1;2", + "aff_campus_unique": "Rome;Toronto;Cambridge", + "aff_country_unique_index": "0+1;2", + "aff_country_unique": "Italy;Canada;United States" + }, + { + "id": "article-25172", + "title": "NLIP: Noise-Robust Language-Image Pre-training", + "track": "main", + "status": "Technical", + "abstract": "Large-scale cross-modal pre-training paradigms have recently shown ubiquitous success on a wide range of downstream tasks, e.g., zero-shot classification, retrieval and image captioning. However, their successes highly rely on the scale and quality of web-crawled data that naturally contain much incomplete and noisy information (e.g., wrong or irrelevant contents). Existing works either design manual rules to clean data or generate pseudo-targets as auxiliary signals for reducing noise impact, which do not explicitly tackle both the incorrect and incomplete challenges at the same time. 
In this paper, to automatically mitigate the impact of noise by solely mining over existing data, we propose a principled Noise-robust Language-Image Pre-training framework (NLIP) to stabilize pre-training via two schemes: noise-harmonization and noise-completion. First, in noise-harmonization scheme, NLIP estimates the noise probability of each pair according to the memorization effect of cross-modal transformers, then adopts noise-adaptive regularization to harmonize the cross-modal alignments with varying degrees. Second, in noise-completion scheme, to enrich the missing object information of text, NLIP injects a concept-conditioned cross-modal decoder to obtain semantic-consistent synthetic captions to complete noisy ones, which uses the retrieved visual concepts (i.e., objects\u2019 names) for the corresponding image to guide captioning generation. By collaboratively optimizing noise-harmonization and noise-completion schemes, our NLIP can alleviate the common noise effects during image-text pre-training in a more efficient way. 
Extensive experiments show the significant performance improvements of our NLIP using only 26M data over existing pre-trained models (e.g., CLIP, FILIP and BLIP) on 12 zero-shot classification datasets (e.g., +8.6% over CLIP on average accuracy), MSCOCO image captioning (e.g., +1.9 over BLIP trained with 129M data on CIDEr) and zero-shot image-text retrieval tasks.", + "primary_area": "computer vision i", + "author": "Runhui Huang; Yanxin Long; Jianhua Han; Hang Xu; Xiwen Liang; Chunjing Xu; Xiaodan Liang", + "authorids": "", + "aff": "Shenzhen campus of Sun Yat-sen University; Shenzhen campus of Sun Yat-sen University; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Shenzhen campus of Sun Yat-sen University; Huawei Noah\u2019s Ark Lab; Shenzhen campus of Sun Yat-sen University", + "bibtex": "@article{Huang_Long_Han_Xu_Liang_Xu_Liang_2023, title={NLIP: Noise-Robust Language-Image Pre-training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25172}, DOI={10.1609/aaai.v37i1.25172}, abstractNote={Large-scale cross-modal pre-training paradigms have recently shown ubiquitous success on a wide range of downstream tasks, e.g., zero-shot classification, retrieval and image captioning. However, their successes highly rely on the scale and quality of web-crawled data that naturally contain much incomplete and noisy information (e.g., wrong or irrelevant contents). Existing works either design manual rules to clean data or generate pseudo-targets as auxiliary signals for reducing noise impact, which do not explicitly tackle both the incorrect and incomplete challenges at the same time. In this paper, to automatically mitigate the impact of noise by solely mining over existing data, we propose a principled Noise-robust Language-Image Pre-training framework (NLIP) to stabilize pre-training via two schemes: noise-harmonization and noise-completion. 
First, in noise-harmonization scheme, NLIP estimates the noise probability of each pair according to the memorization effect of cross-modal transformers, then adopts noise-adaptive regularization to harmonize the cross-modal alignments with varying degrees. Second, in noise-completion scheme, to enrich the missing object information of text, NLIP injects a concept-conditioned cross-modal decoder to obtain semantic-consistent synthetic captions to complete noisy ones, which uses the retrieved visual concepts (i.e., objects\u2019 names) for the corresponding image to guide captioning generation. By collaboratively optimizing noise-harmonization and noise-completion schemes, our NLIP can alleviate the common noise effects during image-text pre-training in a more efficient way. Extensive experiments show the significant performance improvements of our NLIP using only 26M data over existing pre-trained models (e.g., CLIP, FILIP and BLIP) on 12 zero-shot classification datasets (e.g., +8.6% over CLIP on average accuracy), MSCOCO image captioning (e.g., +1.9 over BLIP trained with 129M data on CIDEr) and zero-shot image-text retrieval tasks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Runhui and Long, Yanxin and Han, Jianhua and Xu, Hang and Liang, Xiwen and Xu, Chunjing and Liang, Xiaodan}, year={2023}, month={Jun.}, pages={926-934} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25172/24944", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25172", + "pdf_size": 1232253, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6589402887434034541&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;huawei.com;gmail.com;gmail.com;huawei.com;gmail.com", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;huawei.com;gmail.com;gmail.com;huawei.com;gmail.com", + "github": "", + "project": "", + "author_num": 7, + 
"aff_unique_index": "0;0;1;1;0;1;0", + "aff_unique_norm": "Sun Yat-sen University;Huawei", + "aff_unique_dep": ";Noah\u2019s Ark Lab", + "aff_unique_url": "http://www.sysu.edu.cn/;https://www.huawei.com", + "aff_unique_abbr": "SYSU;Huawei", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25576", + "title": "NQE: N-ary Query Embedding for Complex Query Answering over Hyper-Relational Knowledge Graphs", + "track": "main", + "status": "Technical", + "abstract": "Complex query answering (CQA) is an essential task for multi-hop and logical reasoning on knowledge graphs (KGs). Currently, most approaches are limited to queries among binary relational facts and pay less attention to n-ary facts (n\u22652) containing more than two entities, which are more prevalent in the real world. Moreover, previous CQA methods can only make predictions for a few given types of queries and cannot be flexibly extended to more complex logical queries, which significantly limits their applications. To overcome these challenges, in this work, we propose a novel N-ary Query Embedding (NQE) model for CQA over hyper-relational knowledge graphs (HKGs), which include massive n-ary facts. The NQE utilizes a dual-heterogeneous Transformer encoder and fuzzy logic theory to satisfy all n-ary FOL queries, including existential quantifiers (\u2203), conjunction (\u2227), disjunction (\u2228), and negation (\u00ac). We also propose a parallel processing algorithm that can train or predict arbitrary n-ary FOL queries in a single batch, regardless of the kind of each query, with good flexibility and extensibility. In addition, we generate a new CQA dataset WD50K-NFOL, including diverse n-ary FOL queries over WD50K. Experimental results on WD50K-NFOL and other standard CQA datasets show that NQE is the state-of-the-art CQA method over HKGs with good generalization capability. 
Our code and dataset are publicly available.", + "primary_area": "data mining and knowledge management", + "author": "Haoran Luo; Haihong E; Yuhao Yang; Gengxian Zhou; Yikai Guo; Tianyu Yao; Zichen Tang; Xueyuan Lin; Kaiyang Wan", + "authorids": "", + "aff": "School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Automation Science and Electrical Engineering, Beihang University, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; Beijing Institute of Computer Technology and Application, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing, China", + "bibtex": "@article{Luo_E_Yang_Zhou_Guo_Yao_Tang_Lin_Wan_2023, title={NQE: N-ary Query Embedding for Complex Query Answering over Hyper-Relational Knowledge Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25576}, DOI={10.1609/aaai.v37i4.25576}, abstractNote={Complex query answering (CQA) is an essential task for multi-hop and logical reasoning on knowledge graphs (KGs). Currently, most approaches are limited to queries among binary relational facts and pay less attention to n-ary facts (n\u22652) containing more than two entities, which are more prevalent in the real world. Moreover, previous CQA methods can only make predictions for a few given types of queries and cannot be flexibly extended to more complex logical queries, which significantly limits their applications. 
To overcome these challenges, in this work, we propose a novel N-ary Query Embedding (NQE) model for CQA over hyper-relational knowledge graphs (HKGs), which include massive n-ary facts. The NQE utilizes a dual-heterogeneous Transformer encoder and fuzzy logic theory to satisfy all n-ary FOL queries, including existential quantifiers (\u2203), conjunction (\u2227), disjunction (\u2228), and negation (\u00ac). We also propose a parallel processing algorithm that can train or predict arbitrary n-ary FOL queries in a single batch, regardless of the kind of each query, with good flexibility and extensibility. In addition, we generate a new CQA dataset WD50K-NFOL, including diverse n-ary FOL queries over WD50K. Experimental results on WD50K-NFOL and other standard CQA datasets show that NQE is the state-of-the-art CQA method over HKGs with good generalization capability. Our code and dataset are publicly available.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Haoran and E, Haihong and Yang, Yuhao and Zhou, Gengxian and Guo, Yikai and Yao, Tianyu and Tang, Zichen and Lin, Xueyuan and Wan, Kaiyang}, year={2023}, month={Jun.}, pages={4543-4551} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25576/25348", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25576", + "pdf_size": 1890015, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3802708010342074120&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;buaa.edu.cn; ; ; ; ; ; ", + "email": "bupt.edu.cn;bupt.edu.cn;buaa.edu.cn; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0;2;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Beihang University;Beijing Institute of Computer Technology and Application", + "aff_unique_dep": "School of Computer Science;School of Automation Science and 
Electrical Engineering;", + "aff_unique_url": "http://www.bupt.edu.cn/;http://www.buaa.edu.cn;", + "aff_unique_abbr": "BUPT;BUAA;", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25224", + "title": "NeAF: Learning Neural Angle Fields for Point Normal Estimation", + "track": "main", + "status": "Technical", + "abstract": "Normal estimation for unstructured point clouds is an important task in 3D computer vision. Current methods achieve encouraging results by mapping local patches to normal vectors or learning local surface fitting using neural networks. However, these methods are not generalized well to unseen scenarios and are sensitive to parameter settings. To resolve these issues, we propose an implicit function to learn an angle field around the normal of each point in the spherical coordinate system, which is dubbed as Neural Angle Fields (NeAF). Instead of directly predicting the normal of an input point, we predict the angle offset between the ground truth normal and a randomly sampled query normal. This strategy pushes the network to observe more diverse samples, which leads to higher prediction accuracy in a more robust manner. To predict normals from the learned angle fields at inference time, we randomly sample query vectors in a unit spherical space and take the vectors with minimal angle values as the predicted normals. To further leverage the prior learned by NeAF, we propose to refine the predicted normal vectors by minimizing the angle offsets. The experimental results with synthetic data and real scans show significant improvements over the state-of-the-art under widely used benchmarks. 
Project page: https://lisj575.github.io/NeAF/.", + "primary_area": "computer vision i", + "author": "Shujuan Li; Junsheng Zhou; Baorui Ma; Yu-Shen Liu; Zhizhong Han", + "authorids": "", + "aff": "School of Software, BNRist, Tsinghua University, Beijing, China; School of Software, BNRist, Tsinghua University, Beijing, China; School of Software, BNRist, Tsinghua University, Beijing, China; School of Software, BNRist, Tsinghua University, Beijing, China; Department of Computer Science, Wayne State University, Detroit, USA", + "bibtex": "@article{Li_Zhou_Ma_Liu_Han_2023, title={NeAF: Learning Neural Angle Fields for Point Normal Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25224}, DOI={10.1609/aaai.v37i1.25224}, abstractNote={Normal estimation for unstructured point clouds is an important task in 3D computer vision. Current methods achieve encouraging results by mapping local patches to normal vectors or learning local surface fitting using neural networks. However, these methods are not generalized well to unseen scenarios and are sensitive to parameter settings. To resolve these issues, we propose an implicit function to learn an angle field around the normal of each point in the spherical coordinate system, which is dubbed as Neural Angle Fields (NeAF). Instead of directly predicting the normal of an input point, we predict the angle offset between the ground truth normal and a randomly sampled query normal. This strategy pushes the network to observe more diverse samples, which leads to higher prediction accuracy in a more robust manner. To predict normals from the learned angle fields at inference time, we randomly sample query vectors in a unit spherical space and take the vectors with minimal angle values as the predicted normals. To further leverage the prior learned by NeAF, we propose to refine the predicted normal vectors by minimizing the angle offsets. 
The experimental results with synthetic data and real scans show significant improvements over the state-of-the-art under widely used benchmarks. Project page: https://lisj575.github.io/NeAF/.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shujuan and Zhou, Junsheng and Ma, Baorui and Liu, Yu-Shen and Han, Zhizhong}, year={2023}, month={Jun.}, pages={1396-1404} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25224/24996", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25224", + "pdf_size": 3970682, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11434438471036060309&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn;wayne.edu", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn;wayne.edu", + "github": "", + "project": "https://lisj575.github.io/NeAF/", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Tsinghua University;Wayne State University", + "aff_unique_dep": "School of Software;Department of Computer Science", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://wayne.edu", + "aff_unique_abbr": "THU;WSU", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Beijing;Detroit", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26039", + "title": "Nearest-Neighbor Sampling Based Conditional Independence Testing", + "track": "main", + "status": "Technical", + "abstract": "The conditional randomization test (CRT) was recently proposed to test whether two random variables X and Y are conditionally independent given random variables Z. 
The CRT assumes that the conditional distribution of X given Z is known under the null hypothesis and then it is compared to the distribution of the observed samples of the original data. The aim of this paper is to develop a novel alternative of CRT by using nearest-neighbor sampling without assuming the exact form of the distribution of X given Z. Specifically, we utilize the computationally efficient 1-nearest-neighbor to approximate the conditional distribution that encodes the null hypothesis. Then, theoretically, we show that the distribution of the generated samples is very close to the true conditional distribution in terms of total variation distance. Furthermore, we take the classifier-based conditional mutual information estimator as our test statistic. The test statistic as an empirical fundamental information theoretic quantity is able to well capture the conditional-dependence feature. We show that our proposed test is computationally very fast, while controlling type I and II errors quite well. 
Finally, we demonstrate the efficiency of our proposed test in both synthetic and real data analyses.", + "primary_area": "machine learning ii", + "author": "Shuai Li; Ziqi Chen; Hongtu Zhu; Christina Dan Wang; Wang Wen", + "authorids": "", + "aff": "School of Statistics, KLATASDS-MOE, East China Normal University, Shanghai, China; School of Statistics, KLATASDS-MOE, East China Normal University, Shanghai, China; Departments of Biostatistics, Statistics, Computer Science, and Genetics, The University of North Carolina at Chapel Hill, Chapel Hill, USA; Business Division, New York University Shanghai, Shanghai, China; School of Mathematics and Statistics, Central South University, Changsha, China", + "bibtex": "@article{Li_Chen_Zhu_Wang_Wen_2023, title={Nearest-Neighbor Sampling Based Conditional Independence Testing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26039}, DOI={10.1609/aaai.v37i7.26039}, abstractNote={The conditional randomization test (CRT) was recently proposed to test whether two random variables X and Y are conditionally independent given random variables Z. The CRT assumes that the conditional distribution of X given Z is known under the null hypothesis and then it is compared to the distribution of the observed samples of the original data. The aim of this paper is to develop a novel alternative of CRT by using nearest-neighbor sampling without assuming the exact form of the distribution of X given Z. Specifically, we utilize the computationally efficient 1-nearest-neighbor to approximate the conditional distribution that encodes the null hypothesis. Then, theoretically, we show that the distribution of the generated samples is very close to the true conditional distribution in terms of total variation distance. Furthermore, we take the classifier-based conditional mutual information estimator as our test statistic. 
The test statistic as an empirical fundamental information theoretic quantity is able to well capture the conditional-dependence feature. We show that our proposed test is computationally very fast, while controlling type I and II errors quite well. Finally, we demonstrate the efficiency of our proposed test in both synthetic and real data analyses.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shuai and Chen, Ziqi and Zhu, Hongtu and Wang, Christina Dan and Wen, Wang}, year={2023}, month={Jun.}, pages={8631-8639} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26039/25811", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26039", + "pdf_size": 385715, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7098694624711047230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "foxmail.com;fem.ecnu.edu.cn;email.unc.edu;nyu.edu;foxmail.com", + "email": "foxmail.com;fem.ecnu.edu.cn;email.unc.edu;nyu.edu;foxmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;3", + "aff_unique_norm": "East China Normal University;The University of North Carolina at Chapel Hill;New York University Shanghai;Central South University", + "aff_unique_dep": "School of Statistics;Departments of Biostatistics, Statistics, Computer Science, and Genetics;Business Division;School of Mathematics and Statistics", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.unc.edu;https://shanghai.nyu.edu;http://www.csu.edu.cn", + "aff_unique_abbr": "ECNU;UNC Chapel Hill;NYU Shanghai;CSU", + "aff_campus_unique_index": "0;0;1;0;2", + "aff_campus_unique": "Shanghai;Chapel Hill;Changsha", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26684", + "title": "Neighbor Auto-Grouping Graph Neural Networks for Handover Parameter Configuration in Cellular Network", + "track": "aaai 
special track", + "status": "Technical", + "abstract": "The mobile communication enabled by cellular networks is the one of the main foundations of our modern society. Optimizing the performance of cellular networks and providing massive connectivity with improved coverage and user experience has a considerable social and economic impact on our daily life. This performance relies heavily on the configuration of the network parameters. However, with the massive increase in both the size and complexity of cellular networks, network management, especially parameter configuration, is becoming complicated. The current practice, which relies largely on experts' prior knowledge, is not adequate and will require lots of domain experts and high maintenance costs. In this work, we propose a learning-based framework for handover parameter configuration. The key challenge, in this case, is to tackle the complicated dependencies between neighboring cells and jointly optimize the whole network. Our framework addresses this challenge in two ways. First, we introduce a novel approach to imitate how the network responds to different network states and parameter values, called auto-grouping graph convolutional network (AG-GCN). During the parameter configuration stage, instead of solving the global optimization problem, we design a local multi-objective optimization strategy where each cell considers several local performance metrics to balance its own performance and its neighbors. We evaluate our proposed algorithm via a simulator constructed using real network data. We demonstrate that the handover parameters our model can find, achieve better average network throughput compared to those recommended by experts as well as alternative baselines, which can bring better network quality and stability. 
It has the potential to massively reduce costs arising from human expert intervention and maintenance.", + "primary_area": "ai for social impact", + "author": "Mehrtash Mehrabi; Walid Masoudimansour; Yingxue Zhang; Jie Chuai; Zhitang Chen; Mark Coates; Jianye Hao; Yanhui Geng", + "authorids": "", + "aff": "Huawei Noah\u2019s Ark Lab, Montreal, Canada + University of Alberta, Edmonton, Canada; Huawei Noah\u2019s Ark Lab, Montreal, Canada; Huawei Noah\u2019s Ark Lab, Montreal, Canada; Huawei Noah\u2019s Ark Lab, Montreal, Canada; Huawei Noah\u2019s Ark Lab, Montreal, Canada; McGill University, Montreal, Canada; Huawei Noah\u2019s Ark Lab, Montreal, Canada + Tianjin University, Tianjin, China; Huawei Noah\u2019s Ark Lab, Montreal, Canada", + "bibtex": "@article{Mehrabi_Masoudimansour_Zhang_Chuai_Chen_Coates_Hao_Geng_2023, title={Neighbor Auto-Grouping Graph Neural Networks for Handover Parameter Configuration in Cellular Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26684}, DOI={10.1609/aaai.v37i12.26684}, abstractNote={The mobile communication enabled by cellular networks is the one of the main foundations of our modern society. Optimizing the performance of cellular networks and providing massive connectivity with improved coverage and user experience has a considerable social and economic impact on our daily life. This performance relies heavily on the configuration of the network parameters. However, with the massive increase in both the size and complexity of cellular networks, network management, especially parameter configuration, is becoming complicated. The current practice, which relies largely on experts\u2019 prior knowledge, is not adequate and will require lots of domain experts and high maintenance costs. In this work, we propose a learning-based framework for handover parameter configuration. 
The key challenge, in this case, is to tackle the complicated dependencies between neighboring cells and jointly optimize the whole network. Our framework addresses this challenge in two ways. First, we introduce a novel approach to imitate how the network responds to different network states and parameter values, called auto-grouping graph convolutional network (AG-GCN). During the parameter configuration stage, instead of solving the global optimization problem, we design a local multi-objective optimization strategy where each cell considers several local performance metrics to balance its own performance and its neighbors. We evaluate our proposed algorithm via a simulator constructed using real network data. We demonstrate that the handover parameters our model can find, achieve better average network throughput compared to those recommended by experts as well as alternative baselines, which can bring better network quality and stability. It has the potential to massively reduce costs arising from human expert intervention and maintenance.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mehrabi, Mehrtash and Masoudimansour, Walid and Zhang, Yingxue and Chuai, Jie and Chen, Zhitang and Coates, Mark and Hao, Jianye and Geng, Yanhui}, year={2023}, month={Jun.}, pages={14400-14407} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26684/26456", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26684", + "pdf_size": 392035, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:6S0y0MPwbSoJ:scholar.google.com/&scioq=Neighbor+Auto-Grouping+Graph+Neural+Networks+for+Handover+Parameter+Configuration+in+Cellular+Network&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;mcgill.ca;huawei.com;huawei.com", + "email": "huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;mcgill.ca;huawei.com;huawei.com", 
+ "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0;0;0;0;2;0+3;0", + "aff_unique_norm": "Huawei Noah\u2019s Ark Lab;University of Alberta;McGill University;Tianjin University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.huawei.com/en/ai/noahs-ark-lab;https://www.ualberta.ca;https://www.mcgill.ca;http://www.tju.edu.cn", + "aff_unique_abbr": "HNAL;UAlberta;McGill;Tianjin U", + "aff_campus_unique_index": "0+1;0;0;0;0;0;0+2;0", + "aff_campus_unique": "Montreal;Edmonton;Tianjin", + "aff_country_unique_index": "0+0;0;0;0;0;0;0+1;0", + "aff_country_unique": "Canada;China" + }, + { + "id": "article-26168", + "title": "Neighbor Contrastive Learning on Learnable Graph Augmentation", + "track": "main", + "status": "Technical", + "abstract": "Recent years, graph contrastive learning (GCL), which aims to learn representations from unlabeled graphs, has made great progress. However, the existing GCL methods mostly adopt human-designed graph augmentations, which are sensitive to various graph datasets. In addition, the contrastive losses originally developed in computer vision have been directly applied to graph data, where the neighboring nodes are regarded as negatives and consequently pushed far apart from the anchor. However, this is contradictory with the homophily assumption of net-works that connected nodes often belong to the same class and should be close to each other. In this work, we propose an end-to-end automatic GCL method, named NCLA to apply neighbor contrastive learning on learnable graph augmentation. Several graph augmented views with adaptive topology are automatically learned by the multi-head graph attention mechanism, which can be compatible with various graph datasets without prior domain knowledge. In addition, a neighbor contrastive loss is devised to allow multiple positives per anchor by taking network topology as the supervised signals. 
Both augmentations and embeddings are learned end-to-end in the proposed NCLA. Extensive experiments on the benchmark datasets demonstrate that NCLA yields the state-of-the-art node classification performance on self-supervised GCL and even exceeds the supervised ones, when the labels are extremely limited. Our code is released at https://github.com/shenxiaocam/NCLA.", + "primary_area": "machine learning iii", + "author": "Xiao Shen; Dewang Sun; Shirui Pan; Xi Zhou; Laurence T. Yang", + "authorids": "", + "aff": "Hainan University; Hainan University; Griffith University; Hainan University; Hainan University+St. Francis Xavier University", + "bibtex": "@article{Shen_Sun_Pan_Zhou_Yang_2023, title={Neighbor Contrastive Learning on Learnable Graph Augmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26168}, DOI={10.1609/aaai.v37i8.26168}, abstractNote={Recent years, graph contrastive learning (GCL), which aims to learn representations from unlabeled graphs, has made great progress. However, the existing GCL methods mostly adopt human-designed graph augmentations, which are sensitive to various graph datasets. In addition, the contrastive losses originally developed in computer vision have been directly applied to graph data, where the neighboring nodes are regarded as negatives and consequently pushed far apart from the anchor. However, this is contradictory with the homophily assumption of net-works that connected nodes often belong to the same class and should be close to each other. In this work, we propose an end-to-end automatic GCL method, named NCLA to apply neighbor contrastive learning on learnable graph augmentation. Several graph augmented views with adaptive topology are automatically learned by the multi-head graph attention mechanism, which can be compatible with various graph datasets without prior domain knowledge. 
In addition, a neighbor contrastive loss is devised to allow multiple positives per anchor by taking network topology as the supervised signals. Both augmentations and embeddings are learned end-to-end in the proposed NCLA. Extensive experiments on the benchmark datasets demonstrate that NCLA yields the state-of-the-art node classification performance on self-supervised GCL and even exceeds the supervised ones, when the labels are extremely limited. Our code is released at https://github.com/shenxiaocam/NCLA.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shen, Xiao and Sun, Dewang and Pan, Shirui and Zhou, Xi and Yang, Laurence T.}, year={2023}, month={Jun.}, pages={9782-9791} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26168/25940", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26168", + "pdf_size": 629574, + "gs_citation": 96, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7358921057168171347&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "163.com;hainanu.edu.cn;griffith.edu.au;hainanu.edu.cn;hainanu.edu.cn", + "email": "163.com;hainanu.edu.cn;griffith.edu.au;hainanu.edu.cn;hainanu.edu.cn", + "github": "https://github.com/shenxiaocam/NCLA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0+2", + "aff_unique_norm": "Hainan University;Griffith University;St. 
Francis Xavier University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.hainanu.edu.cn;https://www.griffith.edu.au;https://www.stfx.ca", + "aff_unique_abbr": "HNU;Griffith;StFX", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0+2", + "aff_country_unique": "China;Australia;Canada" + }, + { + "id": "article-26260", + "title": "Neighborhood-Regularized Self-Training for Learning with Few Labels", + "track": "main", + "status": "Technical", + "abstract": "Training deep neural networks (DNNs) with limited supervision has been a popular research topic as it can significantly alleviate the annotation burden. Self-training has been successfully applied in semi-supervised learning tasks, but one drawback of self-training is that it is vulnerable to the label noise from incorrect pseudo labels. Inspired by the fact that samples with similar labels tend to share similar representations, we develop a neighborhood-based sample selection approach to tackle the issue of noisy pseudo labels. We further stabilize self-training via aggregating the predictions from different rounds during sample selection. Experiments on eight tasks show that our proposed method outperforms the strongest self-training baseline with 1.83% and 2.51% performance gain for text and graph datasets on average. Our further analysis demonstrates that our proposed data selection strategy reduces the noise of pseudo labels by 36.8% and saves 57.3% of the time when compared with the best baseline. 
Our code and appendices will be uploaded to: https://github.com/ritaranx/NeST.", + "primary_area": "machine learning iv", + "author": "Ran Xu; Yue Yu; Hejie Cui; Xuan Kan; Yanqiao Zhu; Joyce Ho; Chao Zhang; Carl Yang", + "authorids": "", + "aff": "Emory University; Georgia Institute of Technology; Emory University; Emory University; University of California, Los Angeles; Emory University; Georgia Institute of Technology; Emory University", + "bibtex": "@article{Xu_Yu_Cui_Kan_Zhu_Ho_Zhang_Yang_2023, title={Neighborhood-Regularized Self-Training for Learning with Few Labels}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26260}, DOI={10.1609/aaai.v37i9.26260}, abstractNote={Training deep neural networks (DNNs) with limited supervision has been a popular research topic as it can significantly alleviate the annotation burden. Self-training has been successfully applied in semi-supervised learning tasks, but one drawback of self-training is that it is vulnerable to the label noise from incorrect pseudo labels. Inspired by the fact that samples with similar labels tend to share similar representations, we develop a neighborhood-based sample selection approach to tackle the issue of noisy pseudo labels. We further stabilize self-training via aggregating the predictions from different rounds during sample selection. Experiments on eight tasks show that our proposed method outperforms the strongest self-training baseline with 1.83% and 2.51% performance gain for text and graph datasets on average. Our further analysis demonstrates that our proposed data selection strategy reduces the noise of pseudo labels by 36.8% and saves 57.3% of the time when compared with the best baseline. 
Our code and appendices will be uploaded to: https://github.com/ritaranx/NeST.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Ran and Yu, Yue and Cui, Hejie and Kan, Xuan and Zhu, Yanqiao and Ho, Joyce and Zhang, Chao and Yang, Carl}, year={2023}, month={Jun.}, pages={10611-10619} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26260/26032", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26260", + "pdf_size": 3284595, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16925801075645631325&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 12, + "aff_domain": "emory.edu;gatech.edu;emory.edu;emory.edu;cs.ucla.edu;emory.edu;gatech.edu;emory.edu", + "email": "emory.edu;gatech.edu;emory.edu;emory.edu;cs.ucla.edu;emory.edu;gatech.edu;emory.edu", + "github": "https://github.com/ritaranx/NeST", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;2;0;1;0", + "aff_unique_norm": "Emory University;Georgia Institute of Technology;University of California, Los Angeles", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.emory.edu;https://www.gatech.edu;https://www.ucla.edu", + "aff_unique_abbr": "Emory;Georgia Tech;UCLA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26625", + "title": "Nested Named Entity Recognition as Building Local Hypergraphs", + "track": "main", + "status": "Technical", + "abstract": "Named entity recognition is a fundamental task in natural language processing. Based on the sequence labeling paradigm for flat named entity recognition, multiple methods have been developed to handle the nested structures. However, they either require fixed recognition order or introduce complex hypergraphs. 
To tackle this problem, we propose a novel model named Local Hypergraph Builder Network (LHBN) that builds multiple simpler local hypergraphs to capture named entities instead of a single complex full-size hypergraph. The proposed model has three main properties: (1) The named entities that share boundaries are captured in the same local hypergraph. (2) The boundary information is enhanced by building local hypergraphs. (3) The hypergraphs can be built bidirectionally to take advantage of the identification direction preference of different named entities. Experiments illustrate that our model outperforms previous state-of-the-art methods on four widely used nested named entity recognition datasets: ACE04, ACE05, GENIA, and KBP17. The code is available at https://github.com/yanyk13/local-hypergraph-building-network.git.", + "primary_area": "speech natural language processing", + "author": "Yukun Yan; Bingling Cai; Sen Song", + "authorids": "", + "aff": "Biomedical Department, Tsinghua University + Laboratory of Brain and Intelligence, Tsinghua University; Biomedical Department, Tsinghua University + Laboratory of Brain and Intelligence, Tsinghua University; Biomedical Department, Tsinghua University + Laboratory of Brain and Intelligence, Tsinghua University", + "bibtex": "@article{Yan_Cai_Song_2023, title={Nested Named Entity Recognition as Building Local Hypergraphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26625}, DOI={10.1609/aaai.v37i11.26625}, abstractNote={Named entity recognition is a fundamental task in natural language processing. Based on the sequence labeling paradigm for flat named entity recognition, multiple methods have been developed to handle the nested structures. However, they either require fixed recognition order or introduce complex hypergraphs. 
To tackle this problem, we propose a novel model named Local Hypergraph Builder Network (LHBN) that builds multiple simpler local hypergraphs to capture named entities instead of a single complex full-size hypergraph. The proposed model has three main properties: (1) The named entities that share boundaries are captured in the same local hypergraph. (2) The boundary information is enhanced by building local hypergraphs. (3) The hypergraphs can be built bidirectionally to take advantage of the identification direction preference of different named entities. Experiments illustrate that our model outperforms previous state-of-the-art methods on four widely used nested named entity recognition datasets: ACE04, ACE05, GENIA, and KBP17. The code is available at https://github.com/yanyk13/local-hypergraph-building-network.git.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Yukun and Cai, Bingling and Song, Sen}, year={2023}, month={Jun.}, pages={13878-13886} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26625/26397", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26625", + "pdf_size": 491908, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16846975172406419883&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/yanyk13/local-hypergraph-building-network.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Biomedical Department", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26378", + 
"title": "Networked Anti-coordination Games Meet Graphical Dynamical Systems: Equilibria and Convergence", + "track": "main", + "status": "Technical", + "abstract": "Evolutionary anti-coordination games on networks capture real-world strategic situations such as traffic routing and market competition. Two key problems concerning evolutionary games are the existence of a pure Nash equilibrium (NE) and the convergence time. In this work, we study these two problems for anti-coordination games under sequential and synchronous update schemes. For each update scheme, we examine two decision modes based on whether an agent considers its own previous action (self essential) or not (self non-essential) in choosing its next action. Using a relationship between games and dynamical systems, we show that for both update schemes, finding an NE can be done efficiently under the self non-essential mode but is computationally intractable under the self essential mode. We then identify special cases for which an NE can be obtained efficiently. For convergence time, we show that the dynamics converges in a polynomial number of steps under the synchronous scheme; for the sequential scheme, the convergence time is polynomial only under the self non-essential mode. Through experiments, we empirically examine the convergence time and the equilibria for both synthetic and real-world networks.", + "primary_area": "multiagent systems", + "author": "Zirou Qiu; Chen Chen; Madhav V. Marathe; S. S. Ravi; Daniel J. Rosenkrantz; Richard E. 
Stearns; Anil Vullikanti", + "authorids": "", + "aff": "Computer Science Dept., University of Virginia + Biocomplexity Institute and Initiative, University of Virginia; Biocomplexity Institute and Initiative, University of Virginia; Computer Science Dept., University of Virginia + Biocomplexity Institute and Initiative, University of Virginia; Computer Science Dept., University at Albany \u2013 SUNY + Biocomplexity Institute and Initiative, University of Virginia; Computer Science Dept., University at Albany \u2013 SUNY + Biocomplexity Institute and Initiative, University of Virginia; Computer Science Dept., University at Albany \u2013 SUNY + Biocomplexity Institute and Initiative, University of Virginia; Computer Science Dept., University of Virginia + Biocomplexity Institute and Initiative, University of Virginia", + "bibtex": "@article{Qiu_Chen_Marathe_Ravi_Rosenkrantz_Stearns_Vullikanti_2023, title={Networked Anti-coordination Games Meet Graphical Dynamical Systems: Equilibria and Convergence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26378}, DOI={10.1609/aaai.v37i10.26378}, abstractNote={Evolutionary anti-coordination games on networks capture real-world strategic situations such as traffic routing and market competition. Two key problems concerning evolutionary games are the existence of a pure Nash equilibrium (NE) and the convergence time. In this work, we study these two problems for anti-coordination games under sequential and synchronous update schemes. For each update scheme, we examine two decision modes based on whether an agent considers its own previous action (self essential) or not (self non-essential) in choosing its next action. Using a relationship between games and dynamical systems, we show that for both update schemes, finding an NE can be done efficiently under the self non-essential mode but is computationally intractable under the self essential mode. 
We then identify special cases for which an NE can be obtained efficiently. For convergence time, we show that the dynamics converges in a polynomial number of steps under the synchronous scheme; for the sequential scheme, the convergence time is polynomial only under the self non-essential mode. Through experiments, we empirically examine the convergence time and the equilibria for both synthetic and real-world networks.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qiu, Zirou and Chen, Chen and Marathe, Madhav V. and Ravi, S. S. and Rosenkrantz, Daniel J. and Stearns, Richard E. and Vullikanti, Anil}, year={2023}, month={Jun.}, pages={11663-11671} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26378/26150", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26378", + "pdf_size": 246757, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:iyX2PI7inBkJ:scholar.google.com/&scioq=Networked+Anti-coordination+Games+Meet+Graphical+Dynamical+Systems:+Equilibria+and+Convergence&hl=en&as_sdt=0,5", + "gs_version_total": 6, + "aff_domain": "virginia.edu;gmail.com;virginia.edu;gmail.com;gmail.com;gmail.com;virginia.edu", + "email": "virginia.edu;gmail.com;virginia.edu;gmail.com;gmail.com;gmail.com;virginia.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;0;0+0;1+0;1+0;1+0;0+0", + "aff_unique_norm": "University of Virginia;University at Albany \u2013 SUNY", + "aff_unique_dep": "Computer Science Dept.;Computer Science Dept.", + "aff_unique_url": "https://www.virginia.edu;https://www.albany.edu", + "aff_unique_abbr": "UVA;UAlbany", + "aff_campus_unique_index": ";;1;1;1;", + "aff_campus_unique": ";Albany", + "aff_country_unique_index": "0+0;0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26415", + "title": "Networked Restless Bandits with Positive Externalities", + "track": "main", + "status": 
"Technical", + "abstract": "Restless multi-armed bandits are often used to model budget-constrained resource allocation tasks where receipt of the resource is associated with an increased probability of a favorable state transition. Prior work assumes that individual arms only benefit if they receive the resource directly. However, many allocation tasks occur within communities and can be characterized by positive externalities that allow arms to derive partial benefit when their neighbor(s) receive the resource. We thus introduce networked restless bandits, a novel multi-armed bandit setting in which arms are both restless and embedded within a directed graph. We then present Greta, a graph-aware, Whittle index-based heuristic algorithm that can be used to efficiently construct a constrained reward-maximizing action vector at each timestep. Our empirical results demonstrate that Greta outperforms comparison policies across a range of hyperparameter values and graph topologies. Code and appendices are available at https://github.com/crherlihy/networked_restless_bandits.", + "primary_area": "planning routing and scheduling", + "author": "Christine Herlihy; John P. Dickerson", + "authorids": "", + "aff": "Department of Computer Science, University of Maryland, College Park; Department of Computer Science, University of Maryland, College Park", + "bibtex": "@article{Herlihy_Dickerson_2023, title={Networked Restless Bandits with Positive Externalities}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26415}, DOI={10.1609/aaai.v37i10.26415}, abstractNote={Restless multi-armed bandits are often used to model budget-constrained resource allocation tasks where receipt of the resource is associated with an increased probability of a favorable state transition. Prior work assumes that individual arms only benefit if they receive the resource directly. 
However, many allocation tasks occur within communities and can be characterized by positive externalities that allow arms to derive partial benefit when their neighbor(s) receive the resource. We thus introduce networked restless bandits, a novel multi-armed bandit setting in which arms are both restless and embedded within a directed graph. We then present Greta, a graph-aware, Whittle index-based heuristic algorithm that can be used to efficiently construct a constrained reward-maximizing action vector at each timestep. Our empirical results demonstrate that Greta outperforms comparison policies across a range of hyperparameter values and graph topologies. Code and appendices are available at https://github.com/crherlihy/networked_restless_bandits.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Herlihy, Christine and Dickerson, John P.}, year={2023}, month={Jun.}, pages={11997-12004} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26415/26187", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26415", + "pdf_size": 241976, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18080589444008734726&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "umd.edu;umd.edu", + "email": "umd.edu;umd.edu", + "github": "https://github.com/crherlihy/networked_restless_bandits", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Maryland, College Park", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.umd.edu", + "aff_unique_abbr": "UMD", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Park", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25118", + "title": "Neural Architecture Search for Wide Spectrum Adversarial Robustness", + "track": "main", + "status": "Technical", + "abstract": "One
major limitation of CNNs is that they are vulnerable to adversarial attacks. Currently, adversarial robustness in neural networks is commonly optimized with respect to a small pre-selected adversarial noise strength, causing them to have potentially limited performance when under attack by larger adversarial noises in real-world scenarios. In this research, we aim to find Neural Architectures that have improved robustness on a wide range of adversarial noise strengths through Neural Architecture Search. In detail, we propose a lightweight Adversarial Noise Estimator to reduce the high cost of generating adversarial noise with respect to different strengths. Besides, we construct an Efficient Wide Spectrum Searcher to reduce the cost of adjusting network architecture with the large adversarial validation set during the search. With the two components proposed, the number of adversarial noise strengths searched can be increased significantly while having a limited increase in search time. Extensive experiments on benchmark datasets such as CIFAR and ImageNet demonstrate that with a significantly richer search signal in robustness, our method can find architectures with improved overall robustness while having a limited impact on natural accuracy and around 40% reduction in search time compared with the naive approach of searching. 
Codes available at: https://github.com/zhicheng2T0/Wsr-NAS.git", + "primary_area": "computer vision i", + "author": "Zhi Cheng; Yanxi Li; Minjing Dong; Xiu Su; Shan You; Chang Xu", + "authorids": "", + "aff": "School of Computer Science, Faculty of Engineering, The University of Sydney; School of Computer Science, Faculty of Engineering, The University of Sydney; School of Computer Science, Faculty of Engineering, The University of Sydney; School of Computer Science, Faculty of Engineering, The University of Sydney; SenseTime Research; School of Computer Science, Faculty of Engineering, The University of Sydney", + "bibtex": "@article{Cheng_Li_Dong_Su_You_Xu_2023, title={Neural Architecture Search for Wide Spectrum Adversarial Robustness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25118}, DOI={10.1609/aaai.v37i1.25118}, abstractNote={One major limitation of CNNs is that they are vulnerable to adversarial attacks. Currently, adversarial robustness in neural networks is commonly optimized with respect to a small pre-selected adversarial noise strength, causing them to have potentially limited performance when under attack by larger adversarial noises in real-world scenarios. In this research, we aim to find Neural Architectures that have improved robustness on a wide range of adversarial noise strengths through Neural Architecture Search. In detail, we propose a lightweight Adversarial Noise Estimator to reduce the high cost of generating adversarial noise with respect to different strengths. Besides, we construct an Efficient Wide Spectrum Searcher to reduce the cost of adjusting network architecture with the large adversarial validation set during the search. With the two components proposed, the number of adversarial noise strengths searched can be increased significantly while having a limited increase in search time. 
Extensive experiments on benchmark datasets such as CIFAR and ImageNet demonstrate that with a significantly richer search signal in robustness, our method can find architectures with improved overall robustness while having a limited impact on natural accuracy and around 40% reduction in search time compared with the naive approach of searching. Codes available at: https://github.com/zhicheng2T0/Wsr-NAS.git}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Zhi and Li, Yanxi and Dong, Minjing and Su, Xiu and You, Shan and Xu, Chang}, year={2023}, month={Jun.}, pages={442-451} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25118/24890", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25118", + "pdf_size": 467795, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10850375686383825797&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "uni.sydney.edu.au;uni.sydney.edu.au;uni.sydney.edu.au;uni.sydney.edu.au;sensetime.com;sydney.edu.au", + "email": "uni.sydney.edu.au;uni.sydney.edu.au;uni.sydney.edu.au;uni.sydney.edu.au;sensetime.com;sydney.edu.au", + "github": "https://github.com/zhicheng2T0/Wsr-NAS.git", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "The University of Sydney;SenseTime", + "aff_unique_dep": "School of Computer Science;SenseTime Research", + "aff_unique_url": "https://www.sydney.edu.au;https://www.sensetime.com", + "aff_unique_abbr": "USYD;SenseTime", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-26441", + "title": "Neural Diffeomorphic Non-uniform B-spline Flows", + "track": "main", + "status": "Technical", + "abstract": "Normalizing flows have been successfully modeling a complex probability distribution as an invertible transformation of a 
simple base distribution. However, there are often applications that require more than invertibility. For instance, the computation of energies and forces in physics requires the second derivatives of the transformation to be well-defined and continuous. Smooth normalizing flows employ infinitely differentiable transformation, but with the price of slow non-analytic inverse transforms. In this work, we propose diffeomorphic non-uniform B-spline flows that are at least twice continuously differentiable while bi-Lipschitz continuous, enabling efficient parametrization while retaining analytic inverse transforms based on a sufficient condition for diffeomorphism. Firstly, we investigate the sufficient condition for C(k-2)-diffeomorphic non-uniform kth-order B-spline transformations. Then, we derive an analytic inverse transformation of the non-uniform cubic B-spline transformation for neural diffeomorphic non-uniform B-spline flows. Lastly, we performed experiments on solving the force matching problem in Boltzmann generators, demonstrating that our C2-diffeomorphic non-uniform B-spline flows yielded solutions better than previous spline flows and faster than smooth normalizing flows. 
Our source code is publicly available at https://github.com/smhongok/Non-uniform-B-spline-Flow.", + "primary_area": "reasoning under uncertainty", + "author": "Seongmin Hong; Se Young Chun", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, Seoul National University, Republic of Korea + INMC, Interdisciplinary Program in AI, Seoul National University, Republic of Korea; Department of Electrical and Computer Engineering, Seoul National University, Republic of Korea + INMC, Interdisciplinary Program in AI, Seoul National University, Republic of Korea", + "bibtex": "@article{Hong_Chun_2023, title={Neural Diffeomorphic Non-uniform B-spline Flows}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26441}, DOI={10.1609/aaai.v37i10.26441}, abstractNote={Normalizing flows have been successfully modeling a complex probability distribution as an invertible transformation of a simple base distribution. However, there are often applications that require more than invertibility. For instance, the computation of energies and forces in physics requires the second derivatives of the transformation to be well-defined and continuous. Smooth normalizing flows employ infinitely differentiable transformation, but with the price of slow non-analytic inverse transforms. In this work, we propose diffeomorphic non-uniform B-spline flows that are at least twice continuously differentiable while bi-Lipschitz continuous, enabling efficient parametrization while retaining analytic inverse transforms based on a sufficient condition for diffeomorphism. Firstly, we investigate the sufficient condition for C(k-2)-diffeomorphic non-uniform kth-order B-spline transformations. Then, we derive an analytic inverse transformation of the non-uniform cubic B-spline transformation for neural diffeomorphic non-uniform B-spline flows. 
Lastly, we performed experiments on solving the force matching problem in Boltzmann generators, demonstrating that our C2-diffeomorphic non-uniform B-spline flows yielded solutions better than previous spline flows and faster than smooth normalizing flows. Our source code is publicly available at https://github.com/smhongok/Non-uniform-B-spline-Flow.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hong, Seongmin and Chun, Se Young}, year={2023}, month={Jun.}, pages={12225-12233} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26441/26213", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26441", + "pdf_size": 1879735, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13943799934134381056&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr", + "github": "https://github.com/smhongok/Non-uniform-B-spline-Flow", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "Seoul National University", + "aff_unique_dep": "Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.snu.ac.kr", + "aff_unique_abbr": "SNU", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Seoul", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-26496", + "title": "Neural Dynamic Focused Topic Model", + "track": "main", + "status": "Technical", + "abstract": "Topic models and all their variants analyse text by learning meaningful representations through word co-occurrences. As pointed out by previous work, such models implicitly assume that the probability of a topic to be active and its proportion within each document are positively correlated. 
This correlation can be strongly detrimental in the case of documents created over time, simply because recent documents are likely better described by new and hence rare topics. In this work we leverage recent advances in neural variational inference and present an alternative neural approach to the dynamic Focused Topic Model. Indeed, we develop a neural model for topic evolution which exploits sequences of Bernoulli random variables in order to track the appearances of topics, thereby decoupling their activities from their proportions. We evaluate our model on three different datasets (the UN general debates, the collection of NeurIPS papers, and the ACL Anthology dataset) and show that it (i) outperforms state-of-the-art topic models in generalization tasks and (ii) performs comparably to them on prediction tasks, while employing roughly the same number of parameters, and converging about two times faster.", + "primary_area": "speech natural language processing", + "author": "Kostadin Cvejoski; Rams\u00e9s J. S\u00e1nchez; C\u00e9sar Ojeda", + "authorids": "", + "aff": "Lamarr-Institute for Machine Learning and Artificial Intelligence + Fraunhofer-Institute for Intelligent Analysis and Information Systems (IAIS); Lamarr-Institute for Machine Learning and Artificial Intelligence + BIT University of Bonn; University of Potsdam", + "bibtex": "@article{Cvejoski_S\u00e1nchez_Ojeda_2023, title={Neural Dynamic Focused Topic Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26496}, DOI={10.1609/aaai.v37i11.26496}, abstractNote={Topic models and all their variants analyse text by learning meaningful representations through word co-occurrences. As pointed out by previous work, such models implicitly assume that the probability of a topic to be active and its proportion within each document are positively correlated. 
This correlation can be strongly detrimental in the case of documents created over time, simply because recent documents are likely better described by new and hence rare topics. In this work we leverage recent advances in neural variational inference and present an alternative neural approach to the dynamic Focused Topic Model. Indeed, we develop a neural model for topic evolution which exploits sequences of Bernoulli random variables in order to track the appearances of topics, thereby decoupling their activities from their proportions. We evaluate our model on three different datasets (the UN general debates, the collection of NeurIPS papers, and the ACL Anthology dataset) and show that it (i) outperforms state-of-the-art topic models in generalization tasks and (ii) performs comparably to them on prediction tasks, while employing roughly the same number of parameters, and converging about two times faster.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cvejoski, Kostadin and S\u00e1nchez, Rams\u00e9s J. 
and Ojeda, C\u00e9sar}, year={2023}, month={Jun.}, pages={12719-12727} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26496/26268", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26496", + "pdf_size": 255551, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7583164556385469980&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "iais.fraunhofer.de;bit.uni-bonn.de;uni-potsdam.de", + "email": "iais.fraunhofer.de;bit.uni-bonn.de;uni-potsdam.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+2;3", + "aff_unique_norm": "Lamarr-Institute for Machine Learning and Artificial Intelligence;Fraunhofer-Institute for Intelligent Analysis and Information Systems;University of Bonn;University of Potsdam", + "aff_unique_dep": "Machine Learning and Artificial Intelligence;Intelligent Analysis and Information Systems;;", + "aff_unique_url": ";https://www.iais.fraunhofer.de/;https://www.uni-bonn.de;https://www.uni-potsdam.de", + "aff_unique_abbr": ";IAIS;Uni Bonn;UP", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;1", + "aff_country_unique": "United States;Germany" + }, + { + "id": "article-26969", + "title": "Neural Implicit Surface Reconstruction from Noisy Camera Observations (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Representing 3D objects and scenes with neural radiance fields has become very popular over the last years. Recently, surface-based representations have been proposed, that allow to reconstruct 3D objects from simple photographs. However, most current techniques require an accurate camera calibration, i.e. camera parameters corresponding to each image, which is often a difficult task to do in real-life situations. To this end, we propose a method for learning 3D surfaces from noisy camera parameters. 
We show that we can learn camera parameters together with learning the surface representation, and demonstrate good quality 3D surface reconstruction even with noisy camera observations.", + "primary_area": "", + "author": "Sarthak Gupta; Patrik Huber", + "authorids": "", + "aff": "Indian Institute of Technology Roorkee, Roorkee, Uttarakhand, India - 247667 + University of York, Deramore Lane, Heslington, York, YO10 5GH, United Kingdom; University of York, Deramore Lane, Heslington, York, YO10 5GH, United Kingdom", + "bibtex": "@article{Gupta_Huber_2024, title={Neural Implicit Surface Reconstruction from Noisy Camera Observations (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26969}, DOI={10.1609/aaai.v37i13.26969}, abstractNote={Representing 3D objects and scenes with neural radiance fields has become very popular over the last years. Recently, surface-based representations have been proposed, that allow to reconstruct 3D objects from simple photographs. However, most current techniques require an accurate camera calibration, i.e. camera parameters corresponding to each image, which is often a difficult task to do in real-life situations. To this end, we propose a method for learning 3D surfaces from noisy camera parameters. 
We show that we can learn camera parameters together with learning the surface representation, and demonstrate good quality 3D surface reconstruction even with noisy camera observations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gupta, Sarthak and Huber, Patrik}, year={2024}, month={Jul.}, pages={16218-16219} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26969/26741", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26969", + "pdf_size": 817449, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:xkFMoiWLC_AJ:scholar.google.com/&scioq=Neural+Implicit+Surface+Reconstruction+from+Noisy+Camera+Observations+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff_domain": "gmail.com;york.ac.uk", + "email": "gmail.com;york.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Indian Institute of Technology Roorkee;University of York", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitr.ac.in;https://www.york.ac.uk", + "aff_unique_abbr": "IIT Roorkee;York", + "aff_campus_unique_index": "0+1;1", + "aff_campus_unique": "Roorkee;York", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "India;United Kingdom" + }, + { + "id": "article-26315", + "title": "Neural Integro-Differential Equations", + "track": "main", + "status": "Technical", + "abstract": "Modeling continuous dynamical systems from discretely sampled observations is a fundamental problem in data science. Often, such dynamics are the result of non-local processes that present an integral over time. As such, these systems are modeled with Integro-Differential Equations (IDEs); generalizations of differential equations that comprise both an integral and a differential component. For example, brain dynamics are not accurately modeled by differential equations since their behavior is non-Markovian, i.e. 
dynamics are in part dictated by history. Here, we introduce the Neural IDE (NIDE), a novel deep learning framework based on the theory of IDEs where integral operators are learned using neural networks. \n We test NIDE on several toy and brain activity datasets and demonstrate that NIDE outperforms other models. These tasks include time extrapolation as well as predicting dynamics from unseen initial conditions, which we test on whole-cortex activity recordings in freely behaving mice. Further, we show that NIDE can decompose dynamics into their Markovian and non-Markovian constituents, via the learned integral operator, which we test on fMRI brain activity recordings of people on ketamine. Finally, the integrand of the integral operator provides a latent space that gives insight into the underlying dynamics, which we demonstrate on wide-field brain imaging recordings. Altogether, NIDE is a novel approach that enables modeling of complex non-local dynamics with neural networks.", + "primary_area": "machine learning iv", + "author": "Emanuele Zappala; Antonio H. de O. Fonseca; Andrew H. Moberly; Michael J. Higley; Chadi Abdallah; Jessica A. Cardin; David van Dijk", + "authorids": "", + "aff": "Yale University; Yale University; Yale University; Yale University; Baylor College of Medicine; Yale University; Yale University", + "bibtex": "@article{Zappala_O. Fonseca_Moberly_Higley_Abdallah_Cardin_van Dijk_2023, title={Neural Integro-Differential Equations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26315}, DOI={10.1609/aaai.v37i9.26315}, abstractNote={Modeling continuous dynamical systems from discretely sampled observations is a fundamental problem in data science. Often, such dynamics are the result of non-local processes that present an integral over time. As such, these systems are modeled with Integro-Differential Equations (IDEs); generalizations of differential equations that comprise both an integral and a differential component. 
For example, brain dynamics are not accurately modeled by differential equations since their behavior is non-Markovian, i.e. dynamics are in part dictated by history. Here, we introduce the Neural IDE (NIDE), a novel deep learning framework based on the theory of IDEs where integral operators are learned using neural networks. We test NIDE on several toy and brain activity datasets and demonstrate that NIDE outperforms other models. These tasks include time extrapolation as well as predicting dynamics from unseen initial conditions, which we test on whole-cortex activity recordings in freely behaving mice. Further, we show that NIDE can decompose dynamics into their Markovian and non-Markovian constituents, via the learned integral operator, which we test on fMRI brain activity recordings of people on ketamine. Finally, the integrand of the integral operator provides a latent space that gives insight into the underlying dynamics, which we demonstrate on wide-field brain imaging recordings. Altogether, NIDE is a novel approach that enables modeling of complex non-local dynamics with neural networks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zappala, Emanuele and O. Fonseca, Antonio H. de and Moberly, Andrew H. and Higley, Michael J. and Abdallah, Chadi and Cardin, Jessica A. 
and van Dijk, David}, year={2023}, month={Jun.}, pages={11104-11112} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26315/26087", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26315", + "pdf_size": 956298, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8171224902667948726&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "yale.edu;yale.edu;yale.edu;yale.edu;bcm.edu;yale.edu;yale.edu", + "email": "yale.edu;yale.edu;yale.edu;yale.edu;bcm.edu;yale.edu;yale.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;0;0", + "aff_unique_norm": "Yale University;Baylor College of Medicine", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.yale.edu;https://www.bcm.edu", + "aff_unique_abbr": "Yale;BCM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27010", + "title": "Neural Language Model Based Attentive Term Dependence Model for Verbose Query (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The query-document term matching plays an important role in information retrieval. However, the retrieval performance degrades when the documents get matched with the extraneous terms of the query which frequently arises in verbose queries. To address this problem, we generate the dense vector of\nthe entire query and individual query terms using the pre-trained BERT (Bidirectional Encoder Representations from Transformers) model and subsequently analyze their relation to focus on the central terms. We then propose a context-aware attentive extension of unsupervised Markov Random Field-based sequential term dependence model that explicitly pays more attention to those contextually central terms. 
The proposed model utilizes the strengths of the pre-trained large language model for estimating the attention weight of terms and rank the documents in a single pass without any supervision.", + "primary_area": "", + "author": "Dipannita Podder; Jiaul H. Paik; Pabitra Mitra", + "authorids": "", + "aff": "Indian Institute of Technology Kharagpur, India - 721302; Indian Institute of Technology Kharagpur, India - 721302; Indian Institute of Technology Kharagpur, India - 721302", + "bibtex": "@article{Podder_Paik_Mitra_2024, title={Neural Language Model Based Attentive Term Dependence Model for Verbose Query (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27010}, DOI={10.1609/aaai.v37i13.27010}, abstractNote={The query-document term matching plays an important role in information retrieval. However, the retrieval performance degrades when the documents get matched with the extraneous terms of the query which frequently arises in verbose queries. To address this problem, we generate the dense vector of\nthe entire query and individual query terms using the pre-trained BERT (Bidirectional Encoder Representations from Transformers) model and subsequently analyze their relation to focus on the central terms. We then propose a context-aware attentive extension of unsupervised Markov Random Field-based sequential term dependence model that explicitly pays more attention to those contextually central terms. The proposed model utilizes the strengths of the pre-trained large language model for estimating the attention weight of terms and rank the documents in a single pass without any supervision.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Podder, Dipannita and Paik, Jiaul H. 
and Mitra, Pabitra}, year={2024}, month={Jul.}, pages={16300-16301} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27010/26782", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27010", + "pdf_size": 77170, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=534557185303192803&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "iitkgp.ac.in;cet.iitkgp.ac.in;cse.iitkgp.ac.in", + "email": "iitkgp.ac.in;cet.iitkgp.ac.in;cse.iitkgp.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Kharagpur", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitkgp.ac.in", + "aff_unique_abbr": "IIT Kharagpur", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Kharagpur", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26772", + "title": "Neural Policy Safety Verification via Predicate Abstraction: CEGAR", + "track": "aaai special track", + "status": "Technical", + "abstract": "Neural networks (NN) are an increasingly important representation of action policies pi. Recent work has extended predicate abstraction to prove safety of such pi, through policy predicate abstraction (PPA) which over-approximates the state space subgraph induced by pi. The advantage of PPA is that reasoning about the NN \u2013 calls to SMT solvers \u2013 is required only locally, at individual abstract state transitions, in contrast to bounded model checking (BMC) where SMT must reason globally about sequences of NN decisions. Indeed, it has been shown that PPA can outperform a simple BMC implementation. However, the abstractions underlying these results (i.e., the abstraction predicates) were supplied manually. Here we automate this step. We extend counterexample guided abstraction refinement (CEGAR) to PPA. 
This involves dealing with a new source of spuriousness in abstract unsafe paths, pertaining not to transition behavior but to the decisions of the neural network pi. We introduce two methods tackling this issue based on the states involved, and we show that global SMT calls deciding spuriousness exactly can be avoided. We devise algorithmic enhancements leveraging incremental computation and heuristic search. We show empirically that the resulting verification tool has significant advantages over an encoding into the state-of-the-art model checker nuXmv. In particular, ours is the only approach in our experiments that succeeds in proving policies safe.", + "primary_area": "safe and robust ai", + "author": "Marcel Vinzent; Siddhant Sharma; J\u00f6erg Hoffmann", + "authorids": "", + "aff": "Saarland University, Saarland Informatics Campus, Saarbr \u00a8ucken, Germany; Department of Electrical Engineering, Indian Institute of Technology Delhi, New Delhi, India; Saarland University, Saarland Informatics Campus, Saarbr \u00a8ucken, Germany + German Research Center for Artificial Intelligence (DFKI), Saarbr \u00a8ucken, Germany", + "bibtex": "@article{Vinzent_Sharma_Hoffmann_2023, title={Neural Policy Safety Verification via Predicate Abstraction: CEGAR}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26772}, DOI={10.1609/aaai.v37i12.26772}, abstractNote={Neural networks (NN) are an increasingly important representation of action policies pi. Recent work has extended predicate abstraction to prove safety of such pi, through policy predicate abstraction (PPA) which over-approximates the state space subgraph induced by pi. The advantage of PPA is that reasoning about the NN \u2013 calls to SMT solvers \u2013 is required only locally, at individual abstract state transitions, in contrast to bounded model checking (BMC) where SMT must reason globally about sequences of NN decisions. 
Indeed, it has been shown that PPA can outperform a simple BMC implementation. However, the abstractions underlying these results (i.e., the abstraction predicates) were supplied manually. Here we automate this step. We extend counterexample guided abstraction refinement (CEGAR) to PPA. This involves dealing with a new source of spuriousness in abstract unsafe paths, pertaining not to transition behavior but to the decisions of the neural network pi. We introduce two methods tackling this issue based on the states involved, and we show that global SMT calls deciding spuriousness exactly can be avoided. We devise algorithmic enhancements leveraging incremental computation and heuristic search. We show empirically that the resulting verification tool has significant advantages over an encoding into the state-of-the-art model checker nuXmv. In particular, ours is the only approach in our experiments that succeeds in proving policies safe.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vinzent, Marcel and Sharma, Siddhant and Hoffmann, J\u00f6erg}, year={2023}, month={Jun.}, pages={15188-15196} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26772/26544", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26772", + "pdf_size": 216889, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2576267438092805665&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.uni-saarland.de;ee.iitd.ac.in;cs.uni-saarland.de", + "email": "cs.uni-saarland.de;ee.iitd.ac.in;cs.uni-saarland.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "Saarland University;Indian Institute of Technology Delhi;German Research Center for Artificial Intelligence", + "aff_unique_dep": "Saarland Informatics Campus;Department of Electrical Engineering;", + "aff_unique_url": 
"https://www.uni-saarland.de;https://www.iitdelhi.ac.in;https://www.dFKI.de", + "aff_unique_abbr": "UdS;IIT Delhi;DFKI", + "aff_campus_unique_index": "0;1;0+0", + "aff_campus_unique": "Saarbr\u00fccken;New Delhi", + "aff_country_unique_index": "0;1;0+0", + "aff_country_unique": "Germany;India" + }, + { + "id": "article-25966", + "title": "Neural Representations Reveal Distinct Modes of Class Fitting in Residual Convolutional Networks", + "track": "main", + "status": "Technical", + "abstract": "We leverage probabilistic models of neural representations to investigate how residual networks fit classes. To this end, we estimate class-conditional density models for representations learned by deep ResNets. We then use these models to characterize distributions of representations across learned classes. Surprisingly, we find that classes in the investigated models are not fitted in a uniform way. On the contrary: we uncover two groups of classes that are fitted with markedly different distributions of representations. These distinct modes of class-fitting are evident only in the deeper layers of the investigated models, indicating that they are not related to low-level image features. We show that the uncovered structure in neural representations correlate with memorization of training examples and adversarial robustness. Finally, we compare class-conditional distributions of neural representations between memorized and typical examples. 
This allows us to uncover where in the network structure class labels arise for memorized and standard inputs.", + "primary_area": "machine learning ii", + "author": "Micha\u0142 Jamro\u017c; Marcin Kurdziel", + "authorids": "", + "aff": "AGH University of Science and Technology, Krakow, Poland; AGH University of Science and Technology, Krakow, Poland", + "bibtex": "@article{Jamro\u017c_Kurdziel_2023, title={Neural Representations Reveal Distinct Modes of Class Fitting in Residual Convolutional Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25966}, DOI={10.1609/aaai.v37i7.25966}, abstractNote={We leverage probabilistic models of neural representations to investigate how residual networks fit classes. To this end, we estimate class-conditional density models for representations learned by deep ResNets. We then use these models to characterize distributions of representations across learned classes. Surprisingly, we find that classes in the investigated models are not fitted in a uniform way. On the contrary: we uncover two groups of classes that are fitted with markedly different distributions of representations. These distinct modes of class-fitting are evident only in the deeper layers of the investigated models, indicating that they are not related to low-level image features. We show that the uncovered structure in neural representations correlate with memorization of training examples and adversarial robustness. Finally, we compare class-conditional distributions of neural representations between memorized and typical examples. 
This allows us to uncover where in the network structure class labels arise for memorized and standard inputs.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jamro\u017c, Micha\u0142 and Kurdziel, Marcin}, year={2023}, month={Jun.}, pages={7988-7995} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25966/25738", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25966", + "pdf_size": 303729, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4117985767838379941&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "agh.edu.pl;agh.edu.pl", + "email": "agh.edu.pl;agh.edu.pl", + "github": "https://github.com/mjamroz90/dnn-class-fitting", + "project": "https://arxiv.org/abs/2212.00771", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "AGH University of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.agh.edu.pl", + "aff_unique_abbr": "AGH", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Krakow", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Poland" + }, + { + "id": "article-26184", + "title": "Neural Spline Search for Quantile Probabilistic Modeling", + "track": "main", + "status": "Technical", + "abstract": "Accurate estimation of output quantiles is crucial in many use cases, where it is desired to model the range of possibility. Modeling target distribution at arbitrary quantile levels and at arbitrary input attribute levels are important to offer a comprehensive picture of the data, and requires the quantile function to be expressive enough. The quantile function describing the target distribution using quantile levels is critical for quantile regression. 
Although various parametric forms for the distributions (that the quantile function specifies) can be adopted, an everlasting problem is selecting the most appropriate one that can properly approximate the data distributions. In this paper, we propose a non-parametric and data-driven approach,\nNeural Spline Search (NSS), to represent the observed data distribution without parametric assumptions. NSS is flexible and expressive for modeling data distributions by transforming the inputs with a series of monotonic spline regressions guided by symbolic operators. We demonstrate that NSS outperforms previous methods on synthetic, real-world regression and time-series forecasting tasks.", + "primary_area": "machine learning iii", + "author": "Ruoxi Sun; Chun-Liang Li; Sercan \u00d6. Arik; Michael W. Dusenberry; Chen-Yu Lee; Tomas Pfister", + "authorids": "", + "aff": "Google Cloud AI; Google Cloud AI; Google Cloud AI; Google Research, Brain Team; Google Cloud AI; Google Cloud AI", + "bibtex": "@article{Sun_Li_Arik_Dusenberry_Lee_Pfister_2023, title={Neural Spline Search for Quantile Probabilistic Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26184}, DOI={10.1609/aaai.v37i8.26184}, abstractNote={Accurate estimation of output quantiles is crucial in many use cases, where it is desired to model the range of possibility. Modeling target distribution at arbitrary quantile levels and at arbitrary input attribute levels are important to offer a comprehensive picture of the data, and requires the quantile function to be expressive enough. The quantile function describing the target distribution using quantile levels is critical for quantile regression. Although various parametric forms for the distributions (that the quantile function specifies) can be adopted, an everlasting problem is selecting the most appropriate one that can properly approximate the data distributions. 
In this paper, we propose a non-parametric and data-driven approach,\nNeural Spline Search (NSS), to represent the observed data distribution without parametric assumptions. NSS is flexible and expressive for modeling data distributions by transforming the inputs with a series of monotonic spline regressions guided by symbolic operators. We demonstrate that NSS outperforms previous methods on synthetic, real-world regression and time-series forecasting tasks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Ruoxi and Li, Chun-Liang and Arik, Sercan \u00d6. and Dusenberry, Michael W. and Lee, Chen-Yu and Pfister, Tomas}, year={2023}, month={Jun.}, pages={9927-9934} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26184/25956", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26184", + "pdf_size": 809160, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8309977659312239424&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Cloud AI", + "aff_unique_url": "https://cloud.google.com/ai", + "aff_unique_abbr": "Google Cloud AI", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26432", + "title": "Neural TSP Solver with Progressive Distillation", + "track": "main", + "status": "Technical", + "abstract": "Travelling salesman problem (TSP) is NP-Hard with exponential search space. 
Recently, the adoption of encoder-decoder models as neural TSP solvers has emerged as an attractive topic because they can instantly obtain near-optimal results for small-scale instances. Nevertheless, their training efficiency and solution quality degrade dramatically when dealing with large-scale problems. To address the issue, we propose a novel progressive distillation framework, by adopting curriculum learning to train TSP samples in increasing order of their problem size and progressively distilling high-level knowledge from small models to large models via a distillation loss. In other words, the trained small models are used as the teacher network to guide action selection when training large models. To accelerate training speed, we also propose a Delaunary-graph based action mask and a new attention-based decoder to reduce decoding cost. Experimental results show that our approach establishes clear advantages over existing encoder-decoder models in terms of training effectiveness and solution quality. 
In addition, we validate its usefulness as an initial solution generator for the state-of-the-art TSP solvers, whose probability of obtaining the optimal solution can be further improved in such a hybrid manner.", + "primary_area": "planning routing and scheduling", + "author": "Dongxiang Zhang; Ziyang Xiao; Yuan Wang; Mingli Song; Gang Chen", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; School of Business, Singapore University of Social Sciences; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University", + "bibtex": "@article{Zhang_Xiao_Wang_Song_Chen_2023, title={Neural TSP Solver with Progressive Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26432}, DOI={10.1609/aaai.v37i10.26432}, abstractNote={Travelling salesman problem (TSP) is NP-Hard with exponential search space. Recently, the adoption of encoder-decoder models as neural TSP solvers has emerged as an attractive topic because they can instantly obtain near-optimal results for small-scale instances. Nevertheless, their training efficiency and solution quality degrade dramatically when dealing with large-scale problems. To address the issue, we propose a novel progressive distillation framework, by adopting curriculum learning to train TSP samples in increasing order of their problem size and progressively distilling high-level knowledge from small models to large models via a distillation loss. In other words, the trained small models are used as the teacher network to guide action selection when training large models. To accelerate training speed, we also propose a Delaunary-graph based action mask and a new attention-based decoder to reduce decoding cost. 
Experimental results show that our approach establishes clear advantages over existing encoder-decoder models in terms of training effectiveness and solution quality. In addition, we validate its usefulness as an initial solution generator for the state-of-the-art TSP solvers, whose probability of obtaining the optimal solution can be further improved in such a hybrid manner.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Dongxiang and Xiao, Ziyang and Wang, Yuan and Song, Mingli and Chen, Gang}, year={2023}, month={Jun.}, pages={12147-12154} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26432/26204", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26432", + "pdf_size": 329433, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7257265373215680517&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;zju.edu.cn;suss.edu.sg;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;suss.edu.sg;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Zhejiang University;Singapore University of Social Sciences", + "aff_unique_dep": "College of Computer Science and Technology;School of Business", + "aff_unique_url": "http://www.zju.edu.cn;https://www.suss.edu.sg", + "aff_unique_abbr": "ZJU;Suss", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25806", + "title": "Neurosymbolic Reasoning and Learning with Restricted Boltzmann Machines", + "track": "main", + "status": "Technical", + "abstract": "Knowledge representation and reasoning in neural networks has been a long-standing endeavour which has attracted much attention recently. 
The principled integration of reasoning and learning in neural networks is a main objective of the area of neurosymbolic Artificial Intelligence. In this paper, a neurosymbolic system is introduced that can represent any propositional logic formula. A proof of equivalence is presented showing that energy minimization in restricted Boltzmann machines corresponds to logical reasoning. We demonstrate the application of our approach empirically on logical reasoning and learning from data and knowledge. Experimental results show that reasoning can be performed effectively for a class of logical formulae. Learning from data and knowledge is also evaluated in comparison with learning of logic programs using neural networks. The results show that our approach can improve on state-of-the-art neurosymbolic systems. The theorems and empirical results presented in this paper are expected to reignite the research on the use of neural networks as massively-parallel models for logical reasoning and promote the principled integration of reasoning and learning in deep networks.", + "primary_area": "knowledge representation and reasoning", + "author": "Son N. Tran; Artur d'Avila Garcez", + "authorids": "", + "aff": "The University of Tasmania, Launceston, Tasmania, 7248, Australia; City, University of London, Northampton Square, London, EC1V 0HB, UK", + "bibtex": "@article{Tran_Garcez_2023, title={Neurosymbolic Reasoning and Learning with Restricted Boltzmann Machines}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25806}, DOI={10.1609/aaai.v37i5.25806}, abstractNote={Knowledge representation and reasoning in neural networks has been a long-standing endeavour which has attracted much attention recently. The principled integration of reasoning and learning in neural networks is a main objective of the area of neurosymbolic Artificial Intelligence. In this paper, a neurosymbolic system is introduced that can represent any propositional logic formula. 
A proof of equivalence is presented showing that energy minimization in restricted Boltzmann machines corresponds to logical reasoning. We demonstrate the application of our approach empirically on logical reasoning and learning from data and knowledge. Experimental results show that reasoning can be performed effectively for a class of logical formulae. Learning from data and knowledge is also evaluated in comparison with learning of logic programs using neural networks. The results show that our approach can improve on state-of-the-art neurosymbolic systems. The theorems and empirical results presented in this paper are expected to reignite the research on the use of neural networks as massively-parallel models for logical reasoning and promote the principled integration of reasoning and learning in deep networks.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tran, Son N. and Garcez, Artur d\u2019Avila}, year={2023}, month={Jun.}, pages={6558-6565} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25806/25578", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25806", + "pdf_size": 937915, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1395634833192841822&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "utas.edu.au;city.ac.uk", + "email": "utas.edu.au;city.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Tasmania;City, University of London", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.utas.edu.au;https://www.city.ac.uk", + "aff_unique_abbr": "UTAS;City, University of London", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Launceston;London", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Australia;United Kingdom" + }, + { + "id": "article-26841", + "title": "NewsPanda: Media Monitoring for Timely Conservation 
Action", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Non-governmental organizations for environmental conservation have a significant interest in monitoring conservation-related media and getting timely updates about infrastructure construction projects as they may cause massive impact to key conservation areas. Such monitoring, however, is difficult and time-consuming. We introduce NewsPanda, a toolkit which automatically detects and analyzes online articles related to environmental conservation and infrastructure construction. We fine-tune a BERT-based model using active learning methods and noise correction algorithms to identify articles that are relevant to conservation and infrastructure construction. For the identified articles, we perform further analysis, extracting keywords and finding potentially related sources. NewsPanda has been successfully deployed by the World Wide Fund for Nature teams in the UK, India, and Nepal since February 2022. It currently monitors over 80,000 websites and 1,074 conservation sites across India and Nepal, saving more than 30 hours of human efforts weekly. We have now scaled it up to cover 60,000 conservation sites globally.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Sedrick Scott Keh; Zheyuan Ryan Shi; David J. 
Patterson; Nirmal Bhagabati; Karun Dewan; Areendran Gopala; Pablo Izquierdo; Debojyoti Mallick; Ambika Sharma; Pooja Shrestha; Fei Fang", + "authorids": "", + "aff": "Carnegie Mellon University; Carnegie Mellon University + 98Connect; World Wide Fund for Nature; United States Agency for International Development; World Wide Fund for Nature; World Wide Fund for Nature; World Wide Fund for Nature; World Wide Fund for Nature; World Wide Fund for Nature; World Wide Fund for Nature; Carnegie Mellon University", + "bibtex": "@article{Keh_Shi_Patterson_Bhagabati_Dewan_Gopala_Izquierdo_Mallick_Sharma_Shrestha_Fang_2024, title={NewsPanda: Media Monitoring for Timely Conservation Action}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26841}, DOI={10.1609/aaai.v37i13.26841}, abstractNote={Non-governmental organizations for environmental conservation have a significant interest in monitoring conservation-related media and getting timely updates about infrastructure construction projects as they may cause massive impact to key conservation areas. Such monitoring, however, is difficult and time-consuming. We introduce NewsPanda, a toolkit which automatically detects and analyzes online articles related to environmental conservation and infrastructure construction. We fine-tune a BERT-based model using active learning methods and noise correction algorithms to identify articles that are relevant to conservation and infrastructure construction. For the identified articles, we perform further analysis, extracting keywords and finding potentially related sources. NewsPanda has been successfully deployed by the World Wide Fund for Nature teams in the UK, India, and Nepal since February 2022. It currently monitors over 80,000 websites and 1,074 conservation sites across India and Nepal, saving more than 30 hours of human efforts weekly. 
We have now scaled it up to cover 60,000 conservation sites globally.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Keh, Sedrick Scott and Shi, Zheyuan Ryan and Patterson, David J. and Bhagabati, Nirmal and Dewan, Karun and Gopala, Areendran and Izquierdo, Pablo and Mallick, Debojyoti and Sharma, Ambika and Shrestha, Pooja and Fang, Fei}, year={2024}, month={Jul.}, pages={15528-15536} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26841/26613", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26841", + "pdf_size": 2381209, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2109898064195399180&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "; ; ; ; ; ; ; ; ; ; ", + "email": "; ; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0+1;2;3;2;2;2;2;2;2;0", + "aff_unique_norm": "Carnegie Mellon University;98Connect;World Wide Fund for Nature;United States Agency for International Development", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cmu.edu;;https://www.wwf.org;https://www.usaid.gov", + "aff_unique_abbr": "CMU;;WWF;USAID", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;2;0;2;2;2;2;2;2;0", + "aff_country_unique": "United States;;International" + }, + { + "id": "article-25608", + "title": "Next POI Recommendation with Dynamic Graph and Explicit Dependency", + "track": "main", + "status": "Technical", + "abstract": "Next Point-Of-Interest (POI) recommendation plays an important role in various location-based services. Its main objective is to predict the user's next interested POI based on her previous check-in information. Most existing methods directly use users' historical check-in trajectories to construct various graphs to assist sequential models to complete this task. 
However, as users' check-in data is extremely sparse, it is difficult to capture the potential relations between POIs by directly using these check-in data. To this end, we propose the Sequence-based Neighbour search and Prediction Model (SNPM) for next POI recommendation. In SNPM, the RotatE knowledge graph embedding and Eigenmap methods are used to extract POI relationships implied in check-in data, and build the POI similarity graph. Then, we enhance the model's generalized representations of POIs' general features by aggregating similar POIs. As the context is typically rich and valuable when making Next POI predictions, the sequence model selects which POIs to aggregate not only depends on the current state, but also needs to consider the previous POI sequence. Therefore, we construct a Sequence-based, Dynamic Neighbor Graph (SDNG) to find the similarity neighbourhood and develop a Multi-Step Dependency Prediction model (MSDP) inspired by RotatE, which explicitly leverage information from previous states. 
We evaluate the proposed model on two real-world datasets, and the experimental results show that the proposed method significantly outperforms existing state-of-the-art POI recommendation methods.", + "primary_area": "data mining and knowledge management", + "author": "Feiyu Yin; Yong Liu; Zhiqi Shen; Lisi Chen; Shuo Shang; Peng Han", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; Nanyang Technological University, Singapore; Nanyang Technological University, Singapore; University of Electronic Science and Technology of China; Sichuan Artificial Intelligence Research Institute, Yibin, 644000, China + University of Electronic Science and Technology of China; University of Electronic Science and Technology of China", + "bibtex": "@article{Yin_Liu_Shen_Chen_Shang_Han_2023, title={Next POI Recommendation with Dynamic Graph and Explicit Dependency}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25608}, DOI={10.1609/aaai.v37i4.25608}, abstractNote={Next Point-Of-Interest (POI) recommendation plays an important role in various location-based services. Its main objective is to predict the user\u2019s next interested POI based on her previous check-in information. Most existing methods directly use users\u2019 historical check-in trajectories to construct various graphs to assist sequential models to complete this task. However, as users\u2019 check-in data is extremely sparse, it is difficult to capture the potential relations between POIs by directly using these check-in data. To this end, we propose the Sequence-based Neighbour search and Prediction Model (SNPM) for next POI recommendation. In SNPM, the RotatE knowledge graph embedding and Eigenmap methods are used to extract POI relationships implied in check-in data, and build the POI similarity graph. Then, we enhance the model\u2019s generalized representations of POIs\u2019 general features by aggregating similar POIs. 
As the context is typically rich and valuable when making Next POI predictions, the sequence model selects which POIs to aggregate not only depends on the current state, but also needs to consider the previous POI sequence. Therefore, we construct a Sequence-based, Dynamic Neighbor Graph (SDNG) to find the similarity neighbourhood and develop a Multi-Step Dependency Prediction model (MSDP) inspired by RotatE, which explicitly leverage information from previous states. We evaluate the proposed model on two real-world datasets, and the experimental results show that the proposed method significantly outperforms existing state-of-the-art POI recommendation methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yin, Feiyu and Liu, Yong and Shen, Zhiqi and Chen, Lisi and Shang, Shuo and Han, Peng}, year={2023}, month={Jun.}, pages={4827-4834} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25608/25380", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25608", + "pdf_size": 368166, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10756725915631271974&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "std.uestc.edu.cn;ntu.edu.sg;ntu.edu.sg;e.ntu.edu.sg;gmail.com;foxmail.com", + "email": "std.uestc.edu.cn;ntu.edu.sg;ntu.edu.sg;e.ntu.edu.sg;gmail.com;foxmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;0;2+0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Nanyang Technological University;Sichuan Artificial Intelligence Research Institute", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.ntu.edu.sg;", + "aff_unique_abbr": "UESTC;NTU;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Yibin", + "aff_country_unique_index": "0;1;1;0;0+0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26701", + "title": 
"Noise Based Deepfake Detection via Multi-Head Relative-Interaction", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deepfake brings huge and potential negative impacts to our daily lives. As the real-life Deepfake videos circulated on the Internet become more authentic, most existing detection algorithms have failed since few visual differences can be observed between an authentic video and a Deepfake one. However, the forensic traces are always retained within the synthesized videos. In this study, we present a noise-based Deepfake detection model, NoiseDF for short, which focuses on the underlying forensic noise traces left behind the Deepfake videos. In particular, we enhance the RIDNet denoiser to extract noise traces and features from the cropped face and background squares of the video image frames. Meanwhile, we devise a novel Multi-Head Relative-Interaction method to evaluate the degree of interaction between the faces and backgrounds that plays a pivotal role in the Deepfake detection task. Besides outperforming the state-of-the-art models, the visualization of the extracted Deepfake forensic noise traces has further displayed the evidence and proved the robustness of our approach.", + "primary_area": "ai for social impact", + "author": "Tianyi Wang; Kam Pui Chow", + "authorids": "", + "aff": "Department of Computer Science, The University of Hong Kong, Hong Kong, China; Department of Computer Science, The University of Hong Kong, Hong Kong, China", + "bibtex": "@article{Wang_Chow_2023, title={Noise Based Deepfake Detection via Multi-Head Relative-Interaction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26701}, DOI={10.1609/aaai.v37i12.26701}, abstractNote={Deepfake brings huge and potential negative impacts to our daily lives. 
As the real-life Deepfake videos circulated on the Internet become more authentic, most existing detection algorithms have failed since few visual differences can be observed between an authentic video and a Deepfake one. However, the forensic traces are always retained within the synthesized videos. In this study, we present a noise-based Deepfake detection model, NoiseDF for short, which focuses on the underlying forensic noise traces left behind the Deepfake videos. In particular, we enhance the RIDNet denoiser to extract noise traces and features from the cropped face and background squares of the video image frames. Meanwhile, we devise a novel Multi-Head Relative-Interaction method to evaluate the degree of interaction between the faces and backgrounds that plays a pivotal role in the Deepfake detection task. Besides outperforming the state-of-the-art models, the visualization of the extracted Deepfake forensic noise traces has further displayed the evidence and proved the robustness of our approach.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Tianyi and Chow, Kam Pui}, year={2023}, month={Jun.}, pages={14548-14556} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26701/26473", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26701", + "pdf_size": 7248941, + "gs_citation": 76, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16594774609343768776&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 2, + "aff_domain": "cs.hku.hk;cs.hku.hk", + "email": "cs.hku.hk;cs.hku.hk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The University of Hong Kong", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.hku.hk", + "aff_unique_abbr": "HKU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Hong Kong", + "aff_country_unique_index": "0;0", + "aff_country_unique": 
"China" + }, + { + "id": "article-26231", + "title": "Non-IID Transfer Learning on Graphs", + "track": "main", + "status": "Technical", + "abstract": "Transfer learning refers to the transfer of knowledge or information from a relevant source domain to a target domain. However, most existing transfer learning theories and algorithms focus on IID tasks, where the source/target samples are assumed to be independent and identically distributed. Very little effort is devoted to theoretically studying the knowledge transferability on non-IID tasks, e.g., cross-network mining. To bridge the gap, in this paper, we propose rigorous generalization bounds and algorithms for cross-network transfer learning from a source graph to a target graph. The crucial idea is to characterize the cross-network knowledge transferability from the perspective of the Weisfeiler-Lehman graph isomorphism test. To this end, we propose a novel Graph Subtree Discrepancy to measure the graph distribution shift between source and target graphs. Then the generalization error bounds on cross-network transfer learning, including both cross-network node classification and link prediction tasks, can be derived in terms of the source knowledge and the Graph Subtree Discrepancy across domains. This thereby motivates us to propose a generic graph adaptive network (GRADE) to minimize the distribution shift between source and target graphs for cross-network transfer learning. 
Experimental results verify the effectiveness and efficiency of our GRADE framework on both cross-network node classification and cross-domain recommendation tasks.", + "primary_area": "machine learning iv", + "author": "Jun Wu; Jingrui He; Elizabeth Ainsworth", + "authorids": "", + "aff": "University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign + USDA ARS Global Change and Photosynthesis Research Unit", + "bibtex": "@article{Wu_He_Ainsworth_2023, title={Non-IID Transfer Learning on Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26231}, DOI={10.1609/aaai.v37i9.26231}, abstractNote={Transfer learning refers to the transfer of knowledge or information from a relevant source domain to a target domain. However, most existing transfer learning theories and algorithms focus on IID tasks, where the source/target samples are assumed to be independent and identically distributed. Very little effort is devoted to theoretically studying the knowledge transferability on non-IID tasks, e.g., cross-network mining. To bridge the gap, in this paper, we propose rigorous generalization bounds and algorithms for cross-network transfer learning from a source graph to a target graph. The crucial idea is to characterize the cross-network knowledge transferability from the perspective of the Weisfeiler-Lehman graph isomorphism test. To this end, we propose a novel Graph Subtree Discrepancy to measure the graph distribution shift between source and target graphs. Then the generalization error bounds on cross-network transfer learning, including both cross-network node classification and link prediction tasks, can be derived in terms of the source knowledge and the Graph Subtree Discrepancy across domains. This thereby motivates us to propose a generic graph adaptive network (GRADE) to minimize the distribution shift between source and target graphs for cross-network transfer learning. 
Experimental results verify the effectiveness and efficiency of our GRADE framework on both cross-network node classification and cross-domain recommendation tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Jun and He, Jingrui and Ainsworth, Elizabeth}, year={2023}, month={Jun.}, pages={10342-10350} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26231/26003", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26231", + "pdf_size": 1699495, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7071226931104404734&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;United States Department of Agriculture - Agricultural Research Service", + "aff_unique_dep": ";Global Change and Photosynthesis Research Unit", + "aff_unique_url": "https://illinois.edu;https://www.ars.usda.gov", + "aff_unique_abbr": "UIUC;USDA ARS", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26916", + "title": "Non-exponential Reward Discounting in Reinforcement Learning", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Reinforcement learning methods typically discount future rewards using an exponential scheme to achieve theoretical convergence guarantees. Studies from neuroscience, psychology, and economics suggest that human and animal behavior is better captured by the hyperbolic discounting model. Hyperbolic discounting has recently been studied in deep reinforcement learning and has shown promising results. 
However, this area of research is seemingly understudied, with most extant and continuing research using the standard exponential discounting formulation. My dissertation examines the effects of non-exponential discounting functions (such as hyperbolic) on an agent's learning and aims to investigate their impact on multi-agent systems and generalization tasks. A key objective of this study is to link the discounting rate to an agent's approximation of the underlying hazard rate of its environment through survival analysis.", + "primary_area": "", + "author": "Raja Farrukh Ali", + "authorids": "", + "aff": "Department of Computer Science, Kansas State University", + "bibtex": "@article{Ali_2024, title={Non-exponential Reward Discounting in Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26916}, DOI={10.1609/aaai.v37i13.26916}, abstractNote={Reinforcement learning methods typically discount future rewards using an exponential scheme to achieve theoretical convergence guarantees. Studies from neuroscience, psychology, and economics suggest that human and animal behavior is better captured by the hyperbolic discounting model. Hyperbolic discounting has recently been studied in deep reinforcement learning and has shown promising results. However, this area of research is seemingly understudied, with most extant and continuing research using the standard exponential discounting formulation. My dissertation examines the effects of non-exponential discounting functions (such as hyperbolic) on an agent\u2019s learning and aims to investigate their impact on multi-agent systems and generalization tasks. 
A key objective of this study is to link the discounting rate to an agent\u2019s approximation of the underlying hazard rate of its environment through survival analysis.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ali, Raja Farrukh}, year={2024}, month={Jul.}, pages={16111-16112} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26916/26688", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26916", + "pdf_size": 63889, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9432077145045185678&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ksu.edu", + "email": "ksu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Kansas State University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.k-state.edu", + "aff_unique_abbr": "K-State", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-25893", + "title": "Non-reversible Parallel Tempering for Deep Posterior Approximation", + "track": "main", + "status": "Technical", + "abstract": "Parallel tempering (PT), also known as replica exchange, is the go-to workhorse for simulations of multi-modal distributions. The key to the success of PT is to adopt efficient swap schemes. The popular deterministic even-odd (DEO) scheme exploits the non-reversibility property and has successfully reduced the communication cost from quadratic to linear given the sufficiently many chains. However, such an innovation largely disappears in big data due to the limited chains and few bias-corrected swaps. To handle this issue, we generalize the DEO scheme to promote non-reversibility and propose a few solutions to tackle the underlying bias caused by the geometric stopping time. 
Notably, in big data scenarios, we obtain a nearly linear communication cost based on the optimal window size. In addition, we also adopt stochastic gradient descent (SGD) with large and constant learning rates as exploration kernels. Such a user-friendly nature enables us to conduct approximation tasks for complex posteriors without much tuning costs.", + "primary_area": "machine learning i", + "author": "Wei Deng; Qian Zhang; Qi Feng; Faming Liang; Guang Lin", + "authorids": "", + "aff": "Purdue University; Purdue University; University of Michigan; Purdue University; Purdue University", + "bibtex": "@article{Deng_Zhang_Feng_Liang_Lin_2023, title={Non-reversible Parallel Tempering for Deep Posterior Approximation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25893}, DOI={10.1609/aaai.v37i6.25893}, abstractNote={Parallel tempering (PT), also known as replica exchange, is the go-to workhorse for simulations of multi-modal distributions. The key to the success of PT is to adopt efficient swap schemes. The popular deterministic even-odd (DEO) scheme exploits the non-reversibility property and has successfully reduced the communication cost from quadratic to linear given the sufficiently many chains. However, such an innovation largely disappears in big data due to the limited chains and few bias-corrected swaps. To handle this issue, we generalize the DEO scheme to promote non-reversibility and propose a few solutions to tackle the underlying bias caused by the geometric stopping time. Notably, in big data scenarios, we obtain a nearly linear communication cost based on the optimal window size. In addition, we also adopt stochastic gradient descent (SGD) with large and constant learning rates as exploration kernels. 
Such a user-friendly nature enables us to conduct approximation tasks for complex posteriors without much tuning costs.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Wei and Zhang, Qian and Feng, Qi and Liang, Faming and Lin, Guang}, year={2023}, month={Jun.}, pages={7332-7339} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25893/25665", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25893", + "pdf_size": 7904794, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12392231492976881694&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com; ; ;purdue.edu; ", + "email": "gmail.com; ; ;purdue.edu; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Purdue University;University of Michigan", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.purdue.edu;https://www.umich.edu", + "aff_unique_abbr": "Purdue;UM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25901", + "title": "Non-stationary Risk-Sensitive Reinforcement Learning: Near-Optimal Dynamic Regret, Adaptive Detection, and Separation Design", + "track": "main", + "status": "Technical", + "abstract": "We study risk-sensitive reinforcement learning (RL) based on an entropic risk measure in episodic non-stationary Markov decision processes (MDPs). Both the reward functions and the state transition kernels are unknown and allowed to vary arbitrarily over time with a budget on their cumulative variations. When this variation budget is known a prior, we propose two restart-based algorithms, namely Restart-RSMB and Restart-RSQ, and establish their dynamic regrets. 
Based on these results, we further present a meta-algorithm that does not require any prior knowledge of the variation budget and can adaptively detect the non-stationarity on the exponential value functions. A dynamic regret lower bound is then established for non-stationary risk-sensitive RL to certify the near-optimality of the proposed algorithms. Our results also show that the risk control and the handling of the non-stationarity can be separately designed in the algorithm if the variation budget is known a prior, while the non-stationary detection mechanism in the adaptive algorithm depends on the risk parameter. This work offers the first non-asymptotic theoretical analyses for the non-stationary risk-sensitive RL in the literature.", + "primary_area": "machine learning i", + "author": "Yuhao Ding; Ming Jin; Javad Lavaei", + "authorids": "", + "aff": "UC Berkeley, Department of Industrial Engineering and Operations Research; Virginia Tech, Department of Electrical and Computer Engineering; UC Berkeley, Department of Industrial Engineering and Operations Research", + "bibtex": "@article{Ding_Jin_Lavaei_2023, title={Non-stationary Risk-Sensitive Reinforcement Learning: Near-Optimal Dynamic Regret, Adaptive Detection, and Separation Design}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25901}, DOI={10.1609/aaai.v37i6.25901}, abstractNote={We study risk-sensitive reinforcement learning (RL) based on an entropic risk measure in episodic non-stationary Markov decision processes (MDPs). Both the reward functions and the state transition kernels are unknown and allowed to vary arbitrarily over time with a budget on their cumulative variations. When this variation budget is known a prior, we propose two restart-based algorithms, namely Restart-RSMB and Restart-RSQ, and establish their dynamic regrets. 
Based on these results, we further present a meta-algorithm that does not require any prior knowledge of the variation budget and can adaptively detect the non-stationarity on the exponential value functions. A dynamic regret lower bound is then established for non-stationary risk-sensitive RL to certify the near-optimality of the proposed algorithms. Our results also show that the risk control and the handling of the non-stationarity can be separately designed in the algorithm if the variation budget is known a prior, while the non-stationary detection mechanism in the adaptive algorithm depends on the risk parameter. This work offers the first non-asymptotic theoretical analyses for the non-stationary risk-sensitive RL in the literature.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Yuhao and Jin, Ming and Lavaei, Javad}, year={2023}, month={Jun.}, pages={7405-7413} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25901/25673", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25901", + "pdf_size": 224985, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13957299966607968843&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "berkeley.edu;vt.edu;berkeley.edu", + "email": "berkeley.edu;vt.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of California, Berkeley;Virginia Tech", + "aff_unique_dep": "Department of Industrial Engineering and Operations Research;Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.berkeley.edu;https://www.vt.edu", + "aff_unique_abbr": "UC Berkeley;VT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Berkeley;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25834", + "title": "Normalizing Flow Ensembles for Rich 
Aleatoric and Epistemic Uncertainty Modeling", + "track": "main", + "status": "Technical", + "abstract": "In this work, we demonstrate how to reliably estimate epistemic uncertainty while maintaining the flexibility needed to capture complicated aleatoric distributions. To this end, we propose an ensemble of Normalizing Flows (NF), which are state-of-the-art in modeling aleatoric uncertainty. The ensembles are created via sets of fixed dropout masks, making them less expensive than creating separate NF models. We demonstrate how to leverage the unique structure of NFs, base distributions, to estimate aleatoric uncertainty without relying on samples, provide a comprehensive set of baselines, and derive unbiased estimates for differential entropy. The methods were applied to a variety of experiments, commonly used to benchmark aleatoric and epistemic uncertainty estimation: 1D sinusoidal data, 2D windy grid-world (Wet Chicken), Pendulum, and Hopper. In these experiments, we setup an active learning framework and evaluate each model's capability at measuring aleatoric and epistemic uncertainty. The results show the advantages of using NF ensembles in capturing complicated aleatoric while maintaining accurate epistemic uncertainty estimates.", + "primary_area": "machine learning i", + "author": "Lucas Berry; David Meger", + "authorids": "", + "aff": "McGill University, Centre for Intelligent Machines, Montreal, Canada; McGill University, Centre for Intelligent Machines, Montreal, Canada", + "bibtex": "@article{Berry_Meger_2023, title={Normalizing Flow Ensembles for Rich Aleatoric and Epistemic Uncertainty Modeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25834}, DOI={10.1609/aaai.v37i6.25834}, abstractNote={In this work, we demonstrate how to reliably estimate epistemic uncertainty while maintaining the flexibility needed to capture complicated aleatoric distributions. 
To this end, we propose an ensemble of Normalizing Flows (NF), which are state-of-the-art in modeling aleatoric uncertainty. The ensembles are created via sets of fixed dropout masks, making them less expensive than creating separate NF models. We demonstrate how to leverage the unique structure of NFs, base distributions, to estimate aleatoric uncertainty without relying on samples, provide a comprehensive set of baselines, and derive unbiased estimates for differential entropy. The methods were applied to a variety of experiments, commonly used to benchmark aleatoric and epistemic uncertainty estimation: 1D sinusoidal data, 2D windy grid-world (Wet Chicken), Pendulum, and Hopper. In these experiments, we setup an active learning framework and evaluate each model\u2019s capability at measuring aleatoric and epistemic uncertainty. The results show the advantages of using NF ensembles in capturing complicated aleatoric while maintaining accurate epistemic uncertainty estimates.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Berry, Lucas and Meger, David}, year={2023}, month={Jun.}, pages={6806-6814} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25834/25606", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25834", + "pdf_size": 1133053, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13245282135270964978&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.mcgill.ca; ", + "email": "mail.mcgill.ca; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "McGill University", + "aff_unique_dep": "Centre for Intelligent Machines", + "aff_unique_url": "https://www.mcgill.ca", + "aff_unique_abbr": "McGill", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Montreal", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25207", + 
"title": "Not All Neighbors Matter: Point Distribution-Aware Pruning for 3D Point Cloud", + "track": "main", + "status": "Technical", + "abstract": "Applying deep neural networks to 3D point cloud processing has demonstrated a rapid pace of advancement in those domains where 3D geometry information can greatly boost task performance, such as AR/VR, robotics, and autonomous driving. However, as the size of both the neural network model and 3D point cloud continues to scale, reducing the entailed computation and memory access overhead is a primary challenge to meet strict latency and energy constraints of practical applications. This paper proposes a new weight pruning technique for 3D point cloud based on spatial point distribution. We identify that particular groups of neighborhood voxels in 3D point cloud contribute more frequently to actual output features than others. Based on this observation, we propose to selectively prune less contributing groups of neighborhood voxels first to reduce the computation overhead while minimizing the impact on model accuracy. We apply our proposal to three representative sparse 3D convolution libraries. Our proposal reduces the inference latency by 1.60\u00d7 on average and energy consumption by 1.74\u00d7 on NVIDIA GV100 GPU with no loss in accuracy metric", + "primary_area": "computer vision i", + "author": "Yejin Lee; Donghyun Lee; JungUk Hong; Jae W. 
Lee; Hongil Yoon", + "authorids": "", + "aff": "Seoul National University; Seoul National University; Seoul National University; Seoul National University; Google", + "bibtex": "@article{Lee_Lee_Hong_Lee_Yoon_2023, title={Not All Neighbors Matter: Point Distribution-Aware Pruning for 3D Point Cloud}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25207}, DOI={10.1609/aaai.v37i1.25207}, abstractNote={Applying deep neural networks to 3D point cloud processing has demonstrated a rapid pace of advancement in those domains where 3D geometry information can greatly boost task performance, such as AR/VR, robotics, and autonomous driving. However, as the size of both the neural network model and 3D point cloud continues to scale, reducing the entailed computation and memory access overhead is a primary challenge to meet strict latency and energy constraints of practical applications. This paper proposes a new weight pruning technique for 3D point cloud based on spatial point distribution. We identify that particular groups of neighborhood voxels in 3D point cloud contribute more frequently to actual output features than others. Based on this observation, we propose to selectively prune less contributing groups of neighborhood voxels first to reduce the computation overhead while minimizing the impact on model accuracy. We apply our proposal to three representative sparse 3D convolution libraries. Our proposal reduces the inference latency by 1.60\u00d7 on average and energy consumption by 1.74\u00d7 on NVIDIA GV100 GPU with no loss in accuracy metric}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Yejin and Lee, Donghyun and Hong, JungUk and Lee, Jae W. 
and Yoon, Hongil}, year={2023}, month={Jun.}, pages={1240-1249} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25207/24979", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25207", + "pdf_size": 477546, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12310003958116023144&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr;google.com", + "email": "snu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Seoul National University;Google", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.snu.ac.kr;https://www.google.com", + "aff_unique_abbr": "SNU;Google", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-27003", + "title": "Novel Intent Detection and Active Learning Based Classification (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Novel intent class detection is an important problem in real world scenario for conversational agents for continuous interaction. Several research works have been done to detect novel intents in a mono-lingual (primarily English) texts and\nimages. But, current systems lack an end-to-end universal framework to detect novel intents across various different languages with less human annotation effort for mis-classified and system rejected samples. This paper proposes\nNIDAL (Novel Intent Detection and Active Learning based\nclassification), a semi-supervised framework to detect novel\nintents while reducing human annotation cost. Empirical results on various benchmark datasets demonstrate that this system outperforms the baseline methods by more than 10%\nmargin for accuracy and macro-F1. 
The system achieves this while maintaining overall annotation cost to be just ~6-10% of the unlabeled data available to the system.", + "primary_area": "", + "author": "Ankan Mullick", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Indian Institute of Technology Kharagpur, India", + "bibtex": "@article{Mullick_2024, title={Novel Intent Detection and Active Learning Based Classification (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27003}, DOI={10.1609/aaai.v37i13.27003}, abstractNote={Novel intent class detection is an important problem in real world scenario for conversational agents for continuous interaction. Several research works have been done to detect novel intents in a mono-lingual (primarily English) texts and\nimages. But, current systems lack an end-to-end universal framework to detect novel intents across various different languages with less human annotation effort for mis-classified and system rejected samples. This paper proposes\nNIDAL (Novel Intent Detection and Active Learning based\nclassification), a semi-supervised framework to detect novel\nintents while reducing human annotation cost. Empirical results on various benchmark datasets demonstrate that this system outperforms the baseline methods by more than 10%\nmargin for accuracy and macro-F1. 
The system achieves this while maintaining overall annotation cost to be just ~6-10% of the unlabeled data available to the system.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mullick, Ankan}, year={2024}, month={Jul.}, pages={16286-16287} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27003/26775", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27003", + "pdf_size": 72775, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=460164222990393278&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "kgpian.iitkgp.ac.in", + "email": "kgpian.iitkgp.ac.in", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Indian Institute of Technology Kharagpur", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.iitkgp.ac.in", + "aff_unique_abbr": "IIT Kharagpur", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Kharagpur", + "aff_country_unique_index": "0", + "aff_country_unique": "India" + }, + { + "id": "article-25258", + "title": "Novel Motion Patterns Matter for Practical Skeleton-Based Action Recognition", + "track": "main", + "status": "Technical", + "abstract": "Most skeleton-based action recognition methods assume that the same type of action samples in the training set and the test set share similar motion patterns. However, action samples in real scenarios usually contain novel motion patterns which are not involved in the training set. As it is laborious to collect sufficient training samples to enumerate various types of novel motion patterns, this paper presents a practical skeleton-based action recognition task where the training set contains common motion patterns of action samples and the test set contains action samples that suffer from novel motion patterns. 
For this task, we present a Mask Graph Convolutional Network (Mask-GCN) to focus on learning action-specific skeleton joints that mainly convey action information meanwhile masking action-agnostic skeleton joints that convey rare action information and suffer more from novel motion patterns. Specifically, we design a policy network to learn layer-wise body masks to construct masked adjacency matrices, which guide a GCN-based backbone to learn stable yet informative action features from dynamic graph structure. Extensive experiments on our newly collected dataset verify that Mask-GCN outperforms most GCN-based methods when testing with various novel motion patterns.", + "primary_area": "computer vision ii", + "author": "Mengyuan Liu; Fanyang Meng; Chen Chen; Songtao Wu", + "authorids": "", + "aff": "Key Laboratory of Machine Perception, Peking University, Shenzhen Graduate School; Peng Cheng Laboratory; University of Central Florida; Sony R&D Center China", + "bibtex": "@article{Liu_Meng_Chen_Wu_2023, title={Novel Motion Patterns Matter for Practical Skeleton-Based Action Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25258}, DOI={10.1609/aaai.v37i2.25258}, abstractNote={Most skeleton-based action recognition methods assume that the same type of action samples in the training set and the test set share similar motion patterns. However, action samples in real scenarios usually contain novel motion patterns which are not involved in the training set. As it is laborious to collect sufficient training samples to enumerate various types of novel motion patterns, this paper presents a practical skeleton-based action recognition task where the training set contains common motion patterns of action samples and the test set contains action samples that suffer from novel motion patterns. 
For this task, we present a Mask Graph Convolutional Network (Mask-GCN) to focus on learning action-specific skeleton joints that mainly convey action information meanwhile masking action-agnostic skeleton joints that convey rare action information and suffer more from novel motion patterns. Specifically, we design a policy network to learn layer-wise body masks to construct masked adjacency matrices, which guide a GCN-based backbone to learn stable yet informative action features from dynamic graph structure. Extensive experiments on our newly collected dataset verify that Mask-GCN outperforms most GCN-based methods when testing with various novel motion patterns.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Mengyuan and Meng, Fanyang and Chen, Chen and Wu, Songtao}, year={2023}, month={Jun.}, pages={1701-1709} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25258/25030", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25258", + "pdf_size": 884340, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17841158074769242082&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;pcl.ac.cn;crcv.ucf.edu;sony.com", + "email": "gmail.com;pcl.ac.cn;crcv.ucf.edu;sony.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Peking University;Peng Cheng Laboratory;University of Central Florida;Sony", + "aff_unique_dep": "Key Laboratory of Machine Perception;;;R&D Center", + "aff_unique_url": "http://www.pku.edu.cn;http://www.pcl.ac.cn;https://www.ucf.edu;https://www.sony.com", + "aff_unique_abbr": "PKU;PCL;UCF;Sony", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Shenzhen Graduate School;", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26445", + "title": "Novel Ordering-Based Approaches for Causal 
Structure Learning in the Presence of Unobserved Variables", + "track": "main", + "status": "Technical", + "abstract": "We propose ordering-based approaches for learning the maximal ancestral graph (MAG) of a structural equation model (SEM) up to its Markov equivalence class (MEC) in the presence of unobserved variables. Existing ordering-based methods in the literature recover a graph through learning a causal order (c-order). We advocate for a novel order called removable order (r-order) as they are advantageous over c-orders for structure learning. This is because r-orders are the minimizers of an appropriately defined optimization problem that could be either solved exactly (using a reinforcement learning approach) or approximately (using a hill-climbing search). Moreover, the r-orders (unlike c-orders) are invariant among all the graphs in a MEC and include c-orders as a subset. Given that set of r-orders is often significantly larger than the set of c-orders, it is easier for the optimization problem to find an r-order instead of a c-order. 
We evaluate the performance and the scalability of our proposed approaches on both real-world and randomly generated networks.", + "primary_area": "reasoning under uncertainty", + "author": "Ehsan Mokhtarian; Mohmmadsadegh Khorasani; Jalal Etesami; Negar Kiyavash", + "authorids": "", + "aff": "School of Computer and Communication Sciences EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences EPFL, Lausanne, Switzerland + College of Management of Technology EPFL, Lausanne, Switzerland", + "bibtex": "@article{Mokhtarian_Khorasani_Etesami_Kiyavash_2023, title={Novel Ordering-Based Approaches for Causal Structure Learning in the Presence of Unobserved Variables}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26445}, DOI={10.1609/aaai.v37i10.26445}, abstractNote={We propose ordering-based approaches for learning the maximal ancestral graph (MAG) of a structural equation model (SEM) up to its Markov equivalence class (MEC) in the presence of unobserved variables. Existing ordering-based methods in the literature recover a graph through learning a causal order (c-order). We advocate for a novel order called removable order (r-order) as they are advantageous over c-orders for structure learning. This is because r-orders are the minimizers of an appropriately defined optimization problem that could be either solved exactly (using a reinforcement learning approach) or approximately (using a hill-climbing search). Moreover, the r-orders (unlike c-orders) are invariant among all the graphs in a MEC and include c-orders as a subset. Given that set of r-orders is often significantly larger than the set of c-orders, it is easier for the optimization problem to find an r-order instead of a c-order. 
We evaluate the performance and the scalability of our proposed approaches on both real-world and randomly generated networks.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mokhtarian, Ehsan and Khorasani, Mohmmadsadegh and Etesami, Jalal and Kiyavash, Negar}, year={2023}, month={Jun.}, pages={12260-12268} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26445/26217", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26445", + "pdf_size": 1327108, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5783234613598095899&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "epfl.ch;epfl.ch;epfl.ch;epfl.ch", + "email": "epfl.ch;epfl.ch;epfl.ch;epfl.ch", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL);\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne", + "aff_unique_dep": "School of Computer and Communication Sciences;College of Management of Technology", + "aff_unique_url": "https://www.epfl.ch;https://www.epfl.ch", + "aff_unique_abbr": "EPFL;EPFL", + "aff_campus_unique_index": "0;0;0;0+0", + "aff_campus_unique": "Lausanne", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "Switzerland" + }, + { + "id": "article-25682", + "title": "Now We\u2019re Talking: Better Deliberation Groups through Submodular Optimization", + "track": "main", + "status": "Technical", + "abstract": "Citizens\u2019 assemblies are groups of randomly selected constituents who are tasked with providing recommendations on policy questions. Assembly members form their recommendations through a sequence of discussions in small groups (deliberation), in which group members exchange arguments and experiences. 
We seek to support this process through optimization, by studying how to assign participants to discussion groups over multiple sessions, in a way that maximizes interaction between participants and satisfies diversity constraints within each group. Since repeated meetings between a given pair of participants have diminishing marginal returns, we capture interaction through a submodular function, which is approximately optimized by a greedy algorithm making calls to an ILP solver. This framework supports different submodular objective functions, and we identify sensible options, but we also show it is not necessary to commit to a particular choice: Our main theoretical result is a (practically efficient) algorithm that simultaneously approximates every possible objective function of the form we are interested in. Experiments with data from real citizens' assemblies demonstrate that our approach substantially outperforms the heuristic algorithm currently used by practitioners.", + "primary_area": "game theory and economic paradigms", + "author": "Jake Barrett; Kobi Gal; Paul G\u00f6lz; Rose M. Hong; Ariel D. Procaccia", + "authorids": "", + "aff": "University of Edinburgh; University of Edinburgh + Ben-Gurion University of the Negev; Harvard University; Harvard University; Harvard University", + "bibtex": "@article{Barrett_Gal_G\u00f6lz_Hong_Procaccia_2023, title={Now We\u2019re Talking: Better Deliberation Groups through Submodular Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25682}, DOI={10.1609/aaai.v37i5.25682}, abstractNote={Citizens\u2019 assemblies are groups of randomly selected constituents who are tasked with providing recommendations on policy questions. Assembly members form their recommendations through a sequence of discussions in small groups (deliberation), in which group members exchange arguments and experiences. 
We seek to support this process through optimization, by studying how to assign participants to discussion groups over multiple sessions, in a way that maximizes interaction between participants and satisfies diversity constraints within each group. Since repeated meetings between a given pair of participants have diminishing marginal returns, we capture interaction through a submodular function, which is approximately optimized by a greedy algorithm making calls to an ILP solver. This framework supports different submodular objective functions, and we identify sensible options, but we also show it is not necessary to commit to a particular choice: Our main theoretical result is a (practically efficient) algorithm that simultaneously approximates every possible objective function of the form we are interested in. Experiments with data from real citizens\u2019 assemblies demonstrate that our approach substantially outperforms the heuristic algorithm currently used by practitioners.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Barrett, Jake and Gal, Kobi and G\u00f6lz, Paul and Hong, Rose M. 
and Procaccia, Ariel D.}, year={2023}, month={Jun.}, pages={5490-5498} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25682/25454", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25682", + "pdf_size": 199409, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8207057416413062551&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sms.ed.ac.uk;exseed.ed.ac.uk;seas.harvard.edu;college.harvard.edu;seas.harvard.edu", + "email": "sms.ed.ac.uk;exseed.ed.ac.uk;seas.harvard.edu;college.harvard.edu;seas.harvard.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;2;2", + "aff_unique_norm": "University of Edinburgh;Ben-Gurion University of the Negev;Harvard University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ed.ac.uk;https://www.bgu.ac.il;https://www.harvard.edu", + "aff_unique_abbr": "Edinburgh;BGU;Harvard", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;2;2;2", + "aff_country_unique": "United Kingdom;Israel;United States" + }, + { + "id": "article-25505", + "title": "NuWLS: Improving Local Search for (Weighted) Partial MaxSAT by New Weighting Techniques", + "track": "main", + "status": "Technical", + "abstract": "Maximum Satisfiability (MaxSAT) is a prototypical constraint optimization problem, and its generalized version is the (Weighted) Partial MaxSAT problem, denoted as (W)PMS, which deals with hard and soft clauses. Considerable progress has been made on stochastic local search (SLS) algorithms for solving (W)PMS, which mainly focus on clause weighting techniques. In this work, we identify two issues of existing clause weighting techniques for (W)PMS, and propose two ideas correspondingly. First, we observe that the initial values of soft clause weights have a big effect on the performance of the SLS solver for solving (W)PMS, and propose a weight initialization method. 
Second, we propose a new clause weighting scheme that for the first time employs different conditions for updating hard and soft clause weights. Based on these two ideas, we develop a new SLS solver for (W)PMS named NuWLS. Through extensive experiments, NuWLS performs much better than existing SLS solvers on all 6 benchmarks from the incomplete tracks of MaxSAT Evaluations (MSEs) 2019, 2020, and 2021. In terms of the number of winning instances, NuWLS outperforms state-of-the-art SAT-based incomplete solvers on all the 6 benchmarks. More encouragingly, a hybrid solver that combines NuWLS and an SAT-based solver won all four categories in the incomplete track of the MaxSAT Evaluation 2022.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yi Chu; Shaowei Cai; Chuan Luo", + "authorids": "", + "aff": "Institute of Software, Chinese Academy of Sciences, China; State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, China+School of Computer Science and Technology, University of Chinese Academy of Sciences, China; School of Software, Beihang University, China", + "bibtex": "@article{Chu_Cai_Luo_2023, title={NuWLS: Improving Local Search for (Weighted) Partial MaxSAT by New Weighting Techniques}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25505}, DOI={10.1609/aaai.v37i4.25505}, abstractNote={Maximum Satisfiability (MaxSAT) is a prototypical constraint optimization problem, and its generalized version is the (Weighted) Partial MaxSAT problem, denoted as (W)PMS, which deals with hard and soft clauses. Considerable progress has been made on stochastic local search (SLS) algorithms for solving (W)PMS, which mainly focus on clause weighting techniques. In this work, we identify two issues of existing clause weighting techniques for (W)PMS, and propose two ideas correspondingly. 
First, we observe that the initial values of soft clause weights have a big effect on the performance of the SLS solver for solving (W)PMS, and propose a weight initialization method. Second, we propose a new clause weighting scheme that for the first time employs different conditions for updating hard and soft clause weights. Based on these two ideas, we develop a new SLS solver for (W)PMS named NuWLS. Through extensive experiments, NuWLS performs much better than existing SLS solvers on all 6 benchmarks from the incomplete tracks of MaxSAT Evaluations (MSEs) 2019, 2020, and 2021. In terms of the number of winning instances, NuWLS outperforms state-of-the-art SAT-based incomplete solvers on all the 6 benchmarks. More encouragingly, a hybrid solver that combines NuWLS and an SAT-based solver won all four categories in the incomplete track of the MaxSAT Evaluation 2022.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chu, Yi and Cai, Shaowei and Luo, Chuan}, year={2023}, month={Jun.}, pages={3915-3923} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25505/25277", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25505", + "pdf_size": 180851, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12840671255812792534&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "ios.ac.cn;ios.ac.cn;buaa.edu.cn", + "email": "ios.ac.cn;ios.ac.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Beihang University", + "aff_unique_dep": "Institute of Software;School of Computer Science and Technology;School of Software", + "aff_unique_url": "http://www.ios.ac.cn;http://www.ucas.ac.cn;http://www.buaa.edu.cn", + "aff_unique_abbr": "CAS;UCAS;Beihang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26310", + "title": "ODE-RSSM: Learning Stochastic Recurrent State Space Model from Irregularly Sampled Data", + "track": "main", + "status": "Technical", + "abstract": "For the complicated input-output systems with nonlinearity and stochasticity, Deep State Space Models (SSMs) are effective for identifying systems in the latent state space, which are of great significance for representation, forecasting, and planning in online scenarios. However, most SSMs are designed for discrete-time sequences and inapplicable when the observations are irregular in time. To solve the problem, we propose a novel continuous-time SSM named Ordinary Differential Equation Recurrent State Space Model (ODE-RSSM). ODE-RSSM incorporates an ordinary differential equation (ODE) network (ODE-Net) to model the continuous-time evolution of latent states between adjacent time points. Inspired from the equivalent linear transformation on integration limits, we propose an efficient reparameterization method for solving batched ODEs with non-uniform time spans in parallel for efficiently training the ODE-RSSM with irregularly sampled sequences. We also conduct extensive experiments to evaluate the proposed ODE-RSSM and the baselines on three input-output datasets, one of which is a rollout of a private industrial dataset with strong long-term delay and stochasticity. The results demonstrate that the ODE-RSSM achieves better performance than other baselines in open loop prediction even if the time spans of predicted points are uneven and the distribution of length is changeable. 
Code is availiable at https://github.com/yuanzhaolin/ODE-RSSM.", + "primary_area": "machine learning iv", + "author": "Zhaolin Yuan; Xiaojuan Ban; Zixuan Zhang; Xiaorui Li; Hong-Ning Dai", + "authorids": "", + "aff": "School of Intelligence Science and Technology, Beijing Key Laboratory of Knowledge Engineering for Materials Science, University of Science and Technology Beijing, Beijing 100083, China; Beijing Advanced Innovation Center for Materials Genome Engineering, University of Science and Technology Beijing+Key Laboratory of Intelligent Bionic Unmanned Systems, Ministry of Education, University of Science and Technology Beijing, Beijing 100083, China; School of Intelligence Science and Technology, Beijing Key Laboratory of Knowledge Engineering for Materials Science, University of Science and Technology Beijing, Beijing 100083, China; School of Intelligence Science and Technology, Beijing Key Laboratory of Knowledge Engineering for Materials Science, University of Science and Technology Beijing, Beijing 100083, China; Department of Computer Science, Hong Kong Baptist University, Hong Kong, China", + "bibtex": "@article{Yuan_Ban_Zhang_Li_Dai_2023, title={ODE-RSSM: Learning Stochastic Recurrent State Space Model from Irregularly Sampled Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26310}, DOI={10.1609/aaai.v37i9.26310}, abstractNote={For the complicated input-output systems with nonlinearity and stochasticity, Deep State Space Models (SSMs) are effective for identifying systems in the latent state space, which are of great significance for representation, forecasting, and planning in online scenarios. However, most SSMs are designed for discrete-time sequences and inapplicable when the observations are irregular in time. To solve the problem, we propose a novel continuous-time SSM named Ordinary Differential Equation Recurrent State Space Model (ODE-RSSM). 
ODE-RSSM incorporates an ordinary differential equation (ODE) network (ODE-Net) to model the continuous-time evolution of latent states between adjacent time points. Inspired from the equivalent linear transformation on integration limits, we propose an efficient reparameterization method for solving batched ODEs with non-uniform time spans in parallel for efficiently training the ODE-RSSM with irregularly sampled sequences. We also conduct extensive experiments to evaluate the proposed ODE-RSSM and the baselines on three input-output datasets, one of which is a rollout of a private industrial dataset with strong long-term delay and stochasticity. The results demonstrate that the ODE-RSSM achieves better performance than other baselines in open loop prediction even if the time spans of predicted points are uneven and the distribution of length is changeable. Code is availiable at https://github.com/yuanzhaolin/ODE-RSSM.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Zhaolin and Ban, Xiaojuan and Zhang, Zixuan and Li, Xiaorui and Dai, Hong-Ning}, year={2023}, month={Jun.}, pages={11060-11068} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26310/26082", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26310", + "pdf_size": 424450, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10118447521307138033&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "163.com;ustb.edu.cn;163.com;xs.ustb.edu.cn;ieee.org", + "email": "163.com;ustb.edu.cn;163.com;xs.ustb.edu.cn;ieee.org", + "github": "https://github.com/yuanzhaolin/ODE-RSSM", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+0;0;0;1", + "aff_unique_norm": "University of Science and Technology Beijing;Hong Kong Baptist University", + "aff_unique_dep": "School of Intelligence Science and Technology;Department of Computer Science", + "aff_unique_url": 
"http://www.ustb.edu.cn;https://www.hkbu.edu.hk", + "aff_unique_abbr": "USTB;HKBU", + "aff_campus_unique_index": "0;0+0;0;0;1", + "aff_campus_unique": "Beijing;Hong Kong", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26084", + "title": "OMPQ: Orthogonal Mixed Precision Quantization", + "track": "main", + "status": "Technical", + "abstract": "To bridge the ever-increasing gap between deep neural networks' complexity and hardware capability, network quantization has attracted more and more research attention. The latest trend of mixed precision quantization takes advantage of hardware's multiple bit-width arithmetic operations to unleash the full potential of network quantization. However, existing approaches rely heavily on an extremely time-consuming search process and various relaxations when seeking the optimal bit configuration. To address this issue, we propose to optimize a proxy metric of network orthogonality that can be efficiently solved with linear programming, which proves to be highly correlated with quantized model accuracy and bit-width. Our approach significantly reduces the search time and the required data amount by orders of magnitude, but without a compromise on quantization accuracy. Specifically, we achieve 72.08% Top-1 accuracy on ResNet-18 with 6.7Mb parameters, which does not require any searching iterations. 
Given the high efficiency and low data dependency of our algorithm, we use it for the post-training quantization, which achieves 71.27% Top-1 accuracy on MobileNetV2 with only 1.5Mb parameters.", + "primary_area": "machine learning ii", + "author": "Yuexiao Ma; Taisong Jin; Xiawu Zheng; Yan Wang; Huixia Li; Yongjian Wu; Guannan Jiang; Wei Zhang; Rongrong Ji", + "authorids": "", + "aff": "MAC Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, China; MAC Lab, Department of Computer Science and Technology, School of Informatics, Xiamen University, China; Peng Cheng Laboratory, Shenzhen, China; Samsara, Seattle, WA, USA; MAC Lab, Department of Computer Science and Technology, School of Informatics, Xiamen University, China; Tencent Technology (Shanghai) Co., Ltd, China; CATL, China; CATL, China; MAC Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, China", + "bibtex": "@article{Ma_Jin_Zheng_Wang_Li_Wu_Jiang_Zhang_Ji_2023, title={OMPQ: Orthogonal Mixed Precision Quantization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26084}, DOI={10.1609/aaai.v37i7.26084}, abstractNote={To bridge the ever-increasing gap between deep neural networks\u2019 complexity and hardware capability, network quantization has attracted more and more research attention. The latest trend of mixed precision quantization takes advantage of hardware\u2019s multiple bit-width arithmetic operations to unleash the full potential of network quantization. However, existing approaches rely heavily on an extremely time-consuming search process and various relaxations when seeking the optimal bit configuration. To address this issue, we propose to optimize a proxy metric of network orthogonality that can be efficiently solved with linear programming, which proves to be highly correlated with quantized model accuracy and bit-width. 
Our approach significantly reduces the search time and the required data amount by orders of magnitude, but without a compromise on quantization accuracy. Specifically, we achieve 72.08% Top-1 accuracy on ResNet-18 with 6.7Mb parameters, which does not require any searching iterations. Given the high efficiency and low data dependency of our algorithm, we use it for the post-training quantization, which achieves 71.27% Top-1 accuracy on MobileNetV2 with only 1.5Mb parameters.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Yuexiao and Jin, Taisong and Zheng, Xiawu and Wang, Yan and Li, Huixia and Wu, Yongjian and Jiang, Guannan and Zhang, Wei and Ji, Rongrong}, year={2023}, month={Jun.}, pages={9029-9037} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26084/25856", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26084", + "pdf_size": 623912, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2169512239653025258&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn;pcl.ac.cn;samsara.com;stu.xmu.edu.cn;tencent.com;catl.com;catl.com;xmu.edu.cn", + "email": "stu.xmu.edu.cn;xmu.edu.cn;pcl.ac.cn;samsara.com;stu.xmu.edu.cn;tencent.com;catl.com;catl.com;xmu.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;2;0;3;4;4;0", + "aff_unique_norm": "Xiamen University;Peng Cheng Laboratory;Samsara;Tencent Technology (Shanghai) Co., Ltd;CATL", + "aff_unique_dep": "Department of Artificial Intelligence;;;;", + "aff_unique_url": "https://www.xmu.edu.cn;;;https://www.tencent.com;https://www.catl.com.cn", + "aff_unique_abbr": "XMU;;;Tencent;CATL", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Shenzhen;Seattle", + "aff_country_unique_index": "0;0;0;1;0;0;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26851", + "title": "OPRADI: Applying Security 
Game to Fight Drive under the Influence in Real-World", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Driving under the influence (DUI) is one of the main causes of traffic accidents, often leading to severe life and property losses. Setting up sobriety checkpoints on certain roads is the most commonly used practice to identify DUI-drivers in many countries worldwide. However, setting up checkpoints according to the police's experiences may not be effective for ignoring the strategic interactions between the police and DUI-drivers, particularly when inspecting resources are limited. To remedy this situation, we adapt the classic Stackelberg security game (SSG) to a new SSG-DUI game to describe the strategic interactions in catching DUI-drivers. SSG-DUI features drivers' bounded rationality and social knowledge sharing among them, thus realizing improved real-world fidelity. With SSG-DUI, we propose OPRADI, a systematic approach for advising better strategies in setting up checkpoints. We perform extensive experiments to evaluate it in both simulated environments and real-world contexts, in collaborating with a Chinese city's police bureau. 
The results reveal its effectiveness in improving police's real-world operations, thus having significant practical potentials.", + "primary_area": "emerging applications of ai", + "author": "Luzhan Yuan; Wei Wang; Gaowei Zhang; Yi Wang", + "authorids": "", + "aff": "Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications", + "bibtex": "@article{Yuan_Wang_Zhang_Wang_2024, title={OPRADI: Applying Security Game to Fight Drive under the Influence in Real-World}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26851}, DOI={10.1609/aaai.v37i13.26851}, abstractNote={Driving under the influence (DUI) is one of the main causes of traffic accidents, often leading to severe life and property losses. Setting up sobriety checkpoints on certain roads is the most commonly used practice to identify DUI-drivers in many countries worldwide. However, setting up checkpoints according to the police\u2019s experiences may not be effective for ignoring the strategic interactions between the police and DUI-drivers, particularly when inspecting resources are limited. To remedy this situation, we adapt the classic Stackelberg security game (SSG) to a new SSG-DUI game to describe the strategic interactions in catching DUI-drivers. SSG-DUI features drivers\u2019 bounded rationality and social knowledge sharing among them, thus realizing improved real-world fidelity. With SSG-DUI, we propose OPRADI, a systematic approach for advising better strategies in setting up checkpoints. We perform extensive experiments to evaluate it in both simulated environments and real-world contexts, in collaborating with a Chinese city\u2019s police bureau. 
The results reveal its effectiveness in improving police\u2019s real-world operations, thus having significant practical potentials.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Luzhan and Wang, Wei and Zhang, Gaowei and Wang, Yi}, year={2024}, month={Jul.}, pages={15612-15620} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26851/26623", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26851", + "pdf_size": 2440716, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:hD7KyvZdMUYJ:scholar.google.com/&scioq=OPRADI:+Applying+Security+Game+to+Fight+Drive+under+the+Influence+in+Real-World&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26468", + "title": "OPT-GAN: A Broad-Spectrum Global Optimizer for Black-Box Problems by Learning Distribution", + "track": "main", + "status": "Technical", + "abstract": "Black-box optimization (BBO) algorithms are concerned with finding the best solutions for problems with missing analytical details. Most classical methods for such problems are based on strong and fixed a priori assumptions, such as Gaussianity. However, the complex real-world problems, especially when the global optimum is desired, could be very far from the a priori assumptions because of their diversities, causing unexpected obstacles. 
In this study, we propose a generative adversarial net-based broad-spectrum global optimizer (OPT-GAN) which estimates the distribution of optimum gradually, with strategies to balance exploration-exploitation trade-off. It has potential to better adapt to the regularity and structure of diversified landscapes than other methods with fixed prior, e.g., Gaussian assumption or separability. \nExperiments on diverse BBO benchmarks and high dimensional real world applications exhibit that OPT-GAN outperforms other traditional and neural net-based BBO algorithms. The code and Appendix are available at https://github.com/NBICLAB/OPT-GAN", + "primary_area": "search and optimization", + "author": "Minfang Lu; Shuai Ning; Shuangrong Liu; Fengyang Sun; Bo Zhang; Bo Yang; Lin Wang", + "authorids": "", + "aff": "Shandong Provincial Key Laboratory of Network Based Intelligent Computing, University of Jinan, Jinan 250022, China+Quan Cheng Laboratory, Jinan 250100, China+Cainiao Network, Hangzhou, China; Shandong Provincial Key Laboratory of Network Based Intelligent Computing, University of Jinan, Jinan 250022, China+Quan Cheng Laboratory, Jinan 250100, China; Department of Computer Science, The University of Suwon, Hwaseong 18323, South Korea; Victoria University of Wellington, Wellington 6140, New Zealand; Shandong Provincial Key Laboratory of Network Based Intelligent Computing, University of Jinan, Jinan 250022, China+Quan Cheng Laboratory, Jinan 250100, China; Quan Cheng Laboratory, Jinan 250100, China; Shandong Provincial Key Laboratory of Network Based Intelligent Computing, University of Jinan, Jinan 250022, China", + "bibtex": "@article{Lu_Ning_Liu_Sun_Zhang_Yang_Wang_2023, title={OPT-GAN: A Broad-Spectrum Global Optimizer for Black-Box Problems by Learning Distribution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26468}, DOI={10.1609/aaai.v37i10.26468}, abstractNote={Black-box optimization (BBO) algorithms are concerned with finding the best 
solutions for problems with missing analytical details. Most classical methods for such problems are based on strong and fixed a priori assumptions, such as Gaussianity. However, the complex real-world problems, especially when the global optimum is desired, could be very far from the a priori assumptions because of their diversities, causing unexpected obstacles. In this study, we propose a generative adversarial net-based broad-spectrum global optimizer (OPT-GAN) which estimates the distribution of optimum gradually, with strategies to balance exploration-exploitation trade-off. It has potential to better adapt to the regularity and structure of diversified landscapes than other methods with fixed prior, e.g., Gaussian assumption or separability. Experiments on diverse BBO benchmarks and high dimensional real world applications exhibit that OPT-GAN outperforms other traditional and neural net-based BBO algorithms. The code and Appendix are available at https://github.com/NBICLAB/OPT-GAN}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Minfang and Ning, Shuai and Liu, Shuangrong and Sun, Fengyang and Zhang, Bo and Yang, Bo and Wang, Lin}, year={2023}, month={Jun.}, pages={12462-12472} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26468/26240", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26468", + "pdf_size": 12996009, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4607512246512655409&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "alibaba-inc.com;gmail.com;gmail.com;vuw.ac.nz;gmail.com;qcl.edu.cn;gmail.com", + "email": "alibaba-inc.com;gmail.com;gmail.com;vuw.ac.nz;gmail.com;qcl.edu.cn;gmail.com", + "github": "https://github.com/NBICLAB/OPT-GAN", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2;0+1;3;4;0+1;1;0", + "aff_unique_norm": "University of Jinan;Quan Cheng Laboratory;Cainiao Network;The University 
of Suwon;Victoria University of Wellington", + "aff_unique_dep": "Shandong Provincial Key Laboratory of Network Based Intelligent Computing;;;Department of Computer Science;", + "aff_unique_url": ";;https://www.cainiao.com;http://www.suwon.ac.kr;https://www.victoria.ac.nz", + "aff_unique_abbr": ";;Cainiao;Suwon;VUW", + "aff_campus_unique_index": "0+2;0;3;4;0;0", + "aff_campus_unique": "Jinan;;Hangzhou;Hwaseong;Wellington", + "aff_country_unique_index": "0+0+0;0+0;1;2;0+0;0;0", + "aff_country_unique": "China;South Korea;New Zealand" + }, + { + "id": "article-25474", + "title": "Occupancy Planes for Single-View RGB-D Human Reconstruction", + "track": "main", + "status": "Technical", + "abstract": "Single-view RGB-D human reconstruction with implicit functions is often formulated as per-point classification. Specifically, a set of 3D locations within the view-frustum of the camera are first projected independently onto the image and a corresponding feature is subsequently extracted for each 3D location. The feature of each 3D location is then used to classify independently whether the corresponding 3D point is inside or outside the observed object. This procedure leads to sub-optimal results because correlations between predictions for neighboring locations are only taken into account implicitly via the extracted features. For more accurate results we propose the occupancy planes (OPlanes) representation, which enables to formulate single-view RGB-D human reconstruction as occupancy prediction on planes which slice through the camera's view frustum. Such a representation provides more flexibility than voxel grids and enables to better leverage correlations than per-point classification. 
On the challenging S3D data we observe a simple classifier based on the OPlanes representation to yield compelling results, especially in difficult situations with partial occlusions due to other objects and partial visibility, which haven't been addressed by prior work.", + "primary_area": "computer vision iii", + "author": "Xiaoming Zhao; Yuan-Ting Hu; Zhongzheng Ren; Alexander G. Schwing", + "authorids": "", + "aff": "University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign", + "bibtex": "@article{Zhao_Hu_Ren_Schwing_2023, title={Occupancy Planes for Single-View RGB-D Human Reconstruction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25474}, DOI={10.1609/aaai.v37i3.25474}, abstractNote={Single-view RGB-D human reconstruction with implicit functions is often formulated as per-point classification. Specifically, a set of 3D locations within the view-frustum of the camera are first projected independently onto the image and a corresponding feature is subsequently extracted for each 3D location. The feature of each 3D location is then used to classify independently whether the corresponding 3D point is inside or outside the observed object. This procedure leads to sub-optimal results because correlations between predictions for neighboring locations are only taken into account implicitly via the extracted features. For more accurate results we propose the occupancy planes (OPlanes) representation, which enables to formulate single-view RGB-D human reconstruction as occupancy prediction on planes which slice through the camera\u2019s view frustum. Such a representation provides more flexibility than voxel grids and enables to better leverage correlations than per-point classification. 
On the challenging S3D data we observe a simple classifier based on the OPlanes representation to yield compelling results, especially in difficult situations with partial occlusions due to other objects and partial visibility, which haven\u2019t been addressed by prior work.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Xiaoming and Hu, Yuan-Ting and Ren, Zhongzheng and Schwing, Alexander G.}, year={2023}, month={Jun.}, pages={3633-3641} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25474/25246", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25474", + "pdf_size": 13155861, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6041906040448376589&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25121", + "title": "OctFormer: Efficient Octree-Based Transformer for Point Cloud Compression with Local Enhancement", + "track": "main", + "status": "Technical", + "abstract": "Point cloud compression with a higher compression ratio and tiny loss is essential for efficient data transportation. However, previous methods that depend on 3D convolution or frequent multi-head self-attention operations bring huge computations. 
To address this problem, we propose an octree-based Transformer compression method called OctFormer, which does not rely on the occupancy information of sibling nodes. Our method uses non-overlapped context windows to construct octree node sequences and share the result of a multi-head self-attention operation among a sequence of nodes. Besides, we introduce a locally-enhance module for exploiting the sibling features and a positional encoding generator for enhancing the translation invariance of the octree node sequence. Compared to the previous state-of-the-art works, our method obtains up to 17% Bpp savings compared to the voxel-context-based baseline and saves an overall 99% coding time compared to the attention-based baseline.", + "primary_area": "computer vision i", + "author": "Mingyue Cui; Junhua Long; Mingjian Feng; Boyang Li; Huang Kai", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University + Shenzhen Institute, Sun Yat-sen University", + "bibtex": "@article{Cui_Long_Feng_Li_Kai_2023, title={OctFormer: Efficient Octree-Based Transformer for Point Cloud Compression with Local Enhancement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25121}, DOI={10.1609/aaai.v37i1.25121}, abstractNote={Point cloud compression with a higher compression ratio and tiny loss is essential for efficient data transportation. However, previous methods that depend on 3D convolution or frequent multi-head self-attention operations bring huge computations. To address this problem, we propose an octree-based Transformer compression method called OctFormer, which does not rely on the occupancy information of sibling nodes. 
Our method uses non-overlapped context windows to construct octree node sequences and share the result of a multi-head self-attention operation among a sequence of nodes. Besides, we introduce a locally-enhance module for exploiting the sibling features and a positional encoding generator for enhancing the translation invariance of the octree node sequence. Compared to the previous state-of-the-art works, our method obtains up to 17% Bpp savings compared to the voxel-context-based baseline and saves an overall 99% coding time compared to the attention-based baseline.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Mingyue and Long, Junhua and Feng, Mingjian and Li, Boyang and Kai, Huang}, year={2023}, month={Jun.}, pages={470-478} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25121/24893", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25121", + "pdf_size": 2042636, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7409159883188544598&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+0", + "aff_unique_norm": "Sun Yat-sen University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.sysu.edu.cn", + "aff_unique_abbr": "SYSU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26099", + "title": "Off-Policy Proximal Policy Optimization", + "track": "main", + "status": "Technical", + "abstract": "Proximal Policy Optimization (PPO) is an important reinforcement learning method, which has 
achieved great success in sequential decision-making problems. However, PPO faces the issue of sample inefficiency, which is due to the PPO cannot make use of off-policy data. In this paper, we propose an Off-Policy Proximal Policy Optimization method (Off-Policy PPO) that improves the sample efficiency of PPO by utilizing off-policy data. Specifically, we first propose a clipped surrogate objective function that can utilize off-policy data and avoid excessively large policy updates. Next, we theoretically clarify the stability of the optimization process of the proposed surrogate objective by demonstrating the degree of policy update distance is consistent with that in the PPO. We then describe the implementation details of the proposed Off-Policy PPO which iteratively updates policies by optimizing the proposed clipped surrogate objective. Finally, the experimental results on representative continuous control tasks validate that our method outperforms the state-of-the-art methods on most tasks.", + "primary_area": "machine learning iii", + "author": "Wenjia Meng; Qian Zheng; Gang Pan; Yilong Yin", + "authorids": "", + "aff": "School of Software, Shandong University, Jinan, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China + College of Computer Science and Technology, Zhejiang University, Hangzhou, China; The State Key Lab of Brain-Machine Intelligence, Zhejiang University, Hangzhou, China + College of Computer Science and Technology, Zhejiang University, Hangzhou, China; School of Software, Shandong University, Jinan, China", + "bibtex": "@article{Meng_Zheng_Pan_Yin_2023, title={Off-Policy Proximal Policy Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26099}, DOI={10.1609/aaai.v37i8.26099}, abstractNote={Proximal Policy Optimization (PPO) is an important reinforcement learning method, which has achieved great success in sequential decision-making problems. 
However, PPO faces the issue of sample inefficiency, which is due to the PPO cannot make use of off-policy data. In this paper, we propose an Off-Policy Proximal Policy Optimization method (Off-Policy PPO) that improves the sample efficiency of PPO by utilizing off-policy data. Specifically, we first propose a clipped surrogate objective function that can utilize off-policy data and avoid excessively large policy updates. Next, we theoretically clarify the stability of the optimization process of the proposed surrogate objective by demonstrating the degree of policy update distance is consistent with that in the PPO. We then describe the implementation details of the proposed Off-Policy PPO which iteratively updates policies by optimizing the proposed clipped surrogate objective. Finally, the experimental results on representative continuous control tasks validate that our method outperforms the state-of-the-art methods on most tasks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Meng, Wenjia and Zheng, Qian and Pan, Gang and Yin, Yilong}, year={2023}, month={Jun.}, pages={9162-9170} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26099/25871", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26099", + "pdf_size": 1133692, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4421912275450113740&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "sdu.edu.cn;zju.edu.cn;zju.edu.cn;sdu.edu.cn", + "email": "sdu.edu.cn;zju.edu.cn;zju.edu.cn;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+1;1+1;0", + "aff_unique_norm": "Shandong University;Zhejiang University", + "aff_unique_dep": "School of Software;State Key Lab of Brain-Machine Intelligence", + "aff_unique_url": "http://www.sdu.edu.cn;http://www.zju.edu.cn", + "aff_unique_abbr": ";ZJU", + "aff_campus_unique_index": "0;1+1;1+1;0", + 
"aff_campus_unique": "Jinan;Hangzhou", + "aff_country_unique_index": "0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26305", + "title": "Offline Imitation Learning with Suboptimal Demonstrations via Relaxed Distribution Matching", + "track": "main", + "status": "Technical", + "abstract": "Offline imitation learning (IL) promises the ability to learn performant policies from pre-collected demonstrations without interactions with the environment. However, imitating behaviors fully offline typically requires numerous expert data. To tackle this issue, we study the setting where we have limited expert data and supplementary suboptimal data. In this case, a well-known issue is the distribution shift between the learned policy and the behavior policy that collects the offline data. Prior works mitigate this issue by regularizing the KL divergence between the stationary state-action distributions of the learned policy and the behavior policy. We argue that such constraints based on exact distribution matching can be overly conservative and hamper policy learning, especially when the imperfect offline data is highly suboptimal. To resolve this issue, we present RelaxDICE, which employs an asymmetrically-relaxed f-divergence for explicit support regularization. Specifically, instead of driving the learned policy to exactly match the behavior policy, we impose little penalty whenever the density ratio between their stationary state-action distributions is upper bounded by a constant. Note that such formulation leads to a nested min-max optimization problem, which causes instability in practice. RelaxDICE addresses this challenge by supporting a closed-form solution for the inner maximization problem. 
Extensive empirical study shows that our method significantly outperforms the best prior offline IL method in six standard continuous control environments with over 30% performance gain on average, across 22 settings where the imperfect dataset is highly suboptimal.", + "primary_area": "machine learning iv", + "author": "Lantao Yu; Tianhe Yu; Jiaming Song; Willie Neiswanger; Stefano Ermon", + "authorids": "", + "aff": "Computer Science Department, Stanford University; Computer Science Department, Stanford University; NVIDIA (Work done while at Stanford); Computer Science Department, Stanford University; Computer Science Department, Stanford University", + "bibtex": "@article{Yu_Yu_Song_Neiswanger_Ermon_2023, title={Offline Imitation Learning with Suboptimal Demonstrations via Relaxed Distribution Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26305}, DOI={10.1609/aaai.v37i9.26305}, abstractNote={Offline imitation learning (IL) promises the ability to learn performant policies from pre-collected demonstrations without interactions with the environment. However, imitating behaviors fully offline typically requires numerous expert data. To tackle this issue, we study the setting where we have limited expert data and supplementary suboptimal data. In this case, a well-known issue is the distribution shift between the learned policy and the behavior policy that collects the offline data. Prior works mitigate this issue by regularizing the KL divergence between the stationary state-action distributions of the learned policy and the behavior policy. We argue that such constraints based on exact distribution matching can be overly conservative and hamper policy learning, especially when the imperfect offline data is highly suboptimal. To resolve this issue, we present RelaxDICE, which employs an asymmetrically-relaxed f-divergence for explicit support regularization. 
Specifically, instead of driving the learned policy to exactly match the behavior policy, we impose little penalty whenever the density ratio between their stationary state-action distributions is upper bounded by a constant. Note that such formulation leads to a nested min-max optimization problem, which causes instability in practice. RelaxDICE addresses this challenge by supporting a closed-form solution for the inner maximization problem. Extensive empirical study shows that our method significantly outperforms the best prior offline IL method in six standard continuous control environments with over 30% performance gain on average, across 22 settings where the imperfect dataset is highly suboptimal.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Lantao and Yu, Tianhe and Song, Jiaming and Neiswanger, Willie and Ermon, Stefano}, year={2023}, month={Jun.}, pages={11016-11024} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26305/26077", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26305", + "pdf_size": 256186, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14494999292158181444&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Stanford University;NVIDIA", + "aff_unique_dep": "Computer Science Department;", + "aff_unique_url": "https://www.stanford.edu;https://www.nvidia.com", + "aff_unique_abbr": "Stanford;NVIDIA", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25872", + "title": "Offline Quantum 
Reinforcement Learning in a Conservative Manner", + "track": "main", + "status": "Technical", + "abstract": "Recently, to reap the quantum advantage, empowering reinforcement learning (RL) with quantum computing has attracted much attention, which is dubbed as quantum RL (QRL). However, current QRL algorithms employ an online learning scheme, i.e., the policy that is run on a quantum computer needs to interact with the environment to collect experiences, which could be expensive and dangerous for practical applications. In this paper, we aim to solve this problem in an offline learning manner. To be more specific, we develop the first offline quantum RL (offline QRL) algorithm named CQ2L (Conservative Quantum Q-learning), which learns from offline samples and does not require any interaction with the environment. CQ2L utilizes variational quantum circuits (VQCs), which are improved with data re-uploading and scaling parameters, to represent Q-value functions of agents. To suppress the overestimation of Q-values resulting from offline data, we first employ a double Q-learning framework to reduce the overestimation bias; then a penalty term that encourages generating conservative Q-values is designed. 
We conduct abundant experiments to demonstrate that the proposed method CQ2L can successfully solve offline QRL tasks that the online counterpart could not.", + "primary_area": "machine learning i", + "author": "Zhihao Cheng; Kaining Zhang; Li Shen; Dacheng Tao", + "authorids": "", + "aff": "The University of Sydney, Australia; The University of Sydney, Australia; JD Explore Academy, China; JD Explore Academy, China", + "bibtex": "@article{Cheng_Zhang_Shen_Tao_2023, title={Offline Quantum Reinforcement Learning in a Conservative Manner}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25872}, DOI={10.1609/aaai.v37i6.25872}, abstractNote={Recently, to reap the quantum advantage, empowering reinforcement learning (RL) with quantum computing has attracted much attention, which is dubbed as quantum RL (QRL). However, current QRL algorithms employ an online learning scheme, i.e., the policy that is run on a quantum computer needs to interact with the environment to collect experiences, which could be expensive and dangerous for practical applications. In this paper, we aim to solve this problem in an offline learning manner. To be more specific, we develop the first offline quantum RL (offline QRL) algorithm named CQ2L (Conservative Quantum Q-learning), which learns from offline samples and does not require any interaction with the environment. CQ2L utilizes variational quantum circuits (VQCs), which are improved with data re-uploading and scaling parameters, to represent Q-value functions of agents. To suppress the overestimation of Q-values resulting from offline data, we first employ a double Q-learning framework to reduce the overestimation bias; then a penalty term that encourages generating conservative Q-values is designed. 
We conduct abundant experiments to demonstrate that the proposed method CQ2L can successfully solve offline QRL tasks that the online counterpart could not.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Zhihao and Zhang, Kaining and Shen, Li and Tao, Dacheng}, year={2023}, month={Jun.}, pages={7148-7156} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25872/25644", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25872", + "pdf_size": 1485377, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8304947301701187086&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "uni.sydney.edu.au;uni.sydney.edu.au;gmail.com;gmail.com", + "email": "uni.sydney.edu.au;uni.sydney.edu.au;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "The University of Sydney;JD Explore Academy", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.sydney.edu.au;", + "aff_unique_abbr": "USYD;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-26987", + "title": "On Analyzing the Role of Image for Visual-Enhanced Relation Extraction (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Multimodal relation extraction is an essential task for knowledge graph construction. In this paper, we take an in-depth empirical analysis that indicates the inaccurate information in the visual scene graph leads to poor modal alignment weights, further degrading performance. Moreover, the visual shuffle experiments illustrate that the current approaches may not take full advantage of visual information. 
Based on the above observation, we further propose a strong baseline with an implicit fine-grained multimodal alignment based on Transformer for multimodal relation extraction. Experimental results demonstrate the better performance of our method. Codes are available at https://github.com/zjunlp/DeepKE/tree/main/example/re/multimodal.", + "primary_area": "", + "author": "Lei Li; Xiang Chen; Shuofei Qiao; Feiyu Xiong; Huajun Chen; Ningyu Zhang", + "authorids": "", + "aff": "Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Alibaba Group, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China", + "bibtex": "@article{Li_Chen_Qiao_Xiong_Chen_Zhang_2024, title={On Analyzing the Role of Image for Visual-Enhanced Relation Extraction (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26987}, DOI={10.1609/aaai.v37i13.26987}, abstractNote={Multimodal relation extraction is an essential task for knowledge graph construction. In this paper, we take an in-depth empirical analysis that indicates the inaccurate information in the visual scene graph leads to poor modal alignment weights, further degrading performance. Moreover, the visual shuffle experiments illustrate that the current approaches may not take full advantage of visual information. Based on the above observation, we further propose a strong baseline with an implicit fine-grained multimodal alignment based on Transformer for multimodal relation extraction. 
Experimental results demonstrate the better performance of our method. Codes are available at https://github.com/zjunlp/DeepKE/tree/main/example/re/multimodal.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Lei and Chen, Xiang and Qiao, Shuofei and Xiong, Feiyu and Chen, Huajun and Zhang, Ningyu}, year={2024}, month={Jul.}, pages={16254-16255} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26987/26759", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26987", + "pdf_size": 1024204, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2790013237452130061&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;alibaba-inc.com;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;alibaba-inc.com;zju.edu.cn;zju.edu.cn", + "github": "https://github.com/zjunlp/DeepKE/tree/main/example/re/multimodal", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;1;0+0;0+0", + "aff_unique_norm": "Zhejiang University;Alibaba Group", + "aff_unique_dep": "Joint Lab for Knowledge Engine;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ZJU;Alibaba", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25993", + "title": "On Error and Compression Rates for Prototype Rules", + "track": "main", + "status": "Technical", + "abstract": "We study the close interplay between error and compression in the non-parametric multiclass classification setting in terms of prototype learning rules. We focus in particular on a recently proposed compression-based learning rule termed OptiNet. 
Beyond its computational merits, this rule has been recently shown to be universally consistent in any metric instance space that admits a universally consistent rule---the first learning algorithm known to enjoy this property. However, its error and compression rates have been left open. Here we derive such rates in the case where instances reside in Euclidean space under commonly posed smoothness and tail conditions on the data distribution. We first show that OptiNet achieves non-trivial compression rates while enjoying near minimax-optimal error rates. We then proceed to study a novel general compression scheme for further compressing prototype rules that locally adapts to the noise level without sacrificing accuracy. Applying it to OptiNet, we show that under a geometric margin condition further gain in the compression rate is achieved. Experimental results comparing the performance of the various methods are presented.", + "primary_area": "machine learning ii", + "author": "Omer Kerem; Roi Weiss", + "authorids": "", + "aff": "Ben-Gurion University of the Negev; Ariel University", + "bibtex": "@article{Kerem_Weiss_2023, title={On Error and Compression Rates for Prototype Rules}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25993}, DOI={10.1609/aaai.v37i7.25993}, abstractNote={We study the close interplay between error and compression in the non-parametric multiclass classification setting in terms of prototype learning rules. We focus in particular on a recently proposed compression-based learning rule termed OptiNet. Beyond its computational merits, this rule has been recently shown to be universally consistent in any metric instance space that admits a universally consistent rule---the first learning algorithm known to enjoy this property. However, its error and compression rates have been left open. 
Here we derive such rates in the case where instances reside in Euclidean space under commonly posed smoothness and tail conditions on the data distribution. We first show that OptiNet achieves non-trivial compression rates while enjoying near minimax-optimal error rates. We then proceed to study a novel general compression scheme for further compressing prototype rules that locally adapts to the noise level without sacrificing accuracy. Applying it to OptiNet, we show that under a geometric margin condition further gain in the compression rate is achieved. Experimental results comparing the performance of the various methods are presented.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kerem, Omer and Weiss, Roi}, year={2023}, month={Jun.}, pages={8228-8236} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25993/25765", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25993", + "pdf_size": 1075658, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12265372688191012942&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "post.bgu.ac.il;ariel.ac.il", + "email": "post.bgu.ac.il;ariel.ac.il", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Ben-Gurion University of the Negev;Ariel University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bgu.ac.il;https://www.ariel.ac.il", + "aff_unique_abbr": "BGU;Ariel U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25574", + "title": "On Generalized Degree Fairness in Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Conventional graph neural networks (GNNs) are often confronted with fairness issues that may stem from their input, including node attributes and neighbors surrounding a node. 
While several recent approaches have been proposed to eliminate the bias rooted in sensitive attributes, they ignore the other key input of GNNs, namely the neighbors of a node, which can introduce bias since GNNs hinge on neighborhood structures to generate node representations. In particular, the varying neighborhood structures across nodes, manifesting themselves in drastically different node degrees, give rise to the diverse behaviors of nodes and biased outcomes. In this paper, we first define and generalize the degree bias using a generalized definition of node degree as a manifestation and quantification of different multi-hop structures around different nodes. To address the bias in the context of node classification, we propose a novel GNN framework called Generalized Degree Fairness-centric Graph Neural Network (DegFairGNN). Specifically, in each GNN layer, we employ a learnable debiasing function to generate debiasing contexts, which modulate the layer-wise neighborhood aggregation to eliminate the degree bias originating from the diverse degrees among nodes. Extensive experiments on three benchmark datasets demonstrate the effectiveness of our model on both accuracy and fairness metrics.", + "primary_area": "data mining and knowledge management", + "author": "Zemin Liu; Trung-Kien Nguyen; Yuan Fang", + "authorids": "", + "aff": "National University of Singapore; Singapore Management University; Singapore Management University", + "bibtex": "@article{Liu_Nguyen_Fang_2023, title={On Generalized Degree Fairness in Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25574}, DOI={10.1609/aaai.v37i4.25574}, abstractNote={Conventional graph neural networks (GNNs) are often confronted with fairness issues that may stem from their input, including node attributes and neighbors surrounding a node. 
While several recent approaches have been proposed to eliminate the bias rooted in sensitive attributes, they ignore the other key input of GNNs, namely the neighbors of a node, which can introduce bias since GNNs hinge on neighborhood structures to generate node representations. In particular, the varying neighborhood structures across nodes, manifesting themselves in drastically different node degrees, give rise to the diverse behaviors of nodes and biased outcomes. In this paper, we first define and generalize the degree bias using a generalized definition of node degree as a manifestation and quantification of different multi-hop structures around different nodes. To address the bias in the context of node classification, we propose a novel GNN framework called Generalized Degree Fairness-centric Graph Neural Network (DegFairGNN). Specifically, in each GNN layer, we employ a learnable debiasing function to generate debiasing contexts, which modulate the layer-wise neighborhood aggregation to eliminate the degree bias originating from the diverse degrees among nodes. 
Extensive experiments on three benchmark datasets demonstrate the effectiveness of our model on both accuracy and fairness metrics.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Zemin and Nguyen, Trung-Kien and Fang, Yuan}, year={2023}, month={Jun.}, pages={4525-4533} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25574/25346", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25574", + "pdf_size": 326630, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7070549325230254602&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 14, + "aff_domain": "nus.edu.sg;smu.edu.sg;smu.edu.sg", + "email": "nus.edu.sg;smu.edu.sg;smu.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "National University of Singapore;Singapore Management University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nus.edu.sg;https://www.smu.edu.sg", + "aff_unique_abbr": "NUS;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26549", + "title": "On Grounded Planning for Embodied Tasks with Language Models", + "track": "main", + "status": "Technical", + "abstract": "Language models (LMs) have demonstrated their capability in possessing commonsense knowledge of the physical world, a crucial aspect of performing tasks in everyday life. However, it remains unclear whether they have the capacity to generate grounded, executable plans for embodied tasks. This is a challenging task as LMs lack the ability to perceive the environment through vision and feedback from the physical environment. In this paper, we address this important research question and present the first investigation into the topic. 
Our novel problem formulation, named G-PlanET, inputs a high-level goal and a data table about objects in a specific environment, and then outputs a step-by-step actionable plan for a robotic agent to follow. To facilitate the study, we establish an evaluation protocol and design a dedicated metric, KAS, to assess the quality of the plans. Our experiments demonstrate that the use of tables for encoding the environment and an iterative decoding strategy can significantly enhance the LMs' ability in grounded planning. Our analysis also reveals interesting and non-trivial findings.", + "primary_area": "speech natural language processing", + "author": "Bill Yuchen Lin; Chengsong Huang; Qian Liu; Wenda Gu; Sam Sommerer; Xiang Ren", + "authorids": "", + "aff": "University of Southern California; Fudan University; Sea AI Lab; University of Southern California; University of Southern California; University of Southern California", + "bibtex": "@article{Lin_Huang_Liu_Gu_Sommerer_Ren_2023, title={On Grounded Planning for Embodied Tasks with Language Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26549}, DOI={10.1609/aaai.v37i11.26549}, abstractNote={Language models (LMs) have demonstrated their capability in possessing commonsense knowledge of the physical world, a crucial aspect of performing tasks in everyday life. However, it remains unclear whether they have the capacity to generate grounded, executable plans for embodied tasks. This is a challenging task as LMs lack the ability to perceive the environment through vision and feedback from the physical environment. In this paper, we address this important research question and present the first investigation into the topic. Our novel problem formulation, named G-PlanET, inputs a high-level goal and a data table about objects in a specific environment, and then outputs a step-by-step actionable plan for a robotic agent to follow. 
To facilitate the study, we establish an evaluation protocol and design a dedicated metric, KAS, to assess the quality of the plans. Our experiments demonstrate that the use of tables for encoding the environment and an iterative decoding strategy can significantly enhance the LMs\u2019 ability in grounded planning. Our analysis also reveals interesting and non-trivial findings.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Bill Yuchen and Huang, Chengsong and Liu, Qian and Gu, Wenda and Sommerer, Sam and Ren, Xiang}, year={2023}, month={Jun.}, pages={13192-13200} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26549/26321", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26549", + "pdf_size": 703060, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9294057231355022617&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "usc.edu;fudan.edu.cn;sea.com;usc.edu;usc.edu;usc.edu", + "email": "usc.edu;fudan.edu.cn;sea.com;usc.edu;usc.edu;usc.edu", + "github": "", + "project": "https://inklab.usc.edu/G-PlanET", + "author_num": 6, + "aff_unique_index": "0;1;2;0;0;0", + "aff_unique_norm": "University of Southern California;Fudan University;Sea AI Lab", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.usc.edu;https://www.fudan.edu.cn;", + "aff_unique_abbr": "USC;Fudan;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "United States;China;" + }, + { + "id": "article-26116", + "title": "On Instance-Dependent Bounds for Offline Reinforcement Learning with Linear Function Approximation", + "track": "main", + "status": "Technical", + "abstract": "Sample-efficient offline reinforcement learning (RL) with linear function approximation has been studied extensively recently. 
Much of the prior work has yielded instance-independent rates that hold even for the worst-case realization of problem instances. This work seeks to understand instance-dependent bounds for offline RL with linear function approximation. We present an algorithm called Bootstrapped and Constrained Pessimistic Value Iteration (BCP-VI), which leverages data bootstrapping and constrained optimization on top of pessimism. We show that under a partial data coverage assumption, that of concentrability with respect to an optimal policy, the proposed algorithm yields a fast rate for offline RL when there is a positive gap in the optimal Q-value functions, even if the offline data were collected adaptively. Moreover, when the linear features of the optimal actions in the states reachable by an optimal policy span those reachable by the behavior policy and the optimal actions are unique, offline RL achieves absolute zero sub-optimality error when the number of episodes exceeds a (finite) instance-dependent threshold. To the best of our knowledge, these are the first results that give a fast rate bound on the sub-optimality and an absolute zero sub-optimality bound for offline RL with linear function approximation from adaptive data with partial coverage. 
We also provide instance-agnostic and instance-dependent information-theoretical lower bounds to complement our upper bounds.", + "primary_area": "machine learning iii", + "author": "Thanh Nguyen-Tang; Ming Yin; Sunil Gupta; Svetha Venkatesh; Raman Arora", + "authorids": "", + "aff": "Department of Computer Science, Johns Hopkins University; Department of Computer Science and Department of Statistics and Applied Probability, UC Santa Barbara; Applied AI Institute, Deakin University; Applied AI Institute, Deakin University; Department of Computer Science, Johns Hopkins University", + "bibtex": "@article{Nguyen-Tang_Yin_Gupta_Venkatesh_Arora_2023, title={On Instance-Dependent Bounds for Offline Reinforcement Learning with Linear Function Approximation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26116}, DOI={10.1609/aaai.v37i8.26116}, abstractNote={Sample-efficient offline reinforcement learning (RL) with linear function approximation has been studied extensively recently. Much of the prior work has yielded instance-independent rates that hold even for the worst-case realization of problem instances. This work seeks to understand instance-dependent bounds for offline RL with linear function approximation. We present an algorithm called Bootstrapped and Constrained Pessimistic Value Iteration (BCP-VI), which leverages data bootstrapping and constrained optimization on top of pessimism. We show that under a partial data coverage assumption, that of concentrability with respect to an optimal policy, the proposed algorithm yields a fast rate for offline RL when there is a positive gap in the optimal Q-value functions, even if the offline data were collected adaptively. 
Moreover, when the linear features of the optimal actions in the states reachable by an optimal policy span those reachable by the behavior policy and the optimal actions are unique, offline RL achieves absolute zero sub-optimality error when the number of episodes exceeds a (finite) instance-dependent threshold. To the best of our knowledge, these are the first results that give a fast rate bound on the sub-optimality and an absolute zero sub-optimality bound for offline RL with linear function approximation from adaptive data with partial coverage. We also provide instance-agnostic and instance-dependent information-theoretical lower bounds to complement our upper bounds.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen-Tang, Thanh and Yin, Ming and Gupta, Sunil and Venkatesh, Svetha and Arora, Raman}, year={2023}, month={Jun.}, pages={9310-9318} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26116/25888", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26116", + "pdf_size": 177626, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12325465254819490314&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.jhu.edu; ; ; ; ", + "email": "cs.jhu.edu; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;0", + "aff_unique_norm": "Johns Hopkins University;UC Santa Barbara;Deakin University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Applied AI Institute", + "aff_unique_url": "https://www.jhu.edu;https://www.ucsb.edu;https://www.deakin.edu.au", + "aff_unique_abbr": "JHU;UCSB;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Santa Barbara", + "aff_country_unique_index": "0;0;1;1;0", + "aff_country_unique": "United States;Australia" + }, + { + "id": "article-25652", + "title": "On Manipulating Weight Predictions in Signed Weighted Networks", + 
"track": "main", + "status": "Technical", + "abstract": "Adversarial social network analysis studies how graphs can be rewired or otherwise manipulated to evade social network analysis tools. While there is ample literature on manipulating simple networks, more sophisticated network types are much less understood in this respect. In this paper, we focus on the problem of evading FGA---an edge weight prediction method for signed weighted networks by Kumar et al. 2016. Among others, this method can be used for trust prediction in reputation systems. We study the theoretical underpinnings of FGA and its computational properties in terms of manipulability. Our positive finding is that, unlike many other tools, this measure is not only difficult to manipulate optimally, but also it can be difficult to manipulate in practice.", + "primary_area": "domain s of application", + "author": "Tomasz Lizurej; Tomasz Michalak; Stefan Dziembowski", + "authorids": "", + "aff": "University of Warsaw + IDEAS NCBR; University of Warsaw + IDEAS NCBR; University of Warsaw + IDEAS NCBR", + "bibtex": "@article{Lizurej_Michalak_Dziembowski_2023, title={On Manipulating Weight Predictions in Signed Weighted Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25652}, DOI={10.1609/aaai.v37i4.25652}, abstractNote={Adversarial social network analysis studies how graphs can be rewired or otherwise manipulated to evade social network analysis tools. While there is ample literature on manipulating simple networks, more sophisticated network types are much less understood in this respect. In this paper, we focus on the problem of evading FGA---an edge weight prediction method for signed weighted networks by Kumar et al. 2016. Among others, this method can be used for trust prediction in reputation systems. We study the theoretical underpinnings of FGA and its computational properties in terms of manipulability. 
Our positive finding is that, unlike many other tools, this measure is not only difficult to manipulate optimally, but also it can be difficult to manipulate in practice.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lizurej, Tomasz and Michalak, Tomasz and Dziembowski, Stefan}, year={2023}, month={Jun.}, pages={5222-5229} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25652/25424", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25652", + "pdf_size": 429267, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4768526032141979973&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "crypto.edu.pl;mimuw.edu.pl;crypto.edu.pl", + "email": "crypto.edu.pl;mimuw.edu.pl;crypto.edu.pl", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "University of Warsaw;Institute for Development, Economic Analysis, and Simulation (IDEAS)", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uw.edu.pl;https://www.ideas-ncbr.gov.pl", + "aff_unique_abbr": "UW;IDEAS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "Poland" + }, + { + "id": "article-25981", + "title": "On Solution Functions of Optimization: Universal Approximation and Covering Number Bounds", + "track": "main", + "status": "Technical", + "abstract": "We study the expressibility and learnability of solution functions of convex optimization and their multi-layer architectural extension. 
The main results are: (1) the class of solution functions of linear programming (LP) and quadratic programming (QP) is a universal approximant for the smooth model class or some restricted Sobolev space, and we characterize the rate-distortion, (2) the approximation power is investigated through a viewpoint of regression error, where information about the target function is provided in terms of data observations, (3) compositionality in the form of deep architecture with optimization as a layer is shown to reconstruct some basic functions used in numerical analysis without error, which implies that (4) a substantial reduction in rate-distortion can be achieved with a universal network architecture, and (5) we discuss the statistical bounds of empirical covering numbers for LP/QP, as well as a generic optimization problem (possibly nonconvex) by exploiting tame geometry. Our results provide the **first rigorous analysis of the approximation and learning-theoretic properties of solution functions** with implications for algorithmic design and performance guarantees.", + "primary_area": "machine learning ii", + "author": "Ming Jin; Vanshaj Khattar; Harshal Kaushik; Bilgehan Sel; Ruoxi Jia", + "authorids": "", + "aff": "Electrical and Computer Engineering, Virginia Tech; Electrical and Computer Engineering, Virginia Tech; Electrical and Computer Engineering, Virginia Tech; Electrical and Computer Engineering, Virginia Tech; Electrical and Computer Engineering, Virginia Tech", + "bibtex": "@article{Jin_Khattar_Kaushik_Sel_Jia_2023, title={On Solution Functions of Optimization: Universal Approximation and Covering Number Bounds}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25981}, DOI={10.1609/aaai.v37i7.25981}, abstractNote={We study the expressibility and learnability of solution functions of convex optimization and their multi-layer architectural extension. 
The main results are: (1) the class of solution functions of linear programming (LP) and quadratic programming (QP) is a universal approximant for the smooth model class or some restricted Sobolev space, and we characterize the rate-distortion, (2) the approximation power is investigated through a viewpoint of regression error, where information about the target function is provided in terms of data observations, (3) compositionality in the form of deep architecture with optimization as a layer is shown to reconstruct some basic functions used in numerical analysis without error, which implies that (4) a substantial reduction in rate-distortion can be achieved with a universal network architecture, and (5) we discuss the statistical bounds of empirical covering numbers for LP/QP, as well as a generic optimization problem (possibly nonconvex) by exploiting tame geometry. Our results provide the **first rigorous analysis of the approximation and learning-theoretic properties of solution functions** with implications for algorithmic design and performance guarantees.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Ming and Khattar, Vanshaj and Kaushik, Harshal and Sel, Bilgehan and Jia, Ruoxi}, year={2023}, month={Jun.}, pages={8123-8131} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25981/25753", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25981", + "pdf_size": 174712, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10533053146992100462&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "vt.edu;vt.edu;vt.edu;vt.edu;vt.edu", + "email": "vt.edu;vt.edu;vt.edu;vt.edu;vt.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Virginia Tech", + "aff_unique_dep": "Electrical and Computer Engineering", + "aff_unique_url": "https://www.vt.edu", + "aff_unique_abbr": "VT", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26420", + "title": "On Total-Order HTN Plan Verification with Method Preconditions \u2013 An Extension of the CYK Parsing Algorithm", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we consider the plan verification problem for totally ordered (TO) HTN planning. The problem is proved to be solvable in polynomial time by recognizing its connection to the membership decision problem for context-free grammars. Currently, most HTN plan verification approaches do not have special treatments for the TO configuration, and the only one features such an optimization still relies on an exhaustive search. Hence, we will develop a new TOHTN plan verification approach in this paper by extending the standard CYK parsing algorithm which acts as the best decision procedure in general.", + "primary_area": "planning routing and scheduling", + "author": "Songtuan Lin; Gregor Behnke; Simona Ondr\u010dkov\u00e1; Roman Bart\u00e1k; Pascal Bercher", + "authorids": "", + "aff": "School of Computing, The Australian National University, Canberra, Australia; ILLC, University of Amsterdam, Amsterdam, The Netherlands; Faculty of Mathematics and Physics, Charles University, Prague, Czech Republic; Faculty of Mathematics and Physics, Charles University, Prague, Czech Republic; School of Computing, The Australian National University, Canberra, Australia", + "bibtex": "@article{Lin_Behnke_Ondr\u010dkov\u00e1_Bart\u00e1k_Bercher_2023, title={On Total-Order HTN Plan Verification with Method Preconditions \u2013 An Extension of the CYK Parsing Algorithm}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26420}, DOI={10.1609/aaai.v37i10.26420}, abstractNote={In this paper, we consider the plan verification problem for totally ordered (TO) HTN planning. 
The problem is proved to be solvable in polynomial time by recognizing its connection to the membership decision problem for context-free grammars. Currently, most HTN plan verification approaches do not have special treatments for the TO configuration, and the only one features such an optimization still relies on an exhaustive search. Hence, we will develop a new TOHTN plan verification approach in this paper by extending the standard CYK parsing algorithm which acts as the best decision procedure in general.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Songtuan and Behnke, Gregor and Ondr\u010dkov\u00e1, Simona and Bart\u00e1k, Roman and Bercher, Pascal}, year={2023}, month={Jun.}, pages={12041-12048} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26420/26192", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26420", + "pdf_size": 1032411, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6909785867293032497&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 11, + "aff_domain": "anu.edu.au;uva.nl;ktiml.mff.cuni.cz;ktiml.mff.cuni.cz;anu.edu.au", + "email": "anu.edu.au;uva.nl;ktiml.mff.cuni.cz;ktiml.mff.cuni.cz;anu.edu.au", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;0", + "aff_unique_norm": "The Australian National University;University of Amsterdam;Charles University", + "aff_unique_dep": "School of Computing;ILLC;Faculty of Mathematics and Physics", + "aff_unique_url": "https://www.anu.edu.au;https://www.uva.nl;https://www.cuni.cz", + "aff_unique_abbr": "ANU;UvA;Charles University", + "aff_campus_unique_index": "0;1;2;2;0", + "aff_campus_unique": "Canberra;Amsterdam;Prague", + "aff_country_unique_index": "0;1;2;2;0", + "aff_country_unique": "Australia;The Netherlands;Czech Republic" + }, + { + "id": "article-25805", + "title": "On Undisputed Sets in Abstract Argumentation", + "track": "main", + "status": 
"Technical", + "abstract": "We introduce the notion of an undisputed set for abstract argumentation frameworks, which is a conflict-free set of arguments, such that its reduct contains no non-empty admissible set. We show that undisputed sets, and the stronger notion of strongly undisputed sets, provide a meaningful approach to weaken admissibility and deal with the problem of attacks from self-attacking arguments, in a similar manner as the recently introduced notion of weak admissibility. We investigate the properties of our new semantical notions and show certain relationships to classical semantics, in particular that undisputed sets are a generalisation of preferred extensions and strongly undisputed sets are a generalisation of stable extensions. We also investigate the computational complexity of standard reasoning tasks with these new notions and show that they lie on the second and third level of the polynomial hierarchy, respectively.", + "primary_area": "knowledge representation and reasoning", + "author": "Matthias Thimm", + "authorids": "", + "aff": "Artificial Intelligence Group, University of Hagen, Germany", + "bibtex": "@article{Thimm_2023, title={On Undisputed Sets in Abstract Argumentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25805}, DOI={10.1609/aaai.v37i5.25805}, abstractNote={We introduce the notion of an undisputed set for abstract argumentation frameworks, which is a conflict-free set of arguments, such that its reduct contains no non-empty admissible set. We show that undisputed sets, and the stronger notion of strongly undisputed sets, provide a meaningful approach to weaken admissibility and deal with the problem of attacks from self-attacking arguments, in a similar manner as the recently introduced notion of weak admissibility. 
We investigate the properties of our new semantical notions and show certain relationships to classical semantics, in particular that undisputed sets are a generalisation of preferred extensions and strongly undisputed sets are a generalisation of stable extensions. We also investigate the computational complexity of standard reasoning tasks with these new notions and show that they lie on the second and third level of the polynomial hierarchy, respectively.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Thimm, Matthias}, year={2023}, month={Jun.}, pages={6550-6557} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25805/25577", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25805", + "pdf_size": 129543, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9680745151807766576&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "fernuni-hagen.de", + "email": "fernuni-hagen.de", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Hagen", + "aff_unique_dep": "Artificial Intelligence Group", + "aff_unique_url": "https://www.uni-hagen.de/", + "aff_unique_abbr": "", + "aff_country_unique_index": "0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26630", + "title": "On the Calibration and Uncertainty with P\u00f3lya-Gamma Augmentation for Dialog Retrieval Models", + "track": "main", + "status": "Technical", + "abstract": "Deep neural retrieval models have amply demonstrated their power but estimating the reliability of their predictions remains challenging. Most dialog response retrieval models output a single score for a response on how relevant it is to a given question. However, the bad calibration of deep neural network results in various uncertainty for the single score such that the unreliable predictions always misinform user decisions. 
To investigate these issues, we present an efficient calibration and uncertainty estimation framework PG-DRR for dialog response retrieval models which adds a Gaussian Process layer to a deterministic deep neural network and recovers conjugacy for tractable posterior inference by P\u00f3lya-Gamma augmentation. Finally, PG-DRR achieves the lowest empirical calibration error (ECE) in the in-domain datasets and the distributional shift task while keeping R10@1 and MAP performance.", + "primary_area": "speech natural language processing", + "author": "Tong Ye; Shijing Si; Jianzong Wang; Ning Cheng; Zhitao Li; Jing Xiao", + "authorids": "", + "aff": "Ping An Technology (Shenzhen) Co., Ltd. + University of Science and Technology of China; Ping An Technology (Shenzhen) Co., Ltd.; Ping An Technology (Shenzhen) Co., Ltd.; Ping An Technology (Shenzhen) Co., Ltd.; Ping An Technology (Shenzhen) Co., Ltd.; Ping An Technology (Shenzhen) Co., Ltd.", + "bibtex": "@article{Ye_Si_Wang_Cheng_Li_Xiao_2023, title={On the Calibration and Uncertainty with P\u00f3lya-Gamma Augmentation for Dialog Retrieval Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26630}, DOI={10.1609/aaai.v37i11.26630}, abstractNote={Deep neural retrieval models have amply demonstrated their power but estimating the reliability of their predictions remains challenging. Most dialog response retrieval models output a single score for a response on how relevant it is to a given question. However, the bad calibration of deep neural network results in various uncertainty for the single score such that the unreliable predictions always misinform user decisions. To investigate these issues, we present an efficient calibration and uncertainty estimation framework PG-DRR for dialog response retrieval models which adds a Gaussian Process layer to a deterministic deep neural network and recovers conjugacy for tractable posterior inference by P\u00f3lya-Gamma augmentation. 
Finally, PG-DRR achieves the lowest empirical calibration error (ECE) in the in-domain datasets and the distributional shift task while keeping R10@1 and MAP performance.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ye, Tong and Si, Shijing and Wang, Jianzong and Cheng, Ning and Li, Zhitao and Xiao, Jing}, year={2023}, month={Jun.}, pages={13923-13931} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26630/26402", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26630", + "pdf_size": 469350, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17190805824251973166&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "188.com; ; ; ; ; ", + "email": "188.com; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;0;0;0", + "aff_unique_norm": "Ping An Technology;University of Science and Technology of China", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.pingan.com;http://www.ustc.edu.cn", + "aff_unique_abbr": "Ping An Tech;USTC", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26650", + "title": "On the Challenges of Using Reinforcement Learning in Precision Drug Dosing: Delay and Prolongedness of Action Effects", + "track": "aaai special track", + "status": "Technical", + "abstract": "Drug dosing is an important application of AI, which can be formulated as a Reinforcement Learning (RL) problem. In this paper, we identify two major challenges of using RL for drug dosing: delayed and prolonged effects of administering medications, which break the Markov assumption of the RL framework. 
We focus on prolongedness and define PAE-POMDP (Prolonged Action Effect-Partially Observable Markov Decision Process), a subclass of POMDPs in which the Markov assumption does not hold specifically due to prolonged effects of actions. Motivated by the pharmacology literature, we propose a simple and effective approach to converting drug dosing PAE-POMDPs into MDPs, enabling the use of the existing RL algorithms to solve such problems. We validate the proposed approach on a toy task, and a challenging glucose control task, for which we devise a clinically-inspired reward function. Our results demonstrate that: (1) the proposed method to restore the Markov assumption leads to significant improvements over a vanilla baseline; (2) the approach is competitive with recurrent policies which may inherently capture the prolonged affect of actions; (3) it is remarkably more time and memory efficient than the recurrent baseline and hence more suitable for real-time dosing control systems; and (4) it exhibits favourable qualitative behavior in our policy analysis.", + "primary_area": "ai for social impact", + "author": "Sumana Basu; Marc-Andr\u00e9 Legault; Adriana Romero-Soriano; Doina Precup", + "authorids": "", + "aff": "McGill University+Mila; McGill University+Mila; McGill University+Mila+Meta AI; McGill University+Mila", + "bibtex": "@article{Basu_Legault_Romero-Soriano_Precup_2023, title={On the Challenges of Using Reinforcement Learning in Precision Drug Dosing: Delay and Prolongedness of Action Effects}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26650}, DOI={10.1609/aaai.v37i12.26650}, abstractNote={Drug dosing is an important application of AI, which can be formulated as a Reinforcement Learning (RL) problem. In this paper, we identify two major challenges of using RL for drug dosing: delayed and prolonged effects of administering medications, which break the Markov assumption of the RL framework. 
We focus on prolongedness and define PAE-POMDP (Prolonged Action Effect-Partially Observable Markov Decision Process), a subclass of POMDPs in which the Markov assumption does not hold specifically due to prolonged effects of actions. Motivated by the pharmacology literature, we propose a simple and effective approach to converting drug dosing PAE-POMDPs into MDPs, enabling the use of the existing RL algorithms to solve such problems. We validate the proposed approach on a toy task, and a challenging glucose control task, for which we devise a clinically-inspired reward function. Our results demonstrate that: (1) the proposed method to restore the Markov assumption leads to significant improvements over a vanilla baseline; (2) the approach is competitive with recurrent policies which may inherently capture the prolonged affect of actions; (3) it is remarkably more time and memory efficient than the recurrent baseline and hence more suitable for real-time dosing control systems; and (4) it exhibits favourable qualitative behavior in our policy analysis.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Basu, Sumana and Legault, Marc-Andr\u00e9 and Romero-Soriano, Adriana and Precup, Doina}, year={2023}, month={Jun.}, pages={14102-14109} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26650/26422", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26650", + "pdf_size": 3119005, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12730545439432812578&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "mail.mcgill.ca;mcgill.ca;gmail.com;cs.mcgill.ca", + "email": "mail.mcgill.ca;mcgill.ca;gmail.com;cs.mcgill.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1+2;0+1", + "aff_unique_norm": "McGill University;Mila;Meta Platforms, Inc.", + "aff_unique_dep": ";Quebec Artificial Intelligence Institute;Meta AI", + 
"aff_unique_url": "https://www.mcgill.ca;https://mila.quebec;https://meta.com", + "aff_unique_abbr": "McGill;Mila;Meta", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0+1;0+0", + "aff_country_unique": "Canada;United States" + }, + { + "id": "article-25878", + "title": "On the Complexity of PAC Learning in Hilbert Spaces", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of binary classification from the point of\nview of learning convex polyhedra in Hilbert spaces, to which\none can reduce any binary classification problem. The problem\nof learning convex polyhedra in finite-dimensional spaces is\nsufficiently well studied in the literature. We generalize this\nproblem to that in a Hilbert space and propose an algorithm\nfor learning a polyhedron which correctly classifies at least\n1 \u2212 \u03b5 of the distribution, with a probability of at least 1 \u2212 \u03b4,\nwhere \u03b5 and \u03b4 are given parameters. Also, as a corollary, we\nimprove some previous bounds for polyhedral classification\nin finite-dimensional spaces.", + "primary_area": "machine learning i", + "author": "Sergei Chubanov", + "authorids": "", + "aff": "Bosch Center for Artificial Intelligence, Germany", + "bibtex": "@article{Chubanov_2023, title={On the Complexity of PAC Learning in Hilbert Spaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25878}, DOI={10.1609/aaai.v37i6.25878}, abstractNote={We study the problem of binary classification from the point of\nview of learning convex polyhedra in Hilbert spaces, to which\none can reduce any binary classification problem. The problem\nof learning convex polyhedra in finite-dimensional spaces is\nsufficiently well studied in the literature. 
We generalize this\nproblem to that in a Hilbert space and propose an algorithm\nfor learning a polyhedron which correctly classifies at least\n1 \u2212 \u03b5 of the distribution, with a probability of at least 1 \u2212 \u03b4,\nwhere \u03b5 and \u03b4 are given parameters. Also, as a corollary, we\nimprove some previous bounds for polyhedral classification\nin finite-dimensional spaces.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chubanov, Sergei}, year={2023}, month={Jun.}, pages={7202-7209} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25878/25650", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25878", + "pdf_size": 167725, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:LodxcZGdX6wJ:scholar.google.com/&scioq=On+the+Complexity+of+PAC+Learning+in+Hilbert+Spaces&hl=en&as_sdt=0,44", + "gs_version_total": 4, + "aff_domain": "de.bosch.com", + "email": "de.bosch.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Bosch Center for Artificial Intelligence", + "aff_unique_dep": "Artificial Intelligence", + "aff_unique_url": "https://www.bosch-ai.com", + "aff_unique_abbr": "BCAI", + "aff_country_unique_index": "0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26250", + "title": "On the Connection between Invariant Learning and Adversarial Training for Out-of-Distribution Generalization", + "track": "main", + "status": "Technical", + "abstract": "Despite impressive success in many tasks, deep learning models are shown to rely on spurious features, which will catastrophically fail when generalized to out-of-distribution (OOD) data. Invariant Risk Minimization (IRM) is proposed to alleviate this issue by extracting domain-invariant features for OOD generalization. 
Nevertheless, recent work shows that IRM is only effective for a certain type of distribution shift (e.g., correlation shift) while it fails for other cases (e.g., diversity shift). Meanwhile, another thread of method, Adversarial Training (AT), has shown better domain transfer performance, suggesting that it has the potential to be an effective candidate for extracting domain-invariant features. This paper investigates this possibility by exploring the similarity between the IRM and AT objectives. Inspired by this connection, we propose Domain-wise Adversarial Training (DAT), an AT-inspired method for alleviating distribution shift by domain-specific perturbations. Extensive experiments show that our proposed DAT can effectively remove domain-varying features and improve OOD generalization under both correlation shift and diversity shift.", + "primary_area": "machine learning iv", + "author": "Shiji Xin; Yifei Wang; Jingtong Su; Yisen Wang", + "authorids": "", + "aff": "Key Lab. of Machine Perception (MoE), School of Intelligence Science and Technology, Peking University+School of EECS, Peking University+Institute for Artificial Intelligence, Peking University; School of Mathematical Sciences, Peking University; Center for Data Science, New York University; Key Lab. of Machine Perception (MoE), School of Intelligence Science and Technology, Peking University+Institute for Artificial Intelligence, Peking University", + "bibtex": "@article{Xin_Wang_Su_Wang_2023, title={On the Connection between Invariant Learning and Adversarial Training for Out-of-Distribution Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26250}, DOI={10.1609/aaai.v37i9.26250}, abstractNote={Despite impressive success in many tasks, deep learning models are shown to rely on spurious features, which will catastrophically fail when generalized to out-of-distribution (OOD) data. 
Invariant Risk Minimization (IRM) is proposed to alleviate this issue by extracting domain-invariant features for OOD generalization. Nevertheless, recent work shows that IRM is only effective for a certain type of distribution shift (e.g., correlation shift) while it fails for other cases (e.g., diversity shift). Meanwhile, another thread of method, Adversarial Training (AT), has shown better domain transfer performance, suggesting that it has the potential to be an effective candidate for extracting domain-invariant features. This paper investigates this possibility by exploring the similarity between the IRM and AT objectives. Inspired by this connection, we propose Domain-wise Adversarial Training (DAT), an AT-inspired method for alleviating distribution shift by domain-specific perturbations. Extensive experiments show that our proposed DAT can effectively remove domain-varying features and improve OOD generalization under both correlation shift and diversity shift.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xin, Shiji and Wang, Yifei and Su, Jingtong and Wang, Yisen}, year={2023}, month={Jun.}, pages={10519-10527} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26250/26022", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26250", + "pdf_size": 4167178, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=937386001847011663&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;pku.edu.cn;nyu.edu;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;nyu.edu;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+0;0;1;0+0", + "aff_unique_norm": "Peking University;New York University", + "aff_unique_dep": "School of Intelligence Science and Technology;Center for Data Science", + "aff_unique_url": "http://www.pku.edu.cn;https://www.nyu.edu", + "aff_unique_abbr": "PKU;NYU", + 
"aff_campus_unique_index": ";1;2;", + "aff_campus_unique": ";Beijing;New York", + "aff_country_unique_index": "0+0+0;0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26651", + "title": "On the Cost of Demographic Parity in Influence Maximization", + "track": "aaai special track", + "status": "Technical", + "abstract": "Modeling and shaping how information spreads through a network is a major research topic in network analysis. While initially the focus has been mostly on efficiency, recently fairness criteria have been taken into account in this setting.\nMost work has focused on the maximin criteria however, and thus still different groups can receive very different shares of information. In this work we propose to consider fairness as a notion to be guaranteed by an algorithm rather than as a criterion to be maximized. To this end, we propose three optimization problems that aim at maximizing the overall spread while enforcing strict levels of demographic parity fairness via constraints (either ex-post or ex-ante). The level of fairness hence becomes a user choice rather than a property to be observed upon output. We study this setting from various perspectives.\nFirst, we prove that the cost of introducing demographic parity can be high in terms of both overall spread and computational complexity, i.e., the price of fairness may be unbounded for all three problems and optimal solutions are hard to compute, in some case even approximately or when fairness constraints may be violated. \nFor one of our problems, we still design an algorithm with both constant approximation factor and fairness violation.\nWe also give two heuristics that allow the user to choose the tolerated fairness violation. By means of an extensive experimental study, we show that our algorithms perform well in practice, that is, they achieve the best demographic parity fairness values. 
For certain instances we additionally even obtain an overall spread comparable to the most efficient algorithms that come without any fairness guarantee, indicating that the empirical price of fairness may actually be small when using our algorithms.", + "primary_area": "ai for social impact", + "author": "Ruben Becker; Gianlorenzo D'Angelo; Sajjad Ghobadi", + "authorids": "", + "aff": "Ca\u2019 Foscari University of Venice, Italy; Gran Sasso Science Institute, L\u2019Aquila, Italy; Gran Sasso Science Institute, L\u2019Aquila, Italy", + "bibtex": "@article{Becker_D\u2019Angelo_Ghobadi_2023, title={On the Cost of Demographic Parity in Influence Maximization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26651}, DOI={10.1609/aaai.v37i12.26651}, abstractNote={Modeling and shaping how information spreads through a network is a major research topic in network analysis. While initially the focus has been mostly on efficiency, recently fairness criteria have been taken into account in this setting.\nMost work has focused on the maximin criteria however, and thus still different groups can receive very different shares of information. In this work we propose to consider fairness as a notion to be guaranteed by an algorithm rather than as a criterion to be maximized. To this end, we propose three optimization problems that aim at maximizing the overall spread while enforcing strict levels of demographic parity fairness via constraints (either ex-post or ex-ante). The level of fairness hence becomes a user choice rather than a property to be observed upon output. We study this setting from various perspectives.\nFirst, we prove that the cost of introducing demographic parity can be high in terms of both overall spread and computational complexity, i.e., the price of fairness may be unbounded for all three problems and optimal solutions are hard to compute, in some case even approximately or when fairness constraints may be violated. 
For one of our problems, we still design an algorithm with both constant approximation factor and fairness violation.\nWe also give two heuristics that allow the user to choose the tolerated fairness violation. By means of an extensive experimental study, we show that our algorithms perform well in practice, that is, they achieve the best demographic parity fairness values. For certain instances we additionally even obtain an overall spread comparable to the most efficient algorithms that come without any fairness guarantee, indicating that the empirical price of fairness may actually be small when using our algorithms.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Becker, Ruben and D\u2019Angelo, Gianlorenzo and Ghobadi, Sajjad}, year={2023}, month={Jun.}, pages={14110-14118} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26651/26423", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26651", + "pdf_size": 1910308, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15629218334668761732&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "unive.it;gssi.it;gssi.it", + "email": "unive.it;gssi.it;gssi.it", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Ca\u2019 Foscari University of Venice;Gran Sasso Science Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unive.it;https://www.gssi.it", + "aff_unique_abbr": "Ca\u2019 Foscari;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";L\u2019Aquila", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26707", + "title": "On the Effectiveness of Curriculum Learning in Educational Text Scoring", + "track": "aaai special track", + "status": "Technical", + "abstract": "Automatic Text Scoring (ATS) is a widely-investigated task in education. 
Existing approaches often stressed the structure design of an ATS model and neglected the training process of the model. Considering the difficult nature of this task, we argued that the performance of an ATS model could be potentially boosted by carefully selecting data of varying complexities in the training process. Therefore, we aimed to investigate the effectiveness of curriculum learning (CL) in scoring educational text. Specifically, we designed two types of difficulty measurers: (i) pre-defined, calculated by measuring a sample's readability, length, the number of grammatical errors or unique words it contains; and (ii) automatic, calculated based on whether a model in a training epoch can accurately score the samples. These measurers were tested in both the easy-to-hard to hard-to-easy training paradigms. Through extensive evaluations on two widely-used datasets (one for short answer scoring and the other for long essay scoring), we demonstrated that (a) CL indeed could boost the performance of state-of-the-art ATS models, and the maximum improvement could be up to 4.5%, but most improvements were achieved when assessing short and easy answers; (b) the pre-defined measurer calculated based on the number of grammatical errors contained in a text sample tended to outperform the other difficulty measurers across different training paradigms.", + "primary_area": "ai for social impact", + "author": "Zijie Zeng; Dragan Gasevic; Guangliang Chen", + "authorids": "", + "aff": "Centre for Learning Analytics, Monash University, Australia; Centre for Learning Analytics, Monash University, Australia; Centre for Learning Analytics, Monash University, Australia", + "bibtex": "@article{Zeng_Gasevic_Chen_2023, title={On the Effectiveness of Curriculum Learning in Educational Text Scoring}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26707}, DOI={10.1609/aaai.v37i12.26707}, abstractNote={Automatic Text Scoring (ATS) is a widely-investigated task in 
education. Existing approaches often stressed the structure design of an ATS model and neglected the training process of the model. Considering the difficult nature of this task, we argued that the performance of an ATS model could be potentially boosted by carefully selecting data of varying complexities in the training process. Therefore, we aimed to investigate the effectiveness of curriculum learning (CL) in scoring educational text. Specifically, we designed two types of difficulty measurers: (i) pre-defined, calculated by measuring a sample\u2019s readability, length, the number of grammatical errors or unique words it contains; and (ii) automatic, calculated based on whether a model in a training epoch can accurately score the samples. These measurers were tested in both the easy-to-hard to hard-to-easy training paradigms. Through extensive evaluations on two widely-used datasets (one for short answer scoring and the other for long essay scoring), we demonstrated that (a) CL indeed could boost the performance of state-of-the-art ATS models, and the maximum improvement could be up to 4.5%, but most improvements were achieved when assessing short and easy answers; (b) the pre-defined measurer calculated based on the number of grammatical errors contained in a text sample tended to outperform the other difficulty measurers across different training paradigms.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Zijie and Gasevic, Dragan and Chen, Guangliang}, year={2023}, month={Jun.}, pages={14602-14610} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26707/26479", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26707", + "pdf_size": 244145, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14082947586276228661&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "monash.edu;monash.edu;monash.edu", + "email": 
"monash.edu;monash.edu;monash.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Monash University", + "aff_unique_dep": "Centre for Learning Analytics", + "aff_unique_url": "https://www.monash.edu", + "aff_unique_abbr": "Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26505", + "title": "On the Effectiveness of Parameter-Efficient Fine-Tuning", + "track": "main", + "status": "Technical", + "abstract": "Fine-tuning pre-trained models has been ubiquitously proven to be effective in a wide range of NLP tasks. However, fine-tuning the whole model is parameter inefficient as it always yields an entirely new model for each task. Currently, many research works propose to only fine-tune a small portion of the parameters while keeping most of the parameters shared across different tasks. These methods achieve surprisingly good performance and are shown to be more stable than their corresponding fully fine-tuned counterparts. However, such kind of methods is still not well understood. Some natural questions arise: How does the parameter sparsity lead to promising performance? Why is the model more stable than the fully fine-tuned models? How to choose the tunable parameters? In this paper, we first categorize the existing methods into random approaches, rule-based approaches, and projection-based approaches based on how they choose which parameters to tune. Then, we show that all of the methods are actually sparse fine-tuned models and conduct a novel theoretical analysis of them. We indicate that the sparsity is actually imposing a regularization on the original model by controlling the upper bound of the stability. Such stability leads to better generalization capability which has been empirically observed in a lot of recent research works. 
Despite the effectiveness of sparsity grounded by our theory, it still remains an open problem of how to choose the tunable parameters. Currently, the random and rule-based methods do not utilize task-specific data information while the projection-based approaches suffer from the projection discontinuity problem. To better choose the tunable parameters, we propose a novel Second-order Approximation Method (SAM) which approximates the original problem with an analytically solvable optimization function. The tunable parameters are determined by directly optimizing the approximation function. We conduct extensive experiments on several tasks. The experimental results show that our proposed SAM model outperforms many strong baseline models and it also verifies our theoretical analysis. The source code of this paper can be obtained from https://github.com/fuzihaofzh/AnalyzeParameterEff\\/icientFinetune .", + "primary_area": "speech natural language processing", + "author": "Zihao Fu; Haoran Yang; Anthony Man-Cho So; Wai Lam; Lidong Bing; Nigel Collier", + "authorids": "", + "aff": "Language Technology Lab, University of Cambridge; The Chinese University of Hong Kong; The Chinese University of Hong Kong; The Chinese University of Hong Kong; DAMO Academy, Alibaba Group; Language Technology Lab, University of Cambridge", + "bibtex": "@article{Fu_Yang_So_Lam_Bing_Collier_2023, title={On the Effectiveness of Parameter-Efficient Fine-Tuning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26505}, DOI={10.1609/aaai.v37i11.26505}, abstractNote={Fine-tuning pre-trained models has been ubiquitously proven to be effective in a wide range of NLP tasks. However, fine-tuning the whole model is parameter inefficient as it always yields an entirely new model for each task. Currently, many research works propose to only fine-tune a small portion of the parameters while keeping most of the parameters shared across different tasks. 
These methods achieve surprisingly good performance and are shown to be more stable than their corresponding fully fine-tuned counterparts. However, such kind of methods is still not well understood. Some natural questions arise: How does the parameter sparsity lead to promising performance? Why is the model more stable than the fully fine-tuned models? How to choose the tunable parameters? In this paper, we first categorize the existing methods into random approaches, rule-based approaches, and projection-based approaches based on how they choose which parameters to tune. Then, we show that all of the methods are actually sparse fine-tuned models and conduct a novel theoretical analysis of them. We indicate that the sparsity is actually imposing a regularization on the original model by controlling the upper bound of the stability. Such stability leads to better generalization capability which has been empirically observed in a lot of recent research works. Despite the effectiveness of sparsity grounded by our theory, it still remains an open problem of how to choose the tunable parameters. Currently, the random and rule-based methods do not utilize task-specific data information while the projection-based approaches suffer from the projection discontinuity problem. To better choose the tunable parameters, we propose a novel Second-order Approximation Method (SAM) which approximates the original problem with an analytically solvable optimization function. The tunable parameters are determined by directly optimizing the approximation function. We conduct extensive experiments on several tasks. The experimental results show that our proposed SAM model outperforms many strong baseline models and it also verifies our theoretical analysis. 
The source code of this paper can be obtained from https://github.com/fuzihaofzh/AnalyzeParameterEff\\/icientFinetune .}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fu, Zihao and Yang, Haoran and So, Anthony Man-Cho and Lam, Wai and Bing, Lidong and Collier, Nigel}, year={2023}, month={Jun.}, pages={12799-12807} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26505/26277", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26505", + "pdf_size": 207621, + "gs_citation": 204, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7932814826975079724&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cam.ac.uk;se.cuhk.edu.hk;se.cuhk.edu.hk;se.cuhk.edu.hk;alibaba-inc.com;cam.ac.uk", + "email": "cam.ac.uk;se.cuhk.edu.hk;se.cuhk.edu.hk;se.cuhk.edu.hk;alibaba-inc.com;cam.ac.uk", + "github": "https://github.com/fuzihaofzh/AnalyzeParameterEfficientFinetune", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;2;0", + "aff_unique_norm": "University of Cambridge;The Chinese University of Hong Kong;Alibaba Group", + "aff_unique_dep": "Language Technology Lab;;DAMO Academy", + "aff_unique_url": "https://www.cam.ac.uk;https://www.cuhk.edu.hk;https://www.alibaba-group.com", + "aff_unique_abbr": "Cambridge;CUHK;Alibaba", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;1;1;1;1;0", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "article-26055", + "title": "On the Expressive Flexibility of Self-Attention Matrices", + "track": "main", + "status": "Technical", + "abstract": "Transformer networks are able to capture patterns in data coming from many domains (text, images, videos, proteins, etc.) with little or no change to architecture components. We perform a theoretical analysis of the core component responsible for signal propagation between elements, i.e. the self-attention matrix. 
We ask the following question: Can self-attention matrix approximate arbitrary patterns? How small is the query dimension d required for such approximation? Our first result shows that the task of deciding whether approximation of a given pattern is possible or not is NP-hard for a fixed d greater than one. In practice, self-attention matrix typically exhibits two properties: it is sparse, and it changes dynamically depending on the input to the module. Motivated by this observation, we show that the self-attention matrix can provably approximate sparse matrices. While the parameters of self-attention are fixed, various sparse matrices can be approximated by only modifying the inputs. Our proof is based on the random projection technique and uses the seminal Johnson-Lindenstrauss lemma. In particular, we show that, in order to approximate any sparse matrix up to a given precision defined in terms of preserving matrix element ratios, d grows only logarithmically with the sequence length n.", + "primary_area": "machine learning ii", + "author": "Valerii Likhosherstov; Krzysztof Choromanski; Adrian Weller", + "authorids": "", + "aff": "University of Cambridge; Google Brain; University of Cambridge + The Alan Turing Institute", + "bibtex": "@article{Likhosherstov_Choromanski_Weller_2023, title={On the Expressive Flexibility of Self-Attention Matrices}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26055}, DOI={10.1609/aaai.v37i7.26055}, abstractNote={Transformer networks are able to capture patterns in data coming from many domains (text, images, videos, proteins, etc.) with little or no change to architecture components. We perform a theoretical analysis of the core component responsible for signal propagation between elements, i.e. the self-attention matrix. We ask the following question: Can self-attention matrix approximate arbitrary patterns? How small is the query dimension d required for such approximation? 
Our first result shows that the task of deciding whether approximation of a given pattern is possible or not is NP-hard for a fixed d greater than one. In practice, self-attention matrix typically exhibits two properties: it is sparse, and it changes dynamically depending on the input to the module. Motivated by this observation, we show that the self-attention matrix can provably approximate sparse matrices. While the parameters of self-attention are fixed, various sparse matrices can be approximated by only modifying the inputs. Our proof is based on the random projection technique and uses the seminal Johnson-Lindenstrauss lemma. In particular, we show that, in order to approximate any sparse matrix up to a given precision defined in terms of preserving matrix element ratios, d grows only logarithmically with the sequence length n.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Likhosherstov, Valerii and Choromanski, Krzysztof and Weller, Adrian}, year={2023}, month={Jun.}, pages={8773-8781} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26055/25827", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26055", + "pdf_size": 875942, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14153508715217747851&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 2, + "aff_domain": "cam.ac.uk; ; ", + "email": "cam.ac.uk; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "University of Cambridge;Google;The Alan Turing Institute", + "aff_unique_dep": ";Google Brain;", + "aff_unique_url": "https://www.cam.ac.uk;https://brain.google.com;https://www.turing.ac.uk", + "aff_unique_abbr": "Cambridge;Google Brain;ATI", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Cambridge;Mountain View;", + "aff_country_unique_index": "0;1;0+0", + "aff_country_unique": "United Kingdom;United States" + }, + { + 
"id": "article-26155", + "title": "On the Sample Complexity of Representation Learning in Multi-Task Bandits with Global and Local Structure", + "track": "main", + "status": "Technical", + "abstract": "We investigate the sample complexity of learning the optimal arm for multi-task bandit problems. Arms consist of two components: one that is shared across tasks (that we call representation) and one that is task-specific (that we call predictor). \nThe objective is to learn the optimal (representation, predictor)-pair for each task, under the assumption that the optimal representation is common to all tasks. Within this framework, efficient learning algorithms should transfer knowledge across tasks. \nWe consider the best-arm identification problem with fixed confidence, where, in each round, the learner actively selects both a task, and an arm, and observes the corresponding reward.\nWe derive instance-specific sample complexity lower bounds, which apply to any algorithm that identifies the best representation, and the best predictor for a task, with prescribed confidence levels. \nWe devise an algorithm, OSRL-SC, that can learn the optimal representation, and the optimal predictors, separately, and whose sample complexity approaches the lower bound. 
Theoretical and numerical results demonstrate that OSRL-SC achieves a better scaling with respect to the number of tasks compared to the classical best-arm identification algorithm.\nThe code can be found here https://github.com/rssalessio/OSRL-SC.", + "primary_area": "machine learning iii", + "author": "Alessio Russo; Alexandre Proutiere", + "authorids": "", + "aff": "Division of Decision and Control Systems, KTH Royal Institute of Technology, Stockholm, SE; Division of Decision and Control Systems, KTH Royal Institute of Technology, Stockholm, SE", + "bibtex": "@article{Russo_Proutiere_2023, title={On the Sample Complexity of Representation Learning in Multi-Task Bandits with Global and Local Structure}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26155}, DOI={10.1609/aaai.v37i8.26155}, abstractNote={We investigate the sample complexity of learning the optimal arm for multi-task bandit problems. Arms consist of two components: one that is shared across tasks (that we call representation) and one that is task-specific (that we call predictor). The objective is to learn the optimal (representation, predictor)-pair for each task, under the assumption that the optimal representation is common to all tasks. Within this framework, efficient learning algorithms should transfer knowledge across tasks. We consider the best-arm identification problem with fixed confidence, where, in each round, the learner actively selects both a task, and an arm, and observes the corresponding reward.\nWe derive instance-specific sample complexity lower bounds, which apply to any algorithm that identifies the best representation, and the best predictor for a task, with prescribed confidence levels. We devise an algorithm, OSRL-SC, that can learn the optimal representation, and the optimal predictors, separately, and whose sample complexity approaches the lower bound. 
Theoretical and numerical results demonstrate that OSRL-SC achieves a better scaling with respect to the number of tasks compared to the classical best-arm identification algorithm.\nThe code can be found here https://github.com/rssalessio/OSRL-SC.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Russo, Alessio and Proutiere, Alexandre}, year={2023}, month={Jun.}, pages={9658-9667} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26155/25927", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26155", + "pdf_size": 452193, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12415726048781271689&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kth.se;kth.se", + "email": "kth.se;kth.se", + "github": "https://github.com/rssalessio/OSRL-SC", + "project": "https://arxiv.org/abs/2211.15129", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "KTH Royal Institute of Technology", + "aff_unique_dep": "Division of Decision and Control Systems", + "aff_unique_url": "https://www.kth.se", + "aff_unique_abbr": "KTH", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stockholm", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Sweden" + }, + { + "id": "article-25989", + "title": "On the Sample Complexity of Vanilla Model-Based Offline Reinforcement Learning with Dependent Samples", + "track": "main", + "status": "Technical", + "abstract": "Offline reinforcement learning (offline RL) considers problems where learning is performed using only previously collected samples and is helpful for the settings in which collecting new data is costly or risky. In model-based offline RL, the learner performs estimation (or optimization) using a model constructed according to the empirical transition frequencies. 
We analyze the sample complexity of vanilla model-based offline RL with dependent samples in the infinite-horizon discounted-reward setting. In our setting, the samples obey the dynamics of the Markov decision process and, consequently, may have interdependencies. Under no assumption of independent samples, we provide a high-probability, polynomial sample complexity bound for vanilla model-based off-policy evaluation that requires partial or uniform coverage. We extend this result to the off-policy optimization under uniform coverage. As a comparison to the model-based approach, we analyze the sample complexity of off-policy evaluation with vanilla importance sampling in the infinite-horizon setting. Finally, we provide an estimator that outperforms the sample-mean estimator for almost deterministic dynamics that are prevalent in reinforcement learning.", + "primary_area": "machine learning ii", + "author": "Mustafa O. Karabag; Ufuk Topcu", + "authorids": "", + "aff": "The University of Texas at Austin, Austin, TX, USA; The University of Texas at Austin, Austin, TX, USA", + "bibtex": "@article{Karabag_Topcu_2023, title={On the Sample Complexity of Vanilla Model-Based Offline Reinforcement Learning with Dependent Samples}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25989}, DOI={10.1609/aaai.v37i7.25989}, abstractNote={Offline reinforcement learning (offline RL) considers problems where learning is performed using only previously collected samples and is helpful for the settings in which collecting new data is costly or risky. In model-based offline RL, the learner performs estimation (or optimization) using a model constructed according to the empirical transition frequencies. We analyze the sample complexity of vanilla model-based offline RL with dependent samples in the infinite-horizon discounted-reward setting. In our setting, the samples obey the dynamics of the Markov decision process and, consequently, may have interdependencies. 
Under no assumption of independent samples, we provide a high-probability, polynomial sample complexity bound for vanilla model-based off-policy evaluation that requires partial or uniform coverage. We extend this result to the off-policy optimization under uniform coverage. As a comparison to the model-based approach, we analyze the sample complexity of off-policy evaluation with vanilla importance sampling in the infinite-horizon setting. Finally, we provide an estimator that outperforms the sample-mean estimator for almost deterministic dynamics that are prevalent in reinforcement learning.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Karabag, Mustafa O. and Topcu, Ufuk}, year={2023}, month={Jun.}, pages={8195-8202} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25989/25761", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25989", + "pdf_size": 164334, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15623974567613079328&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "utexas.edu;utexas.edu", + "email": "utexas.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25859", + "title": "On the Stability and Generalization of Triplet Learning", + "track": "main", + "status": "Technical", + "abstract": "Triplet learning, i.e. learning from triplet data, has attracted much attention in computer vision tasks with an extremely large number of categories, e.g., face recognition and person re-identification. 
Albeit with rapid progress in designing and applying triplet learning algorithms, there is a lacking study on the theoretical understanding of their generalization performance. To fill this gap, this paper investigates the generalization guarantees of triplet learning by leveraging the stability analysis. Specifically, we establish the first general high-probability generalization bound for the triplet learning algorithm satisfying the uniform stability, and then obtain the excess risk bounds of the order O(log(n)/(\u221an) ) for both stochastic gradient descent (SGD) and regularized risk minimization (RRM), where 2n is approximately equal to the number of training samples. Moreover, an optimistic generalization bound in expectation as fast as O(1/n) is derived for RRM in a low noise case via the on-average stability analysis. Finally, our results are applied to triplet metric learning to characterize its theoretical underpinning.", + "primary_area": "machine learning i", + "author": "Jun Chen; Hong Chen; Xue Jiang; Bin Gu; Weifu Li; Tieliang Gong; Feng Zheng", + "authorids": "", + "aff": "College of Informatics, Huazhong Agricultural University, Wuhan 430070, China; College of Science, Huazhong Agricultural University, Wuhan 430070, China+ Engineering Research Center of Intelligent Technology for Agriculture, Ministry of Education, Wuhan 430070, China+ Key Laboratory of Smart Farming for Agricultural Animals, Wuhan 430070, China; Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen 518055, China; Mohamed bin Zayed University of Artificial Intelligence, Abu Dhabi, United Arab Emirates; College of Science, Huazhong Agricultural University, Wuhan 430070, China+ Engineering Research Center of Intelligent Technology for Agriculture, Ministry of Education, Wuhan 430070, China+ Key Laboratory of Smart Farming for Agricultural Animals, Wuhan 430070, China; School of Computer Science and Technology, Xi\u2019an Jiaotong 
University, Xi\u2019an 710049, China+ Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering, Ministry of Education, Xi\u2019an 710049, China; Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen 518055, China", + "bibtex": "@article{Chen_Chen_Jiang_Gu_Li_Gong_Zheng_2023, title={On the Stability and Generalization of Triplet Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25859}, DOI={10.1609/aaai.v37i6.25859}, abstractNote={Triplet learning, i.e. learning from triplet data, has attracted much attention in computer vision tasks with an extremely large number of categories, e.g., face recognition and person re-identification. Albeit with rapid progress in designing and applying triplet learning algorithms, there is a lacking study on the theoretical understanding of their generalization performance. To fill this gap, this paper investigates the generalization guarantees of triplet learning by leveraging the stability analysis. Specifically, we establish the first general high-probability generalization bound for the triplet learning algorithm satisfying the uniform stability, and then obtain the excess risk bounds of the order O(log(n)/(\u221an) ) for both stochastic gradient descent (SGD) and regularized risk minimization (RRM), where 2n is approximately equal to the number of training samples. Moreover, an optimistic generalization bound in expectation as fast as O(1/n) is derived for RRM in a low noise case via the on-average stability analysis. 
Finally, our results are applied to triplet metric learning to characterize its theoretical underpinning.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jun and Chen, Hong and Jiang, Xue and Gu, Bin and Li, Weifu and Gong, Tieliang and Zheng, Feng}, year={2023}, month={Jun.}, pages={7033-7041} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25859/25631", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25859", + "pdf_size": 159933, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15706474271799721362&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": ";mail.hzau.edu.cn; ; ; ; ; ", + "email": ";mail.hzau.edu.cn; ; ; ; ; ", + "github": "", + "project": "http://arxiv.org/abs/2302.09815", + "author_num": 7, + "aff_unique_index": "0;0+1+2;3;4;0+1+2;5+6;3", + "aff_unique_norm": "Huazhong Agricultural University;Engineering Research Center of Intelligent Technology for Agriculture;Key Laboratory of Smart Farming for Agricultural Animals;Southern University of Science and Technology;Mohamed bin Zayed University of Artificial Intelligence;Xi'an Jiaotong University;Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering", + "aff_unique_dep": "College of Informatics;Ministry of Education;;Department of Computer Science and Engineering;;School of Computer Science and Technology;Ministry of Education", + "aff_unique_url": "http://www.hzau.edu.cn;;;https://www.sustech.edu.cn;https://www.mbzuai.ac.ae;http://www.xjtu.edu.cn;", + "aff_unique_abbr": "HZAU;;;SUSTech;MBZUAI;XJTU;", + "aff_campus_unique_index": "0;0;2;3;0;4;2", + "aff_campus_unique": "Wuhan;;Shenzhen;Abu Dhabi;Xi'an", + "aff_country_unique_index": "0;0+0+0;0;1;0+0+0;0+0;0", + "aff_country_unique": "China;United Arab Emirates" + }, + { + "id": "article-26393", + "title": "On the Vulnerability of Backdoor Defenses for Federated Learning", + "track": "main", + "status": 
"Technical", + "abstract": "Federated learning (FL) is a popular distributed machine learning paradigm which enables jointly training a global model without sharing clients' data. However, its repetitive server-client communication gives room for possible backdoor attacks which aims to mislead the global model into a targeted misprediction when a specific trigger pattern is presented. In response to such backdoor threats on federated learning, various defense measures have been proposed. In this paper, we study whether the current defense mechanisms truly neutralize the backdoor threats from federated learning in a practical setting by proposing a new federated backdoor attack framework for possible countermeasures. Different from traditional training (on triggered data) and rescaling (the malicious client model) based backdoor injection, the proposed backdoor attack framework (1) directly modifies (a small proportion of) local model weights to inject the backdoor trigger via sign flips; (2) jointly optimize the trigger pattern with the client model, thus is more persistent and stealthy for circumventing existing defenses. In a case study, we examine the strength and weaknesses of several recent federated backdoor defenses from three major categories and provide suggestions to the practitioners when training federated models in practice.", + "primary_area": "philosophy and ethics of ai", + "author": "Pei Fang; Jinghui Chen", + "authorids": "", + "aff": "Tongji University; Pennsylvania State University", + "bibtex": "@article{Fang_Chen_2023, title={On the Vulnerability of Backdoor Defenses for Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26393}, DOI={10.1609/aaai.v37i10.26393}, abstractNote={Federated learning (FL) is a popular distributed machine learning paradigm which enables jointly training a global model without sharing clients\u2019 data. 
However, its repetitive server-client communication gives room for possible backdoor attacks which aims to mislead the global model into a targeted misprediction when a specific trigger pattern is presented. In response to such backdoor threats on federated learning, various defense measures have been proposed. In this paper, we study whether the current defense mechanisms truly neutralize the backdoor threats from federated learning in a practical setting by proposing a new federated backdoor attack framework for possible countermeasures. Different from traditional training (on triggered data) and rescaling (the malicious client model) based backdoor injection, the proposed backdoor attack framework (1) directly modifies (a small proportion of) local model weights to inject the backdoor trigger via sign flips; (2) jointly optimize the trigger pattern with the client model, thus is more persistent and stealthy for circumventing existing defenses. In a case study, we examine the strength and weaknesses of several recent federated backdoor defenses from three major categories and provide suggestions to the practitioners when training federated models in practice.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Pei and Chen, Jinghui}, year={2023}, month={Jun.}, pages={11800-11808} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26393/26165", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26393", + "pdf_size": 1581629, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=323278607852340135&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;psu.edu", + "email": "gmail.com;psu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Tongji University;Pennsylvania State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tongji.edu.cn;https://www.psu.edu", + 
"aff_unique_abbr": "Tongji;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25135", + "title": "One Is All: Bridging the Gap between Neural Radiance Fields Architectures with Progressive Volume Distillation", + "track": "main", + "status": "Technical", + "abstract": "Neural Radiance Fields (NeRF) methods have proved effective as compact, high-quality and versatile representations for 3D scenes, and enable downstream tasks such as editing, retrieval, navigation, etc. Various neural architectures are vying for the core structure of NeRF, including the plain Multi-Layer Perceptron (MLP), sparse tensors, low-rank tensors, hashtables and their compositions. Each of these representations has its particular set of trade-offs. For example, the hashtable-based representations admit faster training and rendering but their lack of clear geometric meaning hampers downstream tasks like spatial-relation-aware editing. In this paper, we propose Progressive Volume Distillation (PVD), a systematic distillation method that allows any-to-any conversions between different architectures, including MLP, sparse or low-rank tensors, hashtables and their compositions. PVD consequently empowers downstream applications to optimally adapt the neural representations for the task at hand in a post hoc fashion. The conversions are fast, as distillation is progressively performed on different levels of volume representations, from shallower to deeper. We also employ special treatment of density to deal with its specific numerical instability problem. Empirical evidence is presented to validate our method on the NeRF-Synthetic, LLFF and TanksAndTemples datasets. 
For example, with PVD, an MLP-based NeRF model can be distilled from a hashtable-based Instant-NGP model at a 10~20X faster speed than being trained the original NeRF from scratch, while achieving a superior level of synthesis quality. Code is available at https://github.com/megvii-research/AAAI2023-PVD.", + "primary_area": "computer vision i", + "author": "Shuangkang Fang; Weixin Xu; Heng Wang; Yi Yang; Yufeng Wang; Shuchang Zhou", + "authorids": "", + "aff": "Beihang University; Megvii Research; Megvii Research; Megvii Research; Beihang University; Megvii Research", + "bibtex": "@article{Fang_Xu_Wang_Yang_Wang_Zhou_2023, title={One Is All: Bridging the Gap between Neural Radiance Fields Architectures with Progressive Volume Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25135}, DOI={10.1609/aaai.v37i1.25135}, abstractNote={Neural Radiance Fields (NeRF) methods have proved effective as compact, high-quality and versatile representations for 3D scenes, and enable downstream tasks such as editing, retrieval, navigation, etc. Various neural architectures are vying for the core structure of NeRF, including the plain Multi-Layer Perceptron (MLP), sparse tensors, low-rank tensors, hashtables and their compositions. Each of these representations has its particular set of trade-offs. For example, the hashtable-based representations admit faster training and rendering but their lack of clear geometric meaning hampers downstream tasks like spatial-relation-aware editing. In this paper, we propose Progressive Volume Distillation (PVD), a systematic distillation method that allows any-to-any conversions between different architectures, including MLP, sparse or low-rank tensors, hashtables and their compositions. PVD consequently empowers downstream applications to optimally adapt the neural representations for the task at hand in a post hoc fashion. 
The conversions are fast, as distillation is progressively performed on different levels of volume representations, from shallower to deeper. We also employ special treatment of density to deal with its specific numerical instability problem. Empirical evidence is presented to validate our method on the NeRF-Synthetic, LLFF and TanksAndTemples datasets. For example, with PVD, an MLP-based NeRF model can be distilled from a hashtable-based Instant-NGP model at a 10~20X faster speed than being trained the original NeRF from scratch, while achieving a superior level of synthesis quality. Code is available at https://github.com/megvii-research/AAAI2023-PVD.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Shuangkang and Xu, Weixin and Wang, Heng and Yang, Yi and Wang, Yufeng and Zhou, Shuchang}, year={2023}, month={Jun.}, pages={597-605} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25135/24907", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25135", + "pdf_size": 4499458, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=883711186984609579&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;megvii.com;megvii.com;megvii.com;buaa.edu.cn;megvii.com", + "email": "buaa.edu.cn;megvii.com;megvii.com;megvii.com;buaa.edu.cn;megvii.com", + "github": "https://github.com/megvii-research/AAAI2023-PVD", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;1", + "aff_unique_norm": "Beihang University;Megvii Technology", + "aff_unique_dep": ";Megvii Research", + "aff_unique_url": "http://www.buaa.edu.cn/;https://www.megvii.com", + "aff_unique_abbr": "BUAA;Megvii", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25417", + "title": "One-Shot Replay: Boosting Incremental Object Detection via Retrospecting One 
Object", + "track": "main", + "status": "Technical", + "abstract": "Modern object detectors are ill-equipped to incrementally learn new emerging object classes over time due to the well-known phenomenon of catastrophic forgetting. Due to data privacy or limited storage, few or no images of the old data can be stored for replay. In this paper, we design a novel One-Shot Replay (OSR) method for incremental object detection, which is an augmentation-based method. Rather than storing original images, only one object-level sample for each old class is stored to reduce memory usage significantly, and we find that copy-paste is a harmonious way to replay for incremental object detection. In the incremental learning procedure, diverse augmented samples with co-occurrence of old and new objects to existing training data are generated. To introduce more variants for objects of old classes, we propose two augmentation modules. The object augmentation module aims to enhance the ability of the detector to perceive potential unknown objects. The feature augmentation module explores the relations between old and new classes and augments the feature space via analogy. 
Extensive experimental results on VOC2007 and COCO demonstrate that OSR can outperform the state-of-the-art incremental object detection methods without using extra wild data.", + "primary_area": "computer vision iii", + "author": "Dongbao Yang; Yu Zhou; Xiaopeng Hong; Aoting Zhang; Weiping Wang", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Harbin Institute of Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences", + "bibtex": "@article{Yang_Zhou_Hong_Zhang_Wang_2023, title={One-Shot Replay: Boosting Incremental Object Detection via Retrospecting One Object}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25417}, DOI={10.1609/aaai.v37i3.25417}, abstractNote={Modern object detectors are ill-equipped to incrementally learn new emerging object classes over time due to the well-known phenomenon of catastrophic forgetting. Due to data privacy or limited storage, few or no images of the old data can be stored for replay. In this paper, we design a novel One-Shot Replay (OSR) method for incremental object detection, which is an augmentation-based method. Rather than storing original images, only one object-level sample for each old class is stored to reduce memory usage significantly, and we find that copy-paste is a harmonious way to replay for incremental object detection. In the incremental learning procedure, diverse augmented samples with co-occurrence of old and new objects to existing training data are generated. To introduce more variants for objects of old classes, we propose two augmentation modules. 
The object augmentation module aims to enhance the ability of the detector to perceive potential unknown objects. The feature augmentation module explores the relations between old and new classes and augments the feature space via analogy. Extensive experimental results on VOC2007 and COCO demonstrate that OSR can outperform the state-of-the-art incremental object detection methods without using extra wild data.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Dongbao and Zhou, Yu and Hong, Xiaopeng and Zhang, Aoting and Wang, Weiping}, year={2023}, month={Jun.}, pages={3127-3135} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25417/25189", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25417", + "pdf_size": 321613, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14066424268207106363&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "iie.ac.cn;iie.ac.cn;ieee.org;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;ieee.org;iie.ac.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;0+1;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Harbin Institute of Technology", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "CAS;UCAS;HIT", + "aff_campus_unique_index": ";;1;", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0+0;0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25604", + "title": "One-for-All: Proposal Masked Cross-Class Anomaly Detection", + "track": "main", + "status": "Technical", + "abstract": "One of the most challenges for anomaly detection (AD) is how to learn one unified and generalizable model to adapt to multi-class especially cross-class 
settings: the model is trained with normal samples from seen classes with the objective to detect anomalies from both seen and unseen classes. In this work, we propose a novel Proposal Masked Anomaly Detection (PMAD) approach for such challenging multi- and cross-class anomaly detection. The proposed PMAD can be adapted to seen and unseen classes by two key designs: MAE-based patch-level reconstruction and prototype-guided proposal masking. First, motivated by MAE (Masked AutoEncoder), we develop a patch-level reconstruction model rather than the image-level reconstruction adopted in most AD methods for this reason: the masked patches in unseen classes can be reconstructed well by using the visible patches and the adaptive reconstruction capability of MAE. Moreover, we improve MAE by ViT encoder-decoder architecture, combinational masking, and visual tokens as reconstruction objectives to make it more suitable for anomaly detection. Second, we develop a two-stage anomaly detection manner during inference. In the proposal masking stage, the prototype-guided proposal masking module is utilized to generate proposals for suspicious anomalies as much as possible, then masked patches can be generated from the proposal regions. By masking most likely anomalous patches, the \u201cshortcut reconstruction\u201d issue (i.e., anomalous regions can be well reconstructed) can be mostly avoided. In the reconstruction stage, these masked patches are then reconstructed by the trained patch-level reconstruction model to determine if they are anomalies. Extensive experiments show that the proposed PMAD can outperform current state-of-the-art models significantly under the multi- and especially cross-class settings. 
Code will be publicly available at https://github.com/xcyao00/PMAD.", + "primary_area": "data mining and knowledge management", + "author": "Xincheng Yao; Chongyang Zhang; Ruoqi Li; Jun Sun; Zhenyu Liu", + "authorids": "", + "aff": "School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; Ningbo HTVision Digital Technology Co.,Ltd", + "bibtex": "@article{Yao_Zhang_Li_Sun_Liu_2023, title={One-for-All: Proposal Masked Cross-Class Anomaly Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25604}, DOI={10.1609/aaai.v37i4.25604}, abstractNote={One of the most challenges for anomaly detection (AD) is how to learn one unified and generalizable model to adapt to multi-class especially cross-class settings: the model is trained with normal samples from seen classes with the objective to detect anomalies from both seen and unseen classes. In this work, we propose a novel Proposal Masked Anomaly Detection (PMAD) approach for such challenging multi- and cross-class anomaly detection. The proposed PMAD can be adapted to seen and unseen classes by two key designs: MAE-based patch-level reconstruction and prototype-guided proposal masking. First, motivated by MAE (Masked AutoEncoder), we develop a patch-level reconstruction model rather than the image-level reconstruction adopted in most AD methods for this reason: the masked patches in unseen classes can be reconstructed well by using the visible patches and the adaptive reconstruction capability of MAE. 
Moreover, we improve MAE by ViT encoder-decoder architecture, combinational masking, and visual tokens as reconstruction objectives to make it more suitable for anomaly detection. Second, we develop a two-stage anomaly detection manner during inference. In the proposal masking stage, the prototype-guided proposal masking module is utilized to generate proposals for suspicious anomalies as much as possible, then masked patches can be generated from the proposal regions. By masking most likely anomalous patches, the \u201cshortcut reconstruction\u201d issue (i.e., anomalous regions can be well reconstructed) can be mostly avoided. In the reconstruction stage, these masked patches are then reconstructed by the trained patch-level reconstruction model to determine if they are anomalies. Extensive experiments show that the proposed PMAD can outperform current state-of-the-art models significantly under the multi- and especially cross-class settings. Code will be publicly available at https://github.com/xcyao00/PMAD.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yao, Xincheng and Zhang, Chongyang and Li, Ruoqi and Sun, Jun and Liu, Zhenyu}, year={2023}, month={Jun.}, pages={4792-4800} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25604/25376", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25604", + "pdf_size": 4654351, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13024767043602344298&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;163.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;163.com", + "github": "https://github.com/xcyao00/PMAD", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+0;0;0;1", + "aff_unique_norm": "Shanghai Jiao Tong University;Ningbo HTVision Digital Technology Co., Ltd", + "aff_unique_dep": "School of Electronic Information and 
Electrical Engineering;", + "aff_unique_url": "https://www.sjtu.edu.cn;", + "aff_unique_abbr": "SJTU;", + "aff_campus_unique_index": "0;0+0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26070", + "title": "Online Hyperparameter Optimization for Class-Incremental Learning", + "track": "main", + "status": "Technical", + "abstract": "Class-incremental learning (CIL) aims to train a classification model while the number of classes increases phase-by-phase. An inherent challenge of CIL is the stability-plasticity tradeoff, i.e., CIL models should keep stable to retain old knowledge and keep plastic to absorb new knowledge. However, none of the existing CIL models can achieve the optimal tradeoff in different data-receiving settings\u2014where typically the training-from-half (TFH) setting needs more stability, but the training-from-scratch (TFS) needs more plasticity. To this end, we design an online learning method that can adaptively optimize the tradeoff without knowing the setting as a priori. Specifically, we first introduce the key hyperparameters that influence the tradeoff, e.g., knowledge distillation (KD) loss weights, learning rates, and classifier types. Then, we formulate the hyperparameter optimization process as an online Markov Decision Process (MDP) problem and propose a specific algorithm to solve it. We apply local estimated rewards and a classic bandit algorithm Exp3 to address the issues when applying online MDP methods to the CIL protocol. Our method consistently improves top-performing CIL methods in both TFH and TFS settings, e.g., boosting the average accuracy of TFH and TFS by 2.2 percentage points on ImageNet-Full, compared to the state-of-the-art. 
Code is provided at https://class-il.mpi-inf.mpg.de/online/", + "primary_area": "machine learning ii", + "author": "Yaoyao Liu; Yingying Li; Bernt Schiele; Qianru Sun", + "authorids": "", + "aff": "Max Planck Institute for Informatics, Saarland Informatics Campus + Department of Computer Science, Johns Hopkins University; Computing and Mathematical Sciences, California Institute of Technology; Max Planck Institute for Informatics, Saarland Informatics Campus; School of Computing and Information Systems, Singapore Management University", + "bibtex": "@article{Liu_Li_Schiele_Sun_2023, title={Online Hyperparameter Optimization for Class-Incremental Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26070}, DOI={10.1609/aaai.v37i7.26070}, abstractNote={Class-incremental learning (CIL) aims to train a classification model while the number of classes increases phase-by-phase. An inherent challenge of CIL is the stability-plasticity tradeoff, i.e., CIL models should keep stable to retain old knowledge and keep plastic to absorb new knowledge. However, none of the existing CIL models can achieve the optimal tradeoff in different data-receiving settings\u2014where typically the training-from-half (TFH) setting needs more stability, but the training-from-scratch (TFS) needs more plasticity. To this end, we design an online learning method that can adaptively optimize the tradeoff without knowing the setting as a priori. Specifically, we first introduce the key hyperparameters that influence the tradeoff, e.g., knowledge distillation (KD) loss weights, learning rates, and classifier types. Then, we formulate the hyperparameter optimization process as an online Markov Decision Process (MDP) problem and propose a specific algorithm to solve it. We apply local estimated rewards and a classic bandit algorithm Exp3 to address the issues when applying online MDP methods to the CIL protocol. 
Our method consistently improves top-performing CIL methods in both TFH and TFS settings, e.g., boosting the average accuracy of TFH and TFS by 2.2 percentage points on ImageNet-Full, compared to the state-of-the-art. Code is provided at https://class-il.mpi-inf.mpg.de/online/}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yaoyao and Li, Yingying and Schiele, Bernt and Sun, Qianru}, year={2023}, month={Jun.}, pages={8906-8913} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26070/25842", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26070", + "pdf_size": 1196693, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14957893288705696601&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": "jhu.edu;caltech.edu;mpi-inf.mpg.de;smu.edu.sg", + "email": "jhu.edu;caltech.edu;mpi-inf.mpg.de;smu.edu.sg", + "github": "", + "project": "https://class-il.mpi-inf.mpg.de/online/", + "author_num": 4, + "aff_unique_index": "0+1;2;0;3", + "aff_unique_norm": "Max Planck Institute for Informatics;Johns Hopkins University;California Institute of Technology;Singapore Management University", + "aff_unique_dep": ";Department of Computer Science;Computing and Mathematical Sciences;School of Computing and Information Systems", + "aff_unique_url": "https://mpi-inf.mpg.de;https://www.jhu.edu;https://www.caltech.edu;https://www.smu.edu.sg", + "aff_unique_abbr": "MPII;JHU;Caltech;SMU", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Saarland;;Pasadena", + "aff_country_unique_index": "0+1;1;0;2", + "aff_country_unique": "Germany;United States;Singapore" + }, + { + "id": "article-26534", + "title": "Online Noisy Continual Relation Learning", + "track": "main", + "status": "Technical", + "abstract": "Recent work for continual relation learning has achieved remarkable progress. 
However, most existing methods only focus on tackling catastrophic forgetting to improve performance in the existing setup, while continually learning relations in the real-world must overcome many other challenges. One is that the data possibly comes in an online streaming fashion with data distributions gradually changing and without distinct task boundaries. Another is that noisy labels are inevitable in real-world, as relation samples may be contaminated by label inconsistencies or labeled with distant supervision. In this work, therefore, we propose a novel continual relation learning framework that simultaneously addresses both online and noisy relation learning challenges. Our framework contains three key modules: (i) a sample separated online purifying module that divides the online data stream into clean and noisy samples, (ii) a self-supervised online learning module that circumvents inferior training signals caused by noisy data, and (iii) a semi-supervised offline finetuning module that ensures the participation of both clean and noisy samples. 
Experimental results on FewRel, TACRED and NYT-H with real-world noise demonstrate that our framework greatly outperforms the combinations of the state-of-the-art online continual learning and noisy label learning methods.", + "primary_area": "speech natural language processing", + "author": "Guozheng Li; Peng Wang; Qiqing Luo; Yanhe Liu; Wenjun Ke", + "authorids": "", + "aff": "School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University; School of Computer Science and Engineering, Southeast University + Beijing Institute of Computer Technology and Application", + "bibtex": "@article{Li_Wang_Luo_Liu_Ke_2023, title={Online Noisy Continual Relation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26534}, DOI={10.1609/aaai.v37i11.26534}, abstractNote={Recent work for continual relation learning has achieved remarkable progress. However, most existing methods only focus on tackling catastrophic forgetting to improve performance in the existing setup, while continually learning relations in the real-world must overcome many other challenges. One is that the data possibly comes in an online streaming fashion with data distributions gradually changing and without distinct task boundaries. Another is that noisy labels are inevitable in real-world, as relation samples may be contaminated by label inconsistencies or labeled with distant supervision. In this work, therefore, we propose a novel continual relation learning framework that simultaneously addresses both online and noisy relation learning challenges. 
Our framework contains three key modules: (i) a sample separated online purifying module that divides the online data stream into clean and noisy samples, (ii) a self-supervised online learning module that circumvents inferior training signals caused by noisy data, and (iii) a semi-supervised offline finetuning module that ensures the participation of both clean and noisy samples. Experimental results on FewRel, TACRED and NYT-H with real-world noise demonstrate that our framework greatly outperforms the combinations of the state-of-the-art online continual learning and noisy label learning methods.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Guozheng and Wang, Peng and Luo, Qiqing and Liu, Yanhe and Ke, Wenjun}, year={2023}, month={Jun.}, pages={13059-13066} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26534/26306", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26534", + "pdf_size": 520805, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=686582194314957298&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 2, + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;163.com", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+1", + "aff_unique_norm": "Southeast University;Beijing Institute of Computer Technology and Application", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "https://www.seu.edu.cn/;", + "aff_unique_abbr": "SEU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26404", + "title": "Online Platforms and the Fair Exposure Problem under Homophily", + "track": "main", + "status": "Technical", + "abstract": "In the wake of increasing political extremism, online platforms have 
been criticized for contributing to polarization. One line of criticism has focused on echo chambers and the recommended content served to users by these platforms. In this work, we introduce the fair exposure problem: given limited intervention power of the platform, the goal is to enforce balance in the spread of content (e.g., news articles) among two groups of users through constraints similar to those imposed by the Fairness Doctrine in the United States in the past. Groups are characterized by different affiliations (e.g., political views) and have different preferences for content. We develop a stylized framework that models intra- and inter-group content propagation under homophily, and we formulate the platform's decision as an optimization problem that aims at maximizing user engagement, potentially under fairness constraints. Our main notion of fairness requires that each group see a mixture of their preferred and non-preferred content, encouraging information diversity. Promoting such information diversity is often viewed as desirable and a potential means for breaking out of harmful echo chambers. We study the solutions to both the fairness-agnostic and fairness-aware problems. We prove that a fairness-agnostic approach inevitably leads to group-homogeneous targeting by the platform. This is only partially mitigated by imposing fairness constraints: we show that there exist optimal fairness-aware solutions which target one group with different types of content and the other group with only one type that is not necessarily the group's most preferred. 
Finally, using simulations with real-world data, we study the system dynamics and quantify the price of fairness.", + "primary_area": "philosophy and ethics of ai", + "author": "Jakob Schoeffer; Alexander Ritchie; Keziah Naggita; Faidra Monachou; Jessica Finocchiaro; Marc Juarez", + "authorids": "", + "aff": "Karlsruhe Institute of Technology (KIT); University of Michigan; Toyota Technological Institute at Chicago; Harvard University; Harvard University + Center for Research on Computation and Society (CRCS); University of Edinburgh", + "bibtex": "@article{Schoeffer_Ritchie_Naggita_Monachou_Finocchiaro_Juarez_2023, title={Online Platforms and the Fair Exposure Problem under Homophily}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26404}, DOI={10.1609/aaai.v37i10.26404}, abstractNote={In the wake of increasing political extremism, online platforms have been criticized for contributing to polarization. One line of criticism has focused on echo chambers and the recommended content served to users by these platforms. In this work, we introduce the fair exposure problem: given limited intervention power of the platform, the goal is to enforce balance in the spread of content (e.g., news articles) among two groups of users through constraints similar to those imposed by the Fairness Doctrine in the United States in the past. Groups are characterized by different affiliations (e.g., political views) and have different preferences for content. We develop a stylized framework that models intra- and inter-group content propagation under homophily, and we formulate the platform\u2019s decision as an optimization problem that aims at maximizing user engagement, potentially under fairness constraints. Our main notion of fairness requires that each group see a mixture of their preferred and non-preferred content, encouraging information diversity. 
Promoting such information diversity is often viewed as desirable and a potential means for breaking out of harmful echo chambers. We study the solutions to both the fairness-agnostic and fairness-aware problems. We prove that a fairness-agnostic approach inevitably leads to group-homogeneous targeting by the platform. This is only partially mitigated by imposing fairness constraints: we show that there exist optimal fairness-aware solutions which target one group with different types of content and the other group with only one type that is not necessarily the group\u2019s most preferred. Finally, using simulations with real-world data, we study the system dynamics and quantify the price of fairness.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Schoeffer, Jakob and Ritchie, Alexander and Naggita, Keziah and Monachou, Faidra and Finocchiaro, Jessica and Juarez, Marc}, year={2023}, month={Jun.}, pages={11899-11908} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26404/26176", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26404", + "pdf_size": 359055, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1806839158721399130&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 11, + "aff_domain": "kit.edu;umich.edu;ttic.edu;stanford.edu;seas.harvard.edu;ed.ac.uk", + "email": "kit.edu;umich.edu;ttic.edu;stanford.edu;seas.harvard.edu;ed.ac.uk", + "github": "https://github.com/jfinocchiaro/fair-exposure", + "project": "https://arxiv.org/abs/2202.09727", + "author_num": 6, + "aff_unique_index": "0;1;2;3;3+4;5", + "aff_unique_norm": "Karlsruhe Institute of Technology;University of Michigan;Toyota Technological Institute at Chicago;Harvard University;Center for Research on Computation and Society;University of Edinburgh", + "aff_unique_dep": ";;;;Computer Science;", + "aff_unique_url": 
"https://www.kit.edu;https://www.umich.edu;https://www.tti-chicago.org;https://www.harvard.edu;;https://www.ed.ac.uk", + "aff_unique_abbr": "KIT;UM;TTI Chicago;Harvard;CRCS;Edinburgh", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Chicago", + "aff_country_unique_index": "0;1;1;1;1+1;2", + "aff_country_unique": "Germany;United States;United Kingdom" + }, + { + "id": "article-25581", + "title": "Online Random Feature Forests for Learning in Varying Feature Spaces", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we propose a new online learning algorithm tailored for data streams described by varying feature spaces (VFS), wherein new features constantly emerge and old features may stop to be observed over various time spans. Our proposed algorithm, named Online Random Feature Forests for Feature space Variabilities (ORF3V), provides a strategy to respect such feature dynamics by generating, updating, pruning, as well as online re-weighing an ensemble of what we call feature forests, which are generated and updated based on a compressed and storage efficient representation for each observed feature. We benchmark our algorithm on 12 datasets, including one novel real-world dataset of government COVID-19 responses collected through a crowd-sensing program in Spain. 
The empirical results substantiate the viability and effectiveness of our ORF3V algorithm and its superior accuracy performance over the state-of-the-art rival models.", + "primary_area": "data mining and knowledge management", + "author": "Christian Schreckenberger; Yi He; Stefan L\u00fcdtke; Christian Bartelt; Heiner Stuckenschmidt", + "authorids": "", + "aff": "Chair for Artificial Intelligence, University of Mannheim, Germany+Institute for Enterprise Systems, University of Mannheim, Germany; Department of Computer Science, Old Dominion University, USA; Institute for Enterprise Systems, University of Mannheim, Germany; Institute for Enterprise Systems, University of Mannheim, Germany; Chair for Artificial Intelligence, University of Mannheim, Germany", + "bibtex": "@article{Schreckenberger_He_L\u00fcdtke_Bartelt_Stuckenschmidt_2023, title={Online Random Feature Forests for Learning in Varying Feature Spaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25581}, DOI={10.1609/aaai.v37i4.25581}, abstractNote={In this paper, we propose a new online learning algorithm tailored for data streams described by varying feature spaces (VFS), wherein new features constantly emerge and old features may stop to be observed over various time spans. Our proposed algorithm, named Online Random Feature Forests for Feature space Variabilities (ORF3V), provides a strategy to respect such feature dynamics by generating, updating, pruning, as well as online re-weighing an ensemble of what we call feature forests, which are generated and updated based on a compressed and storage efficient representation for each observed feature. We benchmark our algorithm on 12 datasets, including one novel real-world dataset of government COVID-19 responses collected through a crowd-sensing program in Spain. 
The empirical results substantiate the viability and effectiveness of our ORF3V algorithm and its superior accuracy performance over the state-of-the-art rival models.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Schreckenberger, Christian and He, Yi and L\u00fcdtke, Stefan and Bartelt, Christian and Stuckenschmidt, Heiner}, year={2023}, month={Jun.}, pages={4587-4595} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25581/25353", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25581", + "pdf_size": 178897, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8356087049292150439&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "uni-mannheim.de;cs.odu.edu;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de", + "email": "uni-mannheim.de;cs.odu.edu;uni-mannheim.de;uni-mannheim.de;uni-mannheim.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;0;0;0", + "aff_unique_norm": "University of Mannheim;Old Dominion University", + "aff_unique_dep": "Chair for Artificial Intelligence;Department of Computer Science", + "aff_unique_url": "https://www.uni-mannheim.de;https://www.odu.edu", + "aff_unique_abbr": ";ODU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;0;0;0", + "aff_country_unique": "Germany;United States" + }, + { + "id": "article-26088", + "title": "Online Reinforcement Learning with Uncertain Episode Lengths", + "track": "main", + "status": "Technical", + "abstract": "Existing episodic reinforcement algorithms assume that the length of an episode is fixed across time and known a priori. In this paper, we consider a general framework of episodic reinforcement learning when the length of each episode is drawn from a distribution. 
We first establish that this problem is equivalent to online reinforcement learning with general discounting where the learner is trying to optimize the expected discounted sum of rewards over an infinite horizon, but where the discounting function is not necessarily geometric. We show that minimizing regret with this new general discounting is equivalent to minimizing regret with uncertain episode lengths. We then design a reinforcement learning algorithm that minimizes regret with general discounting but acts for the setting with uncertain episode lengths. We instantiate our general bound for different types of discounting, including geometric and polynomial discounting. We also show that we can obtain similar regret bounds even when the uncertainty over the episode lengths is unknown, by estimating the unknown distribution over time. Finally, we compare our learning algorithms with existing value-iteration based episodic RL algorithms on a grid-world environment.", + "primary_area": "machine learning ii", + "author": "Debmalya Mandal; Goran Radanovic; Jiarui Gan; Adish Singla; Rupak Majumdar", + "authorids": "", + "aff": "Max Planck Institute for Software Systems; Max Planck Institute for Software Systems; University of Oxford; Max Planck Institute for Software Systems; Max Planck Institute for Software Systems", + "bibtex": "@article{Mandal_Radanovic_Gan_Singla_Majumdar_2023, title={Online Reinforcement Learning with Uncertain Episode Lengths}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26088}, DOI={10.1609/aaai.v37i7.26088}, abstractNote={Existing episodic reinforcement algorithms assume that the length of an episode is fixed across time and known a priori. In this paper, we consider a general framework of episodic reinforcement learning when the length of each episode is drawn from a distribution. 
We first establish that this problem is equivalent to online reinforcement learning with general discounting where the learner is trying to optimize the expected discounted sum of rewards over an infinite horizon, but where the discounting function is not necessarily geometric. We show that minimizing regret with this new general discounting is equivalent to minimizing regret with uncertain episode lengths. We then design a reinforcement learning algorithm that minimizes regret with general discounting but acts for the setting with uncertain episode lengths. We instantiate our general bound for different types of discounting, including geometric and polynomial discounting. We also show that we can obtain similar regret bounds even when the uncertainty over the episode lengths is unknown, by estimating the unknown distribution over time. Finally, we compare our learning algorithms with existing value-iteration based episodic RL algorithms on a grid-world environment.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mandal, Debmalya and Radanovic, Goran and Gan, Jiarui and Singla, Adish and Majumdar, Rupak}, year={2023}, month={Jun.}, pages={9064-9071} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26088/25860", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26088", + "pdf_size": 591594, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2920675860827936189&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "mpi-sws.org;mpi-sws.org;cs.ox.ac.uk;mpi-sws.org;mpi-sws.org", + "email": "mpi-sws.org;mpi-sws.org;cs.ox.ac.uk;mpi-sws.org;mpi-sws.org", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Max Planck Institute for Software Systems;University of Oxford", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.mpi-sws.org;https://www.ox.ac.uk", + "aff_unique_abbr": 
"MPI-SWS;Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "Germany;United Kingdom" + }, + { + "id": "article-25596", + "title": "Online Semi-supervised Learning with Mix-Typed Streaming Features", + "track": "main", + "status": "Technical", + "abstract": "Online learning with feature spaces that are not fixed but can vary over time renders a seemingly flexible learning paradigm thus has drawn much attention. Unfortunately, two restrictions prohibit a ubiquitous application of this learning paradigm in practice. First, whereas prior studies mainly assume a homogenous feature type, data streams generated from real applications can be heterogeneous in which Boolean, ordinal, and continuous co-exist. Existing methods that prescribe parametric distributions such as Gaussians would not suffice to model the correlation among such mixtyped features. Second, while full supervision seems to be a default setup, providing labels to all arriving data instances over a long time span is tangibly onerous, laborious, and economically unsustainable. Alas, a semi-supervised online learner that can deal with mix-typed, varying feature spaces is still missing. To fill the gap, this paper explores a novel problem, named Online Semi-supervised Learning with Mixtyped streaming Features (OSLMF), which strives to relax the restrictions on the feature type and supervision information. Our key idea to solve the new problem is to leverage copula model to align the data instances with different feature spaces so as to make their distance measurable. A geometric structure underlying data instances is then established in an online fashion based on their distances, through which the limited labeling information is propagated, from the scarce labeled instances to their close neighbors. Experimental results are documented to evidence the viability and effectiveness of our proposed approach. 
Code is released in https://github.com/wudi1989/OSLMF.", + "primary_area": "data mining and knowledge management", + "author": "Di Wu; Shengda Zhuo; Yu Wang; Zhong Chen; Yi He", + "authorids": "", + "aff": "College of Computer and Information Science, Southwest University, Chongqing 400715, China; Institute of Artificial Intelligence and Blockchain, Guangzhou University, Guangzhou 510006, China; Institute of Artificial Intelligence and Blockchain, Guangzhou University, Guangzhou 510006, China; Department of Computer Science, Xavier University of Louisiana, New Orleans, LA 70125, USA; Department of Computer Science, Old Dominion University, Norfolk, VA 23529, USA", + "bibtex": "@article{Wu_Zhuo_Wang_Chen_He_2023, title={Online Semi-supervised Learning with Mix-Typed Streaming Features}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25596}, DOI={10.1609/aaai.v37i4.25596}, abstractNote={Online learning with feature spaces that are not fixed but can vary over time renders a seemingly flexible learning paradigm thus has drawn much attention. Unfortunately, two restrictions prohibit a ubiquitous application of this learning paradigm in practice. First, whereas prior studies mainly assume a homogenous feature type, data streams generated from real applications can be heterogeneous in which Boolean, ordinal, and continuous co-exist. Existing methods that prescribe parametric distributions such as Gaussians would not suffice to model the correlation among such mixtyped features. Second, while full supervision seems to be a default setup, providing labels to all arriving data instances over a long time span is tangibly onerous, laborious, and economically unsustainable. Alas, a semi-supervised online learner that can deal with mix-typed, varying feature spaces is still missing. 
To fill the gap, this paper explores a novel problem, named Online Semi-supervised Learning with Mixtyped streaming Features (OSLMF), which strives to relax the restrictions on the feature type and supervision information. Our key idea to solve the new problem is to leverage copula model to align the data instances with different feature spaces so as to make their distance measurable. A geometric structure underlying data instances is then established in an online fashion based on their distances, through which the limited labeling information is propagated, from the scarce labeled instances to their close neighbors. Experimental results are documented to evidence the viability and effectiveness of our proposed approach. Code is released in https://github.com/wudi1989/OSLMF.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Di and Zhuo, Shengda and Wang, Yu and Chen, Zhong and He, Yi}, year={2023}, month={Jun.}, pages={4720-4728} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25596/25368", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25596", + "pdf_size": 2425835, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12762583652336322845&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com;gmail.com;gzhu.edu.cn;xula.edu;cs.odu.edu", + "email": "gmail.com;gmail.com;gzhu.edu.cn;xula.edu;cs.odu.edu", + "github": "https://github.com/wudi1989/OSLMF", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;3", + "aff_unique_norm": "Southwest University;Guangzhou University;Xavier University of Louisiana;Old Dominion University", + "aff_unique_dep": "College of Computer and Information Science;Institute of Artificial Intelligence and Blockchain;Department of Computer Science;Department of Computer Science", + "aff_unique_url": ";http://www.gzhu.edu.cn;https://www.xula.edu;https://www.odu.edu", + "aff_unique_abbr": 
";GU;XULA;ODU", + "aff_campus_unique_index": "0;1;1;2;3", + "aff_campus_unique": "Chongqing;Guangzhou;New Orleans;Norfolk", + "aff_country_unique_index": "0;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25641", + "title": "Online Symbolic Regression with Informative Query", + "track": "main", + "status": "Technical", + "abstract": "Symbolic regression, the task of extracting mathematical expressions from the observed data, plays a crucial role in scientific discovery. Despite the promising performance of existing methods, most of them conduct symbolic regression in an offline setting. That is, they treat the observed data points as given ones that are simply sampled from uniform distributions without exploring the expressive potential of data. However, for real-world scientific problems, the data used for symbolic regression are usually actively obtained by doing experiments, which is an online setting. Thus, how to obtain informative data that can facilitate the symbolic regression process is an important problem that remains challenging. \n\nIn this paper, we propose QUOSR, a query-based framework for online symbolic regression that can automatically obtain informative data in an iterative manner. Specifically, at each step, QUOSR receives historical data points, generates new x, and then queries the symbolic expression to get the corresponding y, where the (x, y) serves as new data points. This process repeats until the maximum number of query steps is reached. To make the generated data points informative, we implement the framework with a neural network and train it by maximizing the mutual information between generated data points and the target expression. 
Through comprehensive experiments, we show that QUOSR can facilitate modern symbolic regression methods by generating informative data.", + "primary_area": "domain s of application", + "author": "Pengwei Jin; Di Huang; Rui Zhang; Xing Hu; Ziyuan Nan; Zidong Du; Qi Guo; Yunji Chen", + "authorids": "", + "aff": "State Key Lab of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences + Cambricon Technologies; State Key Lab of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences + Cambricon Technologies; State Key Lab of Processors, Institute of Computing Technology, CAS + Cambricon Technologies; State Key Lab of Processors, Institute of Computing Technology, CAS; State Key Lab of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences + Cambricon Technologies; State Key Lab of Processors, Institute of Computing Technology, CAS; State Key Lab of Processors, Institute of Computing Technology, CAS; State Key Lab of Processors, Institute of Computing Technology, CAS + University of Chinese Academy of Sciences", + "bibtex": "@article{Jin_Huang_Zhang_Hu_Nan_Du_Guo_Chen_2023, title={Online Symbolic Regression with Informative Query}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25641}, DOI={10.1609/aaai.v37i4.25641}, abstractNote={Symbolic regression, the task of extracting mathematical expressions from the observed data, plays a crucial role in scientific discovery. Despite the promising performance of existing methods, most of them conduct symbolic regression in an offline setting. That is, they treat the observed data points as given ones that are simply sampled from uniform distributions without exploring the expressive potential of data. However, for real-world scientific problems, the data used for symbolic regression are usually actively obtained by doing experiments, which is an online setting. 
Thus, how to obtain informative data that can facilitate the symbolic regression process is an important problem that remains challenging. In this paper, we propose QUOSR, a query-based framework for online symbolic regression that can automatically obtain informative data in an iterative manner. Specifically, at each step, QUOSR receives historical data points, generates new x, and then queries the symbolic expression to get the corresponding y, where the (x, y) serves as new data points. This process repeats until the maximum number of query steps is reached. To make the generated data points informative, we implement the framework with a neural network and train it by maximizing the mutual information between generated data points and the target expression. Through comprehensive experiments, we show that QUOSR can facilitate modern symbolic regression methods by generating informative data.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Pengwei and Huang, Di and Zhang, Rui and Hu, Xing and Nan, Ziyuan and Du, Zidong and Guo, Qi and Chen, Yunji}, year={2023}, month={Jun.}, pages={5122-5130} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25641/25413", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25641", + "pdf_size": 1033120, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6389484259750513706&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1+2;0+1+2;0+2;0;0+1+2;0;0;0+1", + "aff_unique_norm": "Institute of Computing Technology;University of Chinese Academy of Sciences;Cambricon Technologies", + "aff_unique_dep": "State Key Lab of Processors;;", + "aff_unique_url": 
"http://www.ict.ac.cn;http://www.ucas.ac.cn;https://www.cambricon.com", + "aff_unique_abbr": "ICT;UCAS;", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0+0;0;0+0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25973", + "title": "Online Tuning for Offline Decentralized Multi-Agent Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Offline reinforcement learning could learn effective policies from a fixed dataset, which is promising for real-world applications. However, in offline decentralized multi-agent reinforcement learning, due to the discrepancy between the behavior policy and learned policy, the transition dynamics in offline experiences do not accord with the transition dynamics in online execution, which creates severe errors in value estimates, leading to uncoordinated low-performing policies. One way to overcome this problem is to bridge offline training and online tuning. However, considering both deployment efficiency and sample efficiency, we could only collect very limited online experiences, making it insufficient to use merely online data for updating the agent policy. To utilize both offline and online experiences to tune the policies of agents, we introduce online transition correction (OTC) to implicitly correct the offline transition dynamics by modifying sampling probabilities. We design two types of distances, i.e., embedding-based and value-based distance, to measure the similarity between transitions, and further propose an adaptive rank-based prioritization to sample transitions according to the transition similarity. OTC is simple yet effective to increase data efficiency and improve agent policies in online tuning. 
Empirically, OTC outperforms baselines in a variety of tasks.", + "primary_area": "machine learning ii", + "author": "Jiechuan Jiang; Zongqing Lu", + "authorids": "", + "aff": "School of Computer Science, Peking University; School of Computer Science, Peking University", + "bibtex": "@article{Jiang_Lu_2023, title={Online Tuning for Offline Decentralized Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25973}, DOI={10.1609/aaai.v37i7.25973}, abstractNote={Offline reinforcement learning could learn effective policies from a fixed dataset, which is promising for real-world applications. However, in offline decentralized multi-agent reinforcement learning, due to the discrepancy between the behavior policy and learned policy, the transition dynamics in offline experiences do not accord with the transition dynamics in online execution, which creates severe errors in value estimates, leading to uncoordinated low-performing policies. One way to overcome this problem is to bridge offline training and online tuning. However, considering both deployment efficiency and sample efficiency, we could only collect very limited online experiences, making it insufficient to use merely online data for updating the agent policy. To utilize both offline and online experiences to tune the policies of agents, we introduce online transition correction (OTC) to implicitly correct the offline transition dynamics by modifying sampling probabilities. We design two types of distances, i.e., embedding-based and value-based distance, to measure the similarity between transitions, and further propose an adaptive rank-based prioritization to sample transitions according to the transition similarity. OTC is simple yet effective to increase data efficiency and improve agent policies in online tuning. 
Empirically, OTC outperforms baselines in a variety of tasks.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Jiechuan and Lu, Zongqing}, year={2023}, month={Jun.}, pages={8050-8059} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25973/25745", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25973", + "pdf_size": 842231, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9118701908262332158&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25242", + "title": "Only a Few Classes Confusing: Pixel-Wise Candidate Labels Disambiguation for Foggy Scene Understanding", + "track": "main", + "status": "Technical", + "abstract": "Not all semantics become confusing when deploying a semantic segmentation model for real-world scene understanding of adverse weather. The true semantics of most pixels have a high likelihood of appearing in the few top classes according to confidence ranking. In this paper, we replace the one-hot pseudo label with a candidate label set (CLS) that consists of only a few ambiguous classes and exploit its effects on self-training-based unsupervised domain adaptation. Specifically, we formulate the problem as a coarse-to-fine process. In the coarse-level process, adaptive CLS selection is proposed to pick a minimal set of confusing candidate labels based on the reliability of label predictions. 
Then, representation learning and label rectification are iteratively performed to facilitate feature clustering in an embedding space and to disambiguate the confusing semantics. Experimentally, our method outperforms the state-of-the-art methods on three realistic foggy benchmarks.", + "primary_area": "computer vision ii", + "author": "Liang Liao; Wenyi Chen; Zhen Zhang; Jing Xiao; Yan Yang; Chia-Wen Lin; Shin'ichi Satoh", + "authorids": "", + "aff": "S-lab, School of Computer Science and Engineering, Nanyang Technological University; School of Computer Science, Wuhan University; School of Computer Science, Wuhan University; School of Computer Science, Wuhan University; School of Resource and Environmental Sciences, Wuhan University; Department of Electrical Engineering, National Tsing Hua University; National Institute of Informatics", + "bibtex": "@article{Liao_Chen_Zhang_Xiao_Yang_Lin_Satoh_2023, title={Only a Few Classes Confusing: Pixel-Wise Candidate Labels Disambiguation for Foggy Scene Understanding}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25242}, DOI={10.1609/aaai.v37i2.25242}, abstractNote={Not all semantics become confusing when deploying a semantic segmentation model for real-world scene understanding of adverse weather. The true semantics of most pixels have a high likelihood of appearing in the few top classes according to confidence ranking. In this paper, we replace the one-hot pseudo label with a candidate label set (CLS) that consists of only a few ambiguous classes and exploit its effects on self-training-based unsupervised domain adaptation. Specifically, we formulate the problem as a coarse-to-fine process. In the coarse-level process, adaptive CLS selection is proposed to pick a minimal set of confusing candidate labels based on the reliability of label predictions. 
Then, representation learning and label rectification are iteratively performed to facilitate feature clustering in an embedding space and to disambiguate the confusing semantics. Experimentally, our method outperforms the state-of-the-art methods on three realistic foggy benchmarks.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liao, Liang and Chen, Wenyi and Zhang, Zhen and Xiao, Jing and Yang, Yan and Lin, Chia-Wen and Satoh, Shin\u2019ichi}, year={2023}, month={Jun.}, pages={1558-1567} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25242/25014", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25242", + "pdf_size": 5462370, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7424732235754208052&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ntu.edu.sg;whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn;ee.nthu.edu.tw;nii.ac.jp", + "email": "ntu.edu.sg;whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn;ee.nthu.edu.tw;nii.ac.jp", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;1;2;3", + "aff_unique_norm": "Nanyang Technological University;Wuhan University;National Tsing Hua University;National Institute of Informatics", + "aff_unique_dep": "School of Computer Science and Engineering;School of Computer Science;Department of Electrical Engineering;", + "aff_unique_url": "https://www.ntu.edu.sg;http://www.whu.edu.cn;https://www.nthu.edu.tw;https://www.nii.ac.jp/", + "aff_unique_abbr": "NTU;WHU;NTHU;NII", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Wuhan", + "aff_country_unique_index": "0;1;1;1;1;2;3", + "aff_country_unique": "Singapore;China;Taiwan, China;Japan" + }, + { + "id": "article-26257", + "title": "Open-Ended Diverse Solution Discovery with Regulated Behavior Patterns for Cross-Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "While Reinforcement Learning 
can achieve impressive results for complex tasks, the learned policies are generally prone to fail in downstream tasks with even minor model mismatch or unexpected perturbations. Recent works have demonstrated that a policy population with diverse behavior characteristics can generalize to downstream environments with various discrepancies. However, such policies might result in catastrophic damage during the deployment in practical scenarios like real-world systems due to the unrestricted behaviors of trained policies. Furthermore, training diverse policies without regulation of the behavior can result in inadequate feasible policies for extrapolating to a wide range of test conditions with dynamics shifts. In this work, we aim to train diverse policies under the regularization of the behavior patterns. We motivate our paradigm by observing the inverse dynamics in the environment with partial state information and propose Diversity in Regulation (DiR) training diverse policies with regulated behaviors to discover desired patterns that benefit the generalization. 
Considerable empirical results on various variations of different environments indicate that our method attains improvements over other diversity-driven counterparts.", + "primary_area": "machine learning iv", + "author": "Kang Xu; Yan Ma; Bingsheng Wei; Wei Li", + "authorids": "", + "aff": "Academy for Engineering and Technology, Fudan University, Shanghai, China; Academy for Engineering and Technology, Fudan University, Shanghai, China; Academy for Engineering and Technology, Fudan University, Shanghai, China; Academy for Engineering and Technology, Fudan University, Shanghai, China", + "bibtex": "@article{Xu_Ma_Wei_Li_2023, title={Open-Ended Diverse Solution Discovery with Regulated Behavior Patterns for Cross-Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26257}, DOI={10.1609/aaai.v37i9.26257}, abstractNote={While Reinforcement Learning can achieve impressive results for complex tasks, the learned policies are generally prone to fail in downstream tasks with even minor model mismatch or unexpected perturbations. Recent works have demonstrated that a policy population with diverse behavior characteristics can generalize to downstream environments with various discrepancies. However, such policies might result in catastrophic damage during the deployment in practical scenarios like real-world systems due to the unrestricted behaviors of trained policies. Furthermore, training diverse policies without regulation of the behavior can result in inadequate feasible policies for extrapolating to a wide range of test conditions with dynamics shifts. In this work, we aim to train diverse policies under the regularization of the behavior patterns. We motivate our paradigm by observing the inverse dynamics in the environment with partial state information and propose Diversity in Regulation (DiR) training diverse policies with regulated behaviors to discover desired patterns that benefit the generalization. 
Considerable empirical results on various variations of different environments indicate that our method attains improvements over other diversity-driven counterparts.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Kang and Ma, Yan and Wei, Bingsheng and Li, Wei}, year={2023}, month={Jun.}, pages={10585-10593} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26257/26029", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26257", + "pdf_size": 1175178, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=574958029651446566&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "Academy for Engineering and Technology", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25159", + "title": "Open-Vocabulary Multi-Label Classification via Multi-Modal Knowledge Transfer", + "track": "main", + "status": "Technical", + "abstract": "Real-world recognition system often encounters the challenge of unseen labels. To identify such unseen labels, multi-label zero-shot learning (ML-ZSL) focuses on transferring knowledge by a pre-trained textual label embedding (e.g., GloVe). However, such methods only exploit single-modal knowledge from a language model, while ignoring the rich semantic information inherent in image-text pairs. 
Instead, recently developed open-vocabulary (OV) based methods succeed in exploiting such information of image-text pairs in object detection, and achieve impressive performance. Inspired by the success of OV-based methods, we propose a novel open-vocabulary framework, named multi-modal knowledge transfer (MKT), for multi-label classification. Specifically, our method exploits multi-modal knowledge of image-text pairs based on a vision and language pre-training (VLP) model. To facilitate transferring the image-text matching ability of VLP model, knowledge distillation is employed to guarantee the consistency of image and label embeddings, along with prompt tuning to further update the label embeddings. To further enable the recognition of multiple objects, a simple but effective two-stream module is developed to capture both local and global features. Extensive experimental results show that our method significantly outperforms state-of-the-art methods on public benchmark datasets.", + "primary_area": "computer vision i", + "author": "Sunan He; Taian Guo; Tao Dai; Ruizhi Qiao; Xiujun Shu; Bo Ren; Shu-Tao Xia", + "authorids": "", + "aff": "College of Computer Science and Software Engineering, Shenzhen University + Tsinghua Shenzhen International Graduate School, Tsinghua University + YouTu Lab, Tencent; YouTu Lab, Tencent; College of Computer Science and Software Engineering, Shenzhen University; YouTu Lab, Tencent; YouTu Lab, Tencent; YouTu Lab, Tencent; Tsinghua Shenzhen International Graduate School, Tsinghua University + Research Center of Artificial Intelligence, Peng Cheng Laboratory", + "bibtex": "@article{He_Guo_Dai_Qiao_Shu_Ren_Xia_2023, title={Open-Vocabulary Multi-Label Classification via Multi-Modal Knowledge Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25159}, DOI={10.1609/aaai.v37i1.25159}, abstractNote={Real-world recognition system often encounters the challenge of unseen labels. 
To identify such unseen labels, multi-label zero-shot learning (ML-ZSL) focuses on transferring knowledge by a pre-trained textual label embedding (e.g., GloVe). However, such methods only exploit single-modal knowledge from a language model, while ignoring the rich semantic information inherent in image-text pairs. Instead, recently developed open-vocabulary (OV) based methods succeed in exploiting such information of image-text pairs in object detection, and achieve impressive performance. Inspired by the success of OV-based methods, we propose a novel open-vocabulary framework, named multi-modal knowledge transfer (MKT), for multi-label classification. Specifically, our method exploits multi-modal knowledge of image-text pairs based on a vision and language pre-training (VLP) model. To facilitate transferring the image-text matching ability of VLP model, knowledge distillation is employed to guarantee the consistency of image and label embeddings, along with prompt tuning to further update the label embeddings. To further enable the recognition of multiple objects, a simple but effective two-stream module is developed to capture both local and global features. 
Extensive experimental results show that our method significantly outperforms state-of-the-art methods on public benchmark datasets.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Sunan and Guo, Taian and Dai, Tao and Qiao, Ruizhi and Shu, Xiujun and Ren, Bo and Xia, Shu-Tao}, year={2023}, month={Jun.}, pages={808-816} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25159/24931", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25159", + "pdf_size": 626940, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9683393354275594390&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;tencent.com;gmail.com;tencent.com;tencent.com;tencent.com;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;tencent.com;gmail.com;tencent.com;tencent.com;tencent.com;sz.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2;2;0;2;2;2;1+3", + "aff_unique_norm": "Shenzhen University;Tsinghua University;Tencent;Peng Cheng Laboratory", + "aff_unique_dep": "College of Computer Science and Software Engineering;International Graduate School;YouTu Lab;Research Center of Artificial Intelligence", + "aff_unique_url": "https://www.szu.edu.cn;https://www.tsinghua.edu.cn;https://www.tencent.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "SZU;THU;Tencent;", + "aff_campus_unique_index": "0+0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0+0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26713", + "title": "OpenMapFlow: A Library for Rapid Map Creation with Machine Learning and Remote Sensing Data", + "track": "aaai special track", + "status": "Technical", + "abstract": "The desired output for most real-world tasks using machine learning (ML) and remote sensing data is a set of dense predictions that form a predicted map for a geographic region. 
However, most prior work involving ML and remote sensing follows the traditional practice of reporting metrics on a set of independent, geographically-sparse samples and does not perform dense predictions. To reduce the labor of producing dense prediction maps, we present OpenMapFlow---an open-source python library for rapid map creation with ML and remote sensing data. OpenMapFlow provides 1) a data processing pipeline for users to create labeled datasets for any region, 2) code to train state-of-the-art deep learning models on custom or existing datasets, and 3) a cloud-based architecture to deploy models for efficient map prediction. We demonstrate the benefits of OpenMapFlow through experiments on three binary classification tasks: cropland, crop type (maize), and building mapping. We show that OpenMapFlow drastically reduces the time required for dense prediction compared to traditional workflows. We hope this library will stimulate novel research in areas such as domain shift, unsupervised learning, and societally-relevant applications and lessen the barrier to adopting research methods for real-world tasks.", + "primary_area": "ai for social impact", + "author": "Ivan Zvonkov; Gabriel Tseng; Catherine Nakalembe; Hannah Kerner", + "authorids": "", + "aff": "University of Maryland, College Park; McGill University and Mila \u2013 Quebec AI Institute; University of Maryland, College Park; Arizona State University", + "bibtex": "@article{Zvonkov_Tseng_Nakalembe_Kerner_2023, title={OpenMapFlow: A Library for Rapid Map Creation with Machine Learning and Remote Sensing Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26713}, DOI={10.1609/aaai.v37i12.26713}, abstractNote={The desired output for most real-world tasks using machine learning (ML) and remote sensing data is a set of dense predictions that form a predicted map for a geographic region. 
However, most prior work involving ML and remote sensing follows the traditional practice of reporting metrics on a set of independent, geographically-sparse samples and does not perform dense predictions. To reduce the labor of producing dense prediction maps, we present OpenMapFlow---an open-source python library for rapid map creation with ML and remote sensing data. OpenMapFlow provides 1) a data processing pipeline for users to create labeled datasets for any region, 2) code to train state-of-the-art deep learning models on custom or existing datasets, and 3) a cloud-based architecture to deploy models for efficient map prediction. We demonstrate the benefits of OpenMapFlow through experiments on three binary classification tasks: cropland, crop type (maize), and building mapping. We show that OpenMapFlow drastically reduces the time required for dense prediction compared to traditional workflows. We hope this library will stimulate novel research in areas such as domain shift, unsupervised learning, and societally-relevant applications and lessen the barrier to adopting research methods for real-world tasks.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zvonkov, Ivan and Tseng, Gabriel and Nakalembe, Catherine and Kerner, Hannah}, year={2023}, month={Jun.}, pages={14655-14663} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26713/26485", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26713", + "pdf_size": 4640684, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=509091422962380437&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "umd.edu;mail.mcgill.ca;umd.edu;asu.edu", + "email": "umd.edu;mail.mcgill.ca;umd.edu;asu.edu", + "github": "https://github.com/nasaharvest/openmapflow", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "University of Maryland;McGill University;Arizona State 
University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www/umd.edu;https://www.mcgill.ca;https://www.asu.edu", + "aff_unique_abbr": "UMD;McGill;ASU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Park;", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-25585", + "title": "Opinion Optimization in Directed Social Networks", + "track": "main", + "status": "Technical", + "abstract": "Shifting social opinions has far-reaching implications in various aspects, such as public health campaigns, product marketing, and political candidates. In this paper, we study a problem of opinion optimization based on the popular Friedkin-Johnsen (FJ) model for opinion dynamics in an unweighted directed social network with n nodes and m edges. In the FJ model, the internal opinion of every node lies in the closed interval [0, 1], with 0 and 1 being polar opposites of opinions about a certain issue. Concretely, we focus on the problem of selecting a small number of k<", + "primary_area": "data mining and knowledge management", + "author": "Haoxin Sun; Zhongzhi Zhang", + "authorids": "", + "aff": "Shanghai Key Laboratory of Intelligent Information Processing, Fudan University, Shanghai 200433, China+School of Computer Science, Fudan University, Shanghai 200433, China; Shanghai Key Laboratory of Intelligent Information Processing, Fudan University, Shanghai 200433, China+School of Computer Science, Fudan University, Shanghai 200433, China", + "bibtex": "@article{Sun_Zhang_2023, title={Opinion Optimization in Directed Social Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25585}, DOI={10.1609/aaai.v37i4.25585}, abstractNote={Shifting social opinions has far-reaching implications in various aspects, such as public health campaigns, product marketing, and political candidates. 
In this paper, we study a problem of opinion optimization based on the popular Friedkin-Johnsen (FJ) model for opinion dynamics in an unweighted directed social network with n nodes and m edges. In the FJ model, the internal opinion of every node lies in the closed interval [0, 1], with 0 and 1 being polar opposites of opinions about a certain issue. Concretely, we focus on the problem of selecting a small number of k<<n nodes and changing their internal opinions to 0, in order to minimize the average opinion at equilibrium. We then design an algorithm that returns the optimal solution to the problem in O(n^3) time. To speed up the computation, we further develop a fast algorithm by sampling spanning forests, the time complexity of which is O(ln), with l being the number of samplings. Finally, we execute extensive experiments on various real directed networks, which show that the effectiveness of our two algorithms is similar to each other, both of which outperform several baseline strategies of node selection. 
Moreover, our fast algorithm is more efficient than the first one, which is scalable to massive graphs with more than twenty million nodes.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Haoxin and Zhang, Zhongzhi}, year={2023}, month={Jun.}, pages={4623-4632} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25585/25357", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25585", + "pdf_size": 186109, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7634673886254529191&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "m.fudan.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "Shanghai Key Laboratory of Intelligent Information Processing", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25886", + "title": "Opposite Online Learning via Sequentially Integrated Stochastic Gradient Descent Estimators", + "track": "main", + "status": "Technical", + "abstract": "Stochastic gradient descent algorithm (SGD) has been popular in various fields of artificial intelligence as well as a prototype of online learning algorithms. This article proposes a novel and general framework of one-sided testing for streaming data based on SGD, which determines whether the unknown parameter is greater than a certain positive constant. We construct the online-updated test statistic sequentially by integrating the selected batch-specific estimator or its opposite, which is referred to opposite online learning. 
The batch-specific online estimators are chosen strategically according to the proposed sequential tactics designed by two-armed bandit process. Theoretical results prove the advantage of the strategy ensuring the distribution of test statistic to be optimal under the null hypothesis and also supply the theoretical evidence of power enhancement compared with classical test statistic. In application, the proposed method is appealing for statistical inference of one-sided testing because it is scalable for any model. Finally, the superior finite-sample performance is evaluated by simulation studies.", + "primary_area": "machine learning i", + "author": "Wenhai Cui; Xiaoting Ji; Linglong Kong; Xiaodong Yan", + "authorids": "", + "aff": "Zhongtai Securities Institute for Financial Studies, Shandong University+Shandong Province Key Laboratory of Financial Risk+Shandong National Center for Applied Mathematics; Zhongtai Securities Institute for Financial Studies, Shandong University+Shandong Province Key Laboratory of Financial Risk+Shandong National Center for Applied Mathematics; Shandong Province Key Laboratory of Financial Risk; Zhongtai Securities Institute for Financial Studies, Shandong University+Department of Mathematical and Statistical Sciences, University of Alberta+Shandong Province Key Laboratory of Financial Risk+Shandong National Center for Applied Mathematics", + "bibtex": "@article{Cui_Ji_Kong_Yan_2023, title={Opposite Online Learning via Sequentially Integrated Stochastic Gradient Descent Estimators}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25886}, DOI={10.1609/aaai.v37i6.25886}, abstractNote={Stochastic gradient descent algorithm (SGD) has been popular in various fields of artificial intelligence as well as a prototype of online learning algorithms. 
This article proposes a novel and general framework of one-sided testing for streaming data based on SGD, which determines whether the unknown parameter is greater than a certain positive constant. We construct the online-updated test statistic sequentially by integrating the selected batch-specific estimator or its opposite, which is referred to opposite online learning. The batch-specific online estimators are chosen strategically according to the proposed sequential tactics designed by two-armed bandit process. Theoretical results prove the advantage of the strategy ensuring the distribution of test statistic to be optimal under the null hypothesis and also supply the theoretical evidence of power enhancement compared with classical test statistic. In application, the proposed method is appealing for statistical inference of one-sided testing because it is scalable for any model. Finally, the superior finite-sample performance is evaluated by simulation studies.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Wenhai and Ji, Xiaoting and Kong, Linglong and Yan, Xiaodong}, year={2023}, month={Jun.}, pages={7270-7278} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25886/25658", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25886", + "pdf_size": 339646, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11735769990824281059&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "mail.sdu.edu.cn;mail.sdu.edu.cn;ualberta.ca;sdu.edu.cn", + "email": "mail.sdu.edu.cn;mail.sdu.edu.cn;ualberta.ca;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+1;0+0+1;0;0+2+0+1", + "aff_unique_norm": "Shandong University;Shandong National Center for Applied Mathematics;University of Alberta", + "aff_unique_dep": "Zhongtai Securities Institute for Financial Studies;Center for Applied Mathematics;Department of 
Mathematical and Statistical Sciences", + "aff_unique_url": "http://www.sdu.edu.cn;;https://www.ualberta.ca", + "aff_unique_abbr": ";;UAlberta", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+1+0+0", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-25920", + "title": "Optimal Decision Diagrams for Classification", + "track": "main", + "status": "Technical", + "abstract": "Decision diagrams for classification have some notable advantages over decision trees, as their internal connections can be determined at training time and their width is not bound to grow exponentially with their depth. Accordingly, decision diagrams are usually less prone to data fragmentation in internal nodes. However, the inherent complexity of training these classifiers acted as a long-standing barrier to their widespread adoption. In this context, we study the training of optimal decision diagrams (ODDs) from a mathematical programming perspective. We introduce a novel mixed-integer linear programming model for training and demonstrate its applicability for many datasets of practical importance. Further, we show how this model can be easily extended for fairness, parsimony, and stability notions. We present numerical analyses showing that our model allows training ODDs in short computational times, and that ODDs achieve better accuracy than optimal decision trees, while allowing for improved stability without significant accuracy losses.", + "primary_area": "machine learning i", + "author": "Alexandre M. 
Florio; Pedro Martins; Maximilian Schiffer; Thiago Serra; Thibaut Vidal", + "authorids": "", + "aff": "CIRRELT & SCALE-AI Chair in Data-Driven Supply Chains + Department of Mathematical and Industrial Engineering, Polytechnique Montr\u00e9al, Canada; Department of Computer Science, Pontifical Catholic University of Rio de Janeiro, Brazil; School of Management & Munich Data Science Institute, Technical University of Munich, Germany; Freeman College of Management, Bucknell University, USA; CIRRELT & SCALE-AI Chair in Data-Driven Supply Chains + Department of Mathematical and Industrial Engineering, Polytechnique Montr\u00e9al, Canada", + "bibtex": "@article{Florio_Martins_Schiffer_Serra_Vidal_2023, title={Optimal Decision Diagrams for Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25920}, DOI={10.1609/aaai.v37i6.25920}, abstractNote={Decision diagrams for classification have some notable advantages over decision trees, as their internal connections can be determined at training time and their width is not bound to grow exponentially with their depth. Accordingly, decision diagrams are usually less prone to data fragmentation in internal nodes. However, the inherent complexity of training these classifiers acted as a long-standing barrier to their widespread adoption. In this context, we study the training of optimal decision diagrams (ODDs) from a mathematical programming perspective. We introduce a novel mixed-integer linear programming model for training and demonstrate its applicability for many datasets of practical importance. Further, we show how this model can be easily extended for fairness, parsimony, and stability notions. 
We present numerical analyses showing that our model allows training ODDs in short computational times, and that ODDs achieve better accuracy than optimal decision trees, while allowing for improved stability without significant accuracy losses.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Florio, Alexandre M. and Martins, Pedro and Schiffer, Maximilian and Serra, Thiago and Vidal, Thibaut}, year={2023}, month={Jun.}, pages={7577-7585} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25920/25692", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25920", + "pdf_size": 466036, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15713653958670557173&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff_domain": "gmail.com;inf.puc-rio.br;tum.de;bucknell.edu;polymtl.ca", + "email": "gmail.com;inf.puc-rio.br;tum.de;bucknell.edu;polymtl.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;3;4;0+1", + "aff_unique_norm": "CIRRELT;Polytechnique Montr\u00e9al;Ponti\ufb01cal Catholic University of Rio de Janeiro;Technical University of Munich;Bucknell University", + "aff_unique_dep": "Data-Driven Supply Chains;Department of Mathematical and Industrial Engineering;Department of Computer Science;School of Management & Munich Data Science Institute;Freeman College of Management", + "aff_unique_url": ";https://www.polymtl.ca;http://www.puc-rio.br/;https://www.tum.de;https://www.bucknell.edu", + "aff_unique_abbr": "CIRRELT;Polytechnique;PUC-Rio;TUM;Bucknell", + "aff_campus_unique_index": ";1;2;", + "aff_campus_unique": ";Rio de Janeiro;Munich", + "aff_country_unique_index": "0+0;1;2;3;0+0", + "aff_country_unique": "Canada;Brazil;Germany;United States" + }, + { + "id": "article-26945", + "title": "Optimal Execution via Multi-Objective Multi-Armed Bandits (Student Abstract)", + "track": "aaai student abstract and poster program", + 
"status": "Technical", + "abstract": "When trying to liquidate a large quantity of a particular stock, the price of that stock is likely to be affected by trades, thus leading to a reduced expected return if we were to sell the entire quantity at once. This leads to the problem of optimal execution, where the aim is to split the sell order into several smaller sell orders over the course of a period of time, to optimally balance stock price with market risk. This problem can be defined in terms of difference equations. Here, we show how we can reformulate this as a multi-objective problem, which we solve with a novel multi-armed bandit algorithm.", + "primary_area": "", + "author": "Francois Buet-Golfouse; Peter Hill", + "authorids": "", + "aff": "University College London; Independent Researcher", + "bibtex": "@article{Buet-Golfouse_Hill_2024, title={Optimal Execution via Multi-Objective Multi-Armed Bandits (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26945}, DOI={10.1609/aaai.v37i13.26945}, abstractNote={When trying to liquidate a large quantity of a particular stock, the price of that stock is likely to be affected by trades, thus leading to a reduced expected return if we were to sell the entire quantity at once. This leads to the problem of optimal execution, where the aim is to split the sell order into several smaller sell orders over the course of a period of time, to optimally balance stock price with market risk. This problem can be defined in terms of difference equations. 
Here, we show how we can reformulate this as a multi-objective problem, which we solve with a novel multi-armed bandit algorithm.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Buet-Golfouse, Francois and Hill, Peter}, year={2024}, month={Jul.}, pages={16170-16171} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26945/26717", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26945", + "pdf_size": 103516, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11740475942437275144&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ucl.ac.uk;gmail.com", + "email": "ucl.ac.uk;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University College London;Independent Researcher", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucl.ac.uk;", + "aff_unique_abbr": "UCL;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom;" + }, + { + "id": "article-26458", + "title": "Optimal Pathfinding on Weighted Grid Maps", + "track": "main", + "status": "Technical", + "abstract": "In many computer games up to hundreds of agents navigate in real-time across a dynamically changing weighted grid map. Pathfinding in these situations is challenging because the grids are large, traversal costs are not uniform, and because each shortest path has many symmetric permutations, all of which must be considered by an optimal online search. In this work we introduce Weighted Jump Point Search (JPSW), a new type of pathfinding algorithm which breaks weighted grid symmetries by introducing a tiebreaking policy that allows us to apply effective pruning rules in symmetric regions. 
We show that these pruning rules preserve at least one optimal path to every grid cell and that their application can yield large performance improvements for optimal pathfinding. We give a complete theoretical description of the new algorithm, including pseudo-code. We also conduct a wide-ranging experimental evaluation, including data from real games. Results indicate JPSW is up to orders of magnitude faster than the nearest baseline, online search using A*.", + "primary_area": "search and optimization", + "author": "Mark Carlson; Sajjad K. Moghadam; Daniel D. Harabor; Peter J. Stuckey; Morteza Ebrahimi", + "authorids": "", + "aff": "Department of Data Science and Artificial Intelligence, Monash University; University of Tehran; Department of Data Science and Artificial Intelligence, Monash University; Department of Data Science and Artificial Intelligence, Monash University; University of Tehran", + "bibtex": "@article{Carlson_Moghadam_Harabor_Stuckey_Ebrahimi_2023, title={Optimal Pathfinding on Weighted Grid Maps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26458}, DOI={10.1609/aaai.v37i10.26458}, abstractNote={In many computer games up to hundreds of agents navigate in real-time across a dynamically changing weighted grid map. Pathfinding in these situations is challenging because the grids are large, traversal costs are not uniform, and because each shortest path has many symmetric permutations, all of which must be considered by an optimal online search. In this work we introduce Weighted Jump Point Search (JPSW), a new type of pathfinding algorithm which breaks weighted grid symmetries by introducing a tiebreaking policy that allows us to apply effective pruning rules in symmetric regions. We show that these pruning rules preserve at least one optimal path to every grid cell and that their application can yield large performance improvements for optimal pathfinding. 
We give a complete theoretical description of the new algorithm, including pseudo-code. We also conduct a wide-ranging experimental evaluation, including data from real games. Results indicate JPSW is up to orders of magnitude faster than the nearest baseline, online search using A*.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carlson, Mark and Moghadam, Sajjad K. and Harabor, Daniel D. and Stuckey, Peter J. and Ebrahimi, Morteza}, year={2023}, month={Jun.}, pages={12373-12380} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26458/26230", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26458", + "pdf_size": 586431, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11112472741797971468&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "minuskelvin.net;ut.ac.ir;monash.edu;monash.edu;ut.ac.ir", + "email": "minuskelvin.net;ut.ac.ir;monash.edu;monash.edu;ut.ac.ir", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;1", + "aff_unique_norm": "Monash University;University of Tehran", + "aff_unique_dep": "Department of Data Science and Artificial Intelligence;", + "aff_unique_url": "https://www.monash.edu;https://ut.ac.ir", + "aff_unique_abbr": "Monash;UT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;1", + "aff_country_unique": "Australia;Iran" + }, + { + "id": "article-25716", + "title": "Optimal Pricing Schemes for Identical Items with Time-Sensitive Buyers", + "track": "main", + "status": "Technical", + "abstract": "Time or money? That is a question! In this paper, we consider this dilemma in the pricing regime, in which we try to find the optimal pricing scheme for identical items with heterogenous time-sensitive buyers. We characterize the revenue-optimal solution and propose an efficient algorithm to find it in a Bayesian setting. 
Our results also demonstrate the tight ratio between the value of wasted time and the seller's revenue, as well as that of two common-used pricing schemes, the k-step function and the fixed pricing. To explore the nature of the optimal scheme in the general setting, we present the closed forms over the product distribution and show by examples that positive correlation between the valuation of the item and the cost per unit time could help increase revenue. To the best of our knowledge, it is the first step towards understanding the impact of the time factor as a part of the buyer cost in pricing problems, in the computational view.", + "primary_area": "game theory and economic paradigms", + "author": "Zhengyang Liu; Liang Shan; Zihe Wang", + "authorids": "", + "aff": "Beijing Institute of Technology; Renmin University of China; Renmin University of China", + "bibtex": "@article{Liu_Shan_Wang_2023, title={Optimal Pricing Schemes for Identical Items with Time-Sensitive Buyers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25716}, DOI={10.1609/aaai.v37i5.25716}, abstractNote={Time or money? That is a question! In this paper, we consider this dilemma in the pricing regime, in which we try to find the optimal pricing scheme for identical items with heterogenous time-sensitive buyers. We characterize the revenue-optimal solution and propose an efficient algorithm to find it in a Bayesian setting. Our results also demonstrate the tight ratio between the value of wasted time and the seller\u2019s revenue, as well as that of two common-used pricing schemes, the k-step function and the fixed pricing. To explore the nature of the optimal scheme in the general setting, we present the closed forms over the product distribution and show by examples that positive correlation between the valuation of the item and the cost per unit time could help increase revenue. 
To the best of our knowledge, it is the first step towards understanding the impact of the time factor as a part of the buyer cost in pricing problems, in the computational view.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Zhengyang and Shan, Liang and Wang, Zihe}, year={2023}, month={Jun.}, pages={5773-5780} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25716/25488", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25716", + "pdf_size": 151822, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:A2-5mlm4IQQJ:scholar.google.com/&scioq=Optimal+Pricing+Schemes+for+Identical+Items+with+Time-Sensitive+Buyers&hl=en&as_sdt=0,33", + "gs_version_total": 6, + "aff_domain": "bit.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "bit.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Beijing Institute of Technology;Renmin University of China", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bit.edu.cn/;http://www.ruc.edu.cn", + "aff_unique_abbr": "BIT;RUC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25827", + "title": "Optimal Sparse Recovery with Decision Stumps", + "track": "main", + "status": "Technical", + "abstract": "Decision trees are widely used for their low computational cost, good\n predictive performance, and ability to assess the importance of features.\n Though often used in practice for feature selection, the theoretical\n guarantees of these methods are not well understood. We here obtain a tight\n finite sample bound for the feature selection problem in linear regression\n using single-depth decision trees. 
We examine the statistical properties of\n these \"decision stumps\" for the recovery of the s active features from p\n total features, where s << p. Our analysis provides tight sample performance guarantees on\n high-dimensional sparse systems which align with the finite sample bound of\n O(s log p) as obtained by Lasso, improving upon previous bounds for both\n the median and optimal splitting criteria. Our results extend to the\n non-linear regime as well as arbitrary sub-Gaussian distributions,\n demonstrating that tree based methods attain strong feature selection\n properties under a wide variety of settings and further shedding light on the\n success of these methods in practice. As a byproduct of our analysis, we show\n that we can provably guarantee recovery even when the number of active\n features s is unknown.\n We further validate our theoretical results and proof methodology\n using computational experiments.", + "primary_area": "machine learning i", + "author": "Kiarash Banihashem; Mohammad Hajiaghayi; Max Springer", + "authorids": "", + "aff": "University of Maryland; University of Maryland; University of Maryland", + "bibtex": "@article{Banihashem_Hajiaghayi_Springer_2023, title={Optimal Sparse Recovery with Decision Stumps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25827}, DOI={10.1609/aaai.v37i6.25827}, abstractNote={Decision trees are widely used for their low computational cost, good predictive performance, and ability to assess the importance of features. Though often used in practice for feature selection, the theoretical guarantees of these methods are not well understood. We here obtain a tight finite sample bound for the feature selection problem in linear regression using single-depth decision trees. We examine the statistical properties of these "decision stumps" for the recovery of the s active features from p total features, where s << p. 
Our analysis provides tight sample performance guarantees on high-dimensional sparse systems which align with the finite sample bound of O(s log p) as obtained by Lasso, improving upon previous bounds for both the median and optimal splitting criteria. Our results extend to the non-linear regime as well as arbitrary sub-Gaussian distributions, demonstrating that tree based methods attain strong feature selection properties under a wide variety of settings and further shedding light on the success of these methods in practice. As a byproduct of our analysis, we show that we can provably guarantee recovery even when the number of active features s is unknown. We further validate our theoretical results and proof methodology using computational experiments.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Banihashem, Kiarash and Hajiaghayi, Mohammad and Springer, Max}, year={2023}, month={Jun.}, pages={6745-6752} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25827/25599", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25827", + "pdf_size": 291027, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:VdaNe7BabZMJ:scholar.google.com/&scioq=Optimal+Sparse+Recovery+with+Decision+Stumps&hl=en&as_sdt=0,23", + "gs_version_total": 5, + "aff_domain": "umd.edu;cs.umd.edu;umd.edu", + "email": "umd.edu;cs.umd.edu;umd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Maryland", + "aff_unique_dep": "", + "aff_unique_url": "https://www.umd.edu", + "aff_unique_abbr": "UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26334", + "title": "Optimal Sparse Regression Trees", + "track": "main", + "status": "Technical", + "abstract": "Regression trees are one of the oldest forms of AI 
models, and their predictions can be made without a calculator, which makes them broadly useful, particularly for high-stakes applications. Within the large literature on regression trees, there has been little effort towards full provable optimization, mainly due to the computational hardness of the problem. This work proposes a dynamic programming-with-bounds approach to the construction of provably-optimal sparse regression trees. We leverage a novel lower bound based on an optimal solution to the k-Means clustering algorithm on one dimensional data. We are often able to find optimal sparse trees in seconds, even for challenging datasets that involve large numbers of samples and highly-correlated features.", + "primary_area": "machine learning iv", + "author": "Rui Zhang; Rui Xin; Margo Seltzer; Cynthia Rudin", + "authorids": "", + "aff": "Duke University; Duke University; University of British Columbia; Duke University", + "bibtex": "@article{Zhang_Xin_Seltzer_Rudin_2023, title={Optimal Sparse Regression Trees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26334}, DOI={10.1609/aaai.v37i9.26334}, abstractNote={Regression trees are one of the oldest forms of AI models, and their predictions can be made without a calculator, which makes them broadly useful, particularly for high-stakes applications. Within the large literature on regression trees, there has been little effort towards full provable optimization, mainly due to the computational hardness of the problem. This work proposes a dynamic programming-with-bounds approach to the construction of provably-optimal sparse regression trees. We leverage a novel lower bound based on an optimal solution to the k-Means clustering algorithm on one dimensional data. 
We are often able to find optimal sparse trees in seconds, even for challenging datasets that involve large numbers of samples and highly-correlated features.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Rui and Xin, Rui and Seltzer, Margo and Rudin, Cynthia}, year={2023}, month={Jun.}, pages={11270-11279} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26334/26106", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26334", + "pdf_size": 368427, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2354979340169555338&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": "duke.edu;duke.edu;cs.ubc.ca;duke.edu", + "email": "duke.edu;duke.edu;cs.ubc.ca;duke.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Duke University;University of British Columbia", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.duke.edu;https://www.ubc.ca", + "aff_unique_abbr": "Duke;UBC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Vancouver", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-26025", + "title": "Optimism in Face of a Context:Regret Guarantees for Stochastic Contextual MDP", + "track": "main", + "status": "Technical", + "abstract": "We present regret minimization algorithms for stochastic contextual MDPs under minimum reachability assumption, using an access to an offline least square regression oracle.\nWe analyze three different settings: where the dynamics is known, where the dynamics is unknown but independent of the context and the most challenging setting where the dynamics is unknown and context-dependent. 
For the latter, our algorithm obtains regret bound (up to poly-logarithmic factors) of order (H+1/p\u2098\u1d62\u2099)H|S|\u00b3\u141f\u00b2(|A|Tlog(max{|P|,|F|} /\u03b4))\u00b9\u141f\u00b2 with probability 1\u2212\u03b4, where P and F are finite and realizable function classes used to approximate the dynamics and rewards respectively, p\u2098\u1d62\u2099 is the minimum reachability parameter, S is the set of states, A the set of actions, H the horizon, and T the number of episodes.\nTo our knowledge, our approach is the first optimistic approach applied to contextual MDPs with general function approximation (i.e., without additional knowledge regarding the function class, such as it being linear and etc.).\nWe present a lower bound of \u03a9((TH|S||A|ln|F| /ln|A| )\u00b9\u141f\u00b2 ), on the expected regret which holds even in the case of known dynamics.\nLastly, we discuss an extension of our results to CMDPs without minimum reachability, that obtains order of T\u00b3\u141f\u2074 regret.", + "primary_area": "machine learning ii", + "author": "Orin Levy; Yishay Mansour", + "authorids": "", + "aff": "Tel Aviv University; Tel Aviv University + Google Research, Tel Aviv", + "bibtex": "@article{Levy_Mansour_2023, title={Optimism in Face of a Context:Regret Guarantees for Stochastic Contextual MDP}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26025}, DOI={10.1609/aaai.v37i7.26025}, abstractNote={We present regret minimization algorithms for stochastic contextual MDPs under minimum reachability assumption, using an access to an offline least square regression oracle.\nWe analyze three different settings: where the dynamics is known, where the dynamics is unknown but independent of the context and the most challenging setting where the dynamics is unknown and context-dependent. 
For the latter, our algorithm obtains regret bound (up to poly-logarithmic factors) of order (H+1/p\u2098\u1d62\u2099)H|S|\u00b3\u141f\u00b2(|A|Tlog(max{|P|,|F|} /\u03b4))\u00b9\u141f\u00b2 with probability 1\u2212\u03b4, where P and F are finite and realizable function classes used to approximate the dynamics and rewards respectively, p\u2098\u1d62\u2099 is the minimum reachability parameter, S is the set of states, A the set of actions, H the horizon, and T the number of episodes.\nTo our knowledge, our approach is the first optimistic approach applied to contextual MDPs with general function approximation (i.e., without additional knowledge regarding the function class, such as it being linear and etc.).\nWe present a lower bound of \u03a9((TH|S||A|ln|F| /ln|A| )\u00b9\u141f\u00b2 ), on the expected regret which holds even in the case of known dynamics.\nLastly, we discuss an extension of our results to CMDPs without minimum reachability, that obtains order of T\u00b3\u141f\u2074 regret.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Levy, Orin and Mansour, Yishay}, year={2023}, month={Jun.}, pages={8510-8517} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26025/25797", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26025", + "pdf_size": 212659, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3429933994294288466&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.tau.ac.il;gmail.com", + "email": "mail.tau.ac.il;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Tel Aviv University;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.tau.ac.il;https://research.google", + "aff_unique_abbr": "TAU;Google", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Tel Aviv", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "Israel" + }, 
+ { + "id": "article-26207", + "title": "Optimistic Whittle Index Policy: Online Learning for Restless Bandits", + "track": "main", + "status": "Technical", + "abstract": "Restless multi-armed bandits (RMABs) extend multi-armed bandits to allow for stateful arms, where the state of each arm evolves restlessly with different transitions depending on whether that arm is pulled. Solving RMABs requires information on transition dynamics, which are often unknown upfront. To plan in RMAB settings with unknown transitions, we propose the first online learning algorithm based on the Whittle index policy, using an upper confidence bound (UCB) approach to learn transition dynamics. Specifically, we estimate confidence bounds of the transition probabilities and formulate a bilinear program to compute optimistic Whittle indices using these estimates. Our algorithm, UCWhittle, achieves sublinear O(H \\sqrt{T log T}) frequentist regret to solve RMABs with unknown transitions in T episodes with a constant horizon H. 
Empirically, we demonstrate that UCWhittle leverages the structure of RMABs and the Whittle index policy solution to achieve better performance than existing online learning baselines across three domains, including one constructed from a real-world maternal and childcare dataset.", + "primary_area": "machine learning iii", + "author": "Kai Wang; Lily Xu; Aparna Taneja; Milind Tambe", + "authorids": "", + "aff": "Harvard University; Harvard University + Google Research; Google Research; Harvard University + Google Research", + "bibtex": "@article{Wang_Xu_Taneja_Tambe_2023, title={Optimistic Whittle Index Policy: Online Learning for Restless Bandits}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26207}, DOI={10.1609/aaai.v37i8.26207}, abstractNote={Restless multi-armed bandits (RMABs) extend multi-armed bandits to allow for stateful arms, where the state of each arm evolves restlessly with different transitions depending on whether that arm is pulled. Solving RMABs requires information on transition dynamics, which are often unknown upfront. To plan in RMAB settings with unknown transitions, we propose the first online learning algorithm based on the Whittle index policy, using an upper confidence bound (UCB) approach to learn transition dynamics. Specifically, we estimate confidence bounds of the transition probabilities and formulate a bilinear program to compute optimistic Whittle indices using these estimates. Our algorithm, UCWhittle, achieves sublinear O(H \\sqrt{T log T}) frequentist regret to solve RMABs with unknown transitions in T episodes with a constant horizon H. 
Empirically, we demonstrate that UCWhittle leverages the structure of RMABs and the Whittle index policy solution to achieve better performance than existing online learning baselines across three domains, including one constructed from a real-world maternal and childcare dataset.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Kai and Xu, Lily and Taneja, Aparna and Tambe, Milind}, year={2023}, month={Jun.}, pages={10131-10139} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26207/25979", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26207", + "pdf_size": 295479, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8212275668083412514&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "g.harvard.edu;g.harvard.edu;google.com;google.com", + "email": "g.harvard.edu;g.harvard.edu;google.com;google.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;1;0+1", + "aff_unique_norm": "Harvard University;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.harvard.edu;https://research.google", + "aff_unique_abbr": "Harvard;Google Research", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0+0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25703", + "title": "Optimizing Multiple Simultaneous Objectives for Voting and Facility Location", + "track": "main", + "status": "Technical", + "abstract": "We study the classic facility location setting, where we are given n clients and m possible facility locations in some arbitrary metric space, and want to choose a location to build a facility. 
The exact same setting also arises in spatial social choice, where voters are the clients and the goal is to choose a candidate or outcome, with the distance from a voter to an outcome representing the cost of this outcome for the voter (e.g., based on their ideological differences). Unlike most previous work, we do not focus on a single objective to optimize (e.g., the total distance from clients to the facility, or the maximum distance, etc.), but instead attempt to optimize several different objectives simultaneously. More specifically, we consider the l-centrum family of objectives, which includes the total distance, max distance, and many others. We present tight bounds on how well any pair of such objectives (e.g., max and sum) can be simultaneously approximated compared to their optimum outcomes. In particular, we show that for any such pair of objectives, it is always possible to choose an outcome which simultaneously approximates both objectives within a factor of 1 plus square root of 2, and give a precise characterization of how this factor improves as the two objectives being optimized become more similar. For q>2 different centrum objectives, we show that it is always possible to approximate all q of these objectives within a small constant, and that this constant approaches 3 as q increases. 
Our results show that when optimizing only a few simultaneous objectives, it is always possible to form an outcome which is a significantly better than 3 approximation for all of these objectives.", + "primary_area": "game theory and economic paradigms", + "author": "Yue Han; Christopher Jerrett; Elliot Anshelevich", + "authorids": "", + "aff": "Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, New York 12180; Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, New York 12180; Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, New York 12180", + "bibtex": "@article{Han_Jerrett_Anshelevich_2023, title={Optimizing Multiple Simultaneous Objectives for Voting and Facility Location}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25703}, DOI={10.1609/aaai.v37i5.25703}, abstractNote={We study the classic facility location setting, where we are given n clients and m possible facility locations in some arbitrary metric space, and want to choose a location to build a facility. The exact same setting also arises in spatial social choice, where voters are the clients and the goal is to choose a candidate or outcome, with the distance from a voter to an outcome representing the cost of this outcome for the voter (e.g., based on their ideological differences). Unlike most previous work, we do not focus on a single objective to optimize (e.g., the total distance from clients to the facility, or the maximum distance, etc.), but instead attempt to optimize several different objectives simultaneously. More specifically, we consider the l-centrum family of objectives, which includes the total distance, max distance, and many others. We present tight bounds on how well any pair of such objectives (e.g., max and sum) can be simultaneously approximated compared to their optimum outcomes. 
In particular, we show that for any such pair of objectives, it is always possible to choose an outcome which simultaneously approximates both objectives within a factor of 1 plus square root of 2, and give a precise characterization of how this factor improves as the two objectives being optimized become more similar. For q>2 different centrum objectives, we show that it is always possible to approximate all q of these objectives within a small constant, and that this constant approaches 3 as q increases. Our results show that when optimizing only a few simultaneous objectives, it is always possible to form an outcome which is a significantly better than 3 approximation for all of these objectives.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Han, Yue and Jerrett, Christopher and Anshelevich, Elliot}, year={2023}, month={Jun.}, pages={5665-5672} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25703/25475", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25703", + "pdf_size": 239206, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1283844925512781435&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "rpi.edu;rpi.edu;cs.rpi.edu", + "email": "rpi.edu;rpi.edu;cs.rpi.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Rensselaer Polytechnic Institute", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rpi.edu", + "aff_unique_abbr": "RPI", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Troy", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26627", + "title": "Orders Are Unwanted: Dynamic Deep Graph Convolutional Network for Personality Detection", + "track": "main", + "status": "Technical", + "abstract": "Predicting personality traits based on online posts has emerged as an important task in many 
fields such as social network analysis. One of the challenges of this task is assembling information from various posts into an overall profile for each user. While many previous solutions simply concatenate the posts into a long text and then encode the text by sequential or hierarchical models, they introduce unwarranted orders for the posts, which may mislead the models. In this paper, we propose a dynamic deep graph convolutional network (D-DGCN) to overcome the above limitation. Specifically, we design a learn-to-connect approach that adopts a dynamic multi-hop structure instead of a deterministic structure, and combine it with the DGCN module to automatically learn the connections between posts. The modules of post encoder, learn-to-connect, and DGCN are jointly trained in an end-to-end manner. Experimental results on the Kaggle and Pandora datasets show the superior performance of D-DGCN to state-of-the-art baselines. Our code is available at https://github.com/djz233/D-DGCN.", + "primary_area": "speech natural language processing", + "author": "Tao Yang; Jinghao Deng; Xiaojun Quan; Qifan Wang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; Meta AI", + "bibtex": "@article{Yang_Deng_Quan_Wang_2023, title={Orders Are Unwanted: Dynamic Deep Graph Convolutional Network for Personality Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26627}, DOI={10.1609/aaai.v37i11.26627}, abstractNote={Predicting personality traits based on online posts has emerged as an important task in many fields such as social network analysis. One of the challenges of this task is assembling information from various posts into an overall profile for each user. 
While many previous solutions simply concatenate the posts into a long text and then encode the text by sequential or hierarchical models, they introduce unwarranted orders for the posts, which may mislead the models. In this paper, we propose a dynamic deep graph convolutional network (D-DGCN) to overcome the above limitation. Specifically, we design a learn-to-connect approach that adopts a dynamic multi-hop structure instead of a deterministic structure, and combine it with the DGCN module to automatically learn the connections between posts. The modules of post encoder, learn-to-connect, and DGCN are jointly trained in an end-to-end manner. Experimental results on the Kaggle and Pandora datasets show the superior performance of D-DGCN to state-of-the-art baselines. Our code is available at https://github.com/djz233/D-DGCN.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Tao and Deng, Jinghao and Quan, Xiaojun and Wang, Qifan}, year={2023}, month={Jun.}, pages={13896-13904} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26627/26399", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26627", + "pdf_size": 409694, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17438755827185052069&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn;fb.com", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn;fb.com", + "github": "https://github.com/djz233/D-DGCN", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Sun Yat-sen University;Meta Platforms, Inc.", + "aff_unique_dep": "School of Computer Science and Engineering;Meta AI", + "aff_unique_url": "http://www.sysu.edu.cn;https://meta.com", + "aff_unique_abbr": "SYSU;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1", + 
"aff_country_unique": "China;United States" + }, + { + "id": "article-27012", + "title": "Ordinal Programmatic Weak Supervision and Crowdsourcing for Estimating Cognitive States (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Crowdsourcing and weak supervision offer methods to efficiently label large datasets. Our work builds on existing weak supervision models to accommodate ordinal target classes, in an effort to recover ground truth from weak, external labels.\nWe define a parameterized factor function and show that our approach improves over other baselines.", + "primary_area": "", + "author": "Prakruthi Pradeep; Benedikt Boecking; Nicholas Gisolfi; Jacob R. Kintz; Torin K. Clark; Artur Dubrawski", + "authorids": "", + "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; The University of Colorado Boulder; The University of Colorado Boulder; Carnegie Mellon University", + "bibtex": "@article{Pradeep_Boecking_Gisolfi_Kintz_Clark_Dubrawski_2024, title={Ordinal Programmatic Weak Supervision and Crowdsourcing for Estimating Cognitive States (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27012}, DOI={10.1609/aaai.v37i13.27012}, abstractNote={Crowdsourcing and weak supervision offer methods to efficiently label large datasets. Our work builds on existing weak supervision models to accommodate ordinal target classes, in an effort to recover ground truth from weak, external labels.\nWe define a parameterized factor function and show that our approach improves over other baselines.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pradeep, Prakruthi and Boecking, Benedikt and Gisolfi, Nicholas and Kintz, Jacob R. and Clark, Torin K. 
and Dubrawski, Artur}, year={2024}, month={Jul.}, pages={16304-16305} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27012/26784", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27012", + "pdf_size": 74157, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:Or1QlJZ8btEJ:scholar.google.com/&scioq=Ordinal+Programmatic+Weak+Supervision+and+Crowdsourcing+for+Estimating+Cognitive+States+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "andrew.cmu.edu; ; ; ; ; ", + "email": "andrew.cmu.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;0", + "aff_unique_norm": "Carnegie Mellon University;University of Colorado Boulder", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cmu.edu;https://www.colorado.edu", + "aff_unique_abbr": "CMU;CU Boulder", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Boulder", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26732", + "title": "Out-of-Distribution Detection Is Not All You Need", + "track": "aaai special track", + "status": "Technical", + "abstract": "The usage of deep neural networks in safety-critical systems is limited by our ability to guarantee their correct behavior. Runtime monitors are components aiming to identify unsafe predictions and discard them before they can lead to catastrophic consequences. Several recent works on runtime monitoring have focused on out-of-distribution (OOD) detection, i.e., identifying inputs that are different from the training data. In this work, we argue that OOD detection is not a well-suited framework to design efficient runtime monitors and that it is more relevant to evaluate monitors based on their ability to discard incorrect predictions. We call this setting out-of-model-scope detection and discuss the conceptual differences with OOD. 
We also conduct extensive experiments on popular datasets from the literature to show that studying monitors in the OOD setting can be misleading: 1. very good OOD results can give a false impression of safety, 2. comparison under the OOD setting does not allow identifying the best monitor to detect errors. Finally, we also show that removing erroneous training data samples helps to train better monitors.", + "primary_area": "safe and robust ai", + "author": "Joris Guerin; Kevin Delmas; Raul Ferreira; J\u00e9r\u00e9mie Guiochet", + "authorids": "", + "aff": "Espace-Dev, IRD, Universit\u00e9 de Montpellier, Montpellier, France + LAAS-CNRS, Universit\u00e9 de Toulouse, Toulouse, France; ONERA, Toulouse, France; LAAS-CNRS, Universit\u00e9 de Toulouse, Toulouse, France; LAAS-CNRS, Universit\u00e9 de Toulouse, Toulouse, France", + "bibtex": "@article{Guerin_Delmas_Ferreira_Guiochet_2023, title={Out-of-Distribution Detection Is Not All You Need}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26732}, DOI={10.1609/aaai.v37i12.26732}, abstractNote={The usage of deep neural networks in safety-critical systems is limited by our ability to guarantee their correct behavior. Runtime monitors are components aiming to identify unsafe predictions and discard them before they can lead to catastrophic consequences. Several recent works on runtime monitoring have focused on out-of-distribution (OOD) detection, i.e., identifying inputs that are different from the training data. In this work, we argue that OOD detection is not a well-suited framework to design efficient runtime monitors and that it is more relevant to evaluate monitors based on their ability to discard incorrect predictions. We call this setting out-of-model-scope detection and discuss the conceptual differences with OOD. We also conduct extensive experiments on popular datasets from the literature to show that studying monitors in the OOD setting can be misleading: 1. 
very good OOD results can give a false impression of safety, 2. comparison under the OOD setting does not allow identifying the best monitor to detect errors. Finally, we also show that removing erroneous training data samples helps to train better monitors.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guerin, Joris and Delmas, Kevin and Ferreira, Raul and Guiochet, J\u00e9r\u00e9mie}, year={2023}, month={Jun.}, pages={14829-14837} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26732/26504", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26732", + "pdf_size": 3606934, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18200706157470174339&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 12, + "aff_domain": "ird.fr;onera.fr;laas.fr;laas.fr", + "email": "ird.fr;onera.fr;laas.fr;laas.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;1;1", + "aff_unique_norm": "Universit\u00e9 de Montpellier;Universit\u00e9 de Toulouse;ONERA", + "aff_unique_dep": "Espace-Dev;LAAS-CNRS;", + "aff_unique_url": "https://www.univ-montp2.fr;https://www.univ-toulouse.fr;https://www.onera.fr", + "aff_unique_abbr": "UM;UT;ONERA", + "aff_campus_unique_index": "0+1;1;1;1", + "aff_campus_unique": "Montpellier;Toulouse", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-26444", + "title": "Out-of-Distribution Generalization by Neural-Symbolic Joint Training", + "track": "main", + "status": "Technical", + "abstract": "This paper develops a novel methodology to simultaneously learn a neural network and extract generalized logic rules. Different from prior neural-symbolic methods that require background knowledge and candidate logical rules to be provided, we aim to induce task semantics with minimal priors. 
This is achieved by a two-step learning framework that iterates between optimizing neural predictions of task labels and searching for a more accurate representation of the hidden task semantics. Notably, supervision works in both directions: (partially) induced task semantics guide the learning of the neural network and induced neural predictions admit an improved semantic representation. We demonstrate that our proposed framework is capable of achieving superior out-of-distribution generalization performance on two tasks: (i) learning multi-digit addition, where it is trained on short sequences of digits and tested on long sequences of digits; (ii) predicting the optimal action in the Tower of Hanoi, where the model is challenged to discover a policy independent of the number of disks in the puzzle.", + "primary_area": "reasoning under uncertainty", + "author": "Anji Liu; Hongming Xu; Guy Van den Broeck; Yitao Liang", + "authorids": "", + "aff": "Computer Science Department, University of California, Los Angeles; Beijing Institute of General Artificial Intelligence (BIGAI) + Computer Science Department, University of California, Los Angeles; Computer Science Department, University of California, Los Angeles; Institute for Artificial Intelligence, Peking University + Beijing Institute of General Artificial Intelligence (BIGAI)", + "bibtex": "@article{Liu_Xu_Van den Broeck_Liang_2023, title={Out-of-Distribution Generalization by Neural-Symbolic Joint Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26444}, DOI={10.1609/aaai.v37i10.26444}, abstractNote={This paper develops a novel methodology to simultaneously learn a neural network and extract generalized logic rules. Different from prior neural-symbolic methods that require background knowledge and candidate logical rules to be provided, we aim to induce task semantics with minimal priors. 
This is achieved by a two-step learning framework that iterates between optimizing neural predictions of task labels and searching for a more accurate representation of the hidden task semantics. Notably, supervision works in both directions: (partially) induced task semantics guide the learning of the neural network and induced neural predictions admit an improved semantic representation. We demonstrate that our proposed framework is capable of achieving superior out-of-distribution generalization performance on two tasks: (i) learning multi-digit addition, where it is trained on short sequences of digits and tested on long sequences of digits; (ii) predicting the optimal action in the Tower of Hanoi, where the model is challenged to discover a policy independent of the number of disks in the puzzle.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Anji and Xu, Hongming and Van den Broeck, Guy and Liang, Yitao}, year={2023}, month={Jun.}, pages={12252-12259} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26444/26216", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26444", + "pdf_size": 385565, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14926662589736231851&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cs.ucla.edu;bigai.ai;cs.ucla.edu;pku.edu.cn", + "email": "cs.ucla.edu;bigai.ai;cs.ucla.edu;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+0;0;2+1", + "aff_unique_norm": "University of California, Los Angeles;Beijing Institute of General Artificial Intelligence;Peking University", + "aff_unique_dep": "Computer Science Department;;Institute for Artificial Intelligence", + "aff_unique_url": "https://www.ucla.edu;https://www.bigai.ai;http://www.pku.edu.cn", + "aff_unique_abbr": "UCLA;BIGAI;PKU", + "aff_campus_unique_index": "0;0;0;", + "aff_campus_unique": "Los Angeles;", + 
"aff_country_unique_index": "0;1+0;0;1+1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26151", + "title": "Overcoming Concept Shift in Domain-Aware Settings through Consolidated Internal Distributions", + "track": "main", + "status": "Technical", + "abstract": "We develop an algorithm to improve the predictive performance of a pre-trained model under \\textit{concept shift} without retraining the model from scratch when only unannotated samples of initial concepts are accessible. We model this problem as a domain adaptation problem, where the source domain data is inaccessible during model adaptation. The core idea is based on consolidating the intermediate internal distribution, learned to represent the source domain data, after adapting the model. We provide theoretical analysis and conduct extensive experiments on five benchmark datasets to demonstrate that the proposed method is effective.", + "primary_area": "machine learning iii", + "author": "Mohammad Rostami; Aram Galstyan", + "authorids": "", + "aff": "Information Sciences Institute, University of Southern California; Information Sciences Institute, University of Southern California", + "bibtex": "@article{Rostami_Galstyan_2023, title={Overcoming Concept Shift in Domain-Aware Settings through Consolidated Internal Distributions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26151}, DOI={10.1609/aaai.v37i8.26151}, abstractNote={We develop an algorithm to improve the predictive performance of a pre-trained model under \\textit{concept shift} without retraining the model from scratch when only unannotated samples of initial concepts are accessible. We model this problem as a domain adaptation problem, where the source domain data is inaccessible during model adaptation. The core idea is based on consolidating the intermediate internal distribution, learned to represent the source domain data, after adapting the model. 
We provide theoretical analysis and conduct extensive experiments on five benchmark datasets to demonstrate that the proposed method is effective.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rostami, Mohammad and Galstyan, Aram}, year={2023}, month={Jun.}, pages={9623-9631} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26151/25923", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26151", + "pdf_size": 3770365, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10386926569772306824&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "isi.edu;isi.edu", + "email": "isi.edu;isi.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "Information Sciences Institute", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25671", + "title": "Overcoming Forgetting in Fine-Grained Urban Flow Inference via Adaptive Knowledge Replay", + "track": "main", + "status": "Technical", + "abstract": "Fine-grained urban flow inference (FUFI) problem aims at inferring the high-resolution flow maps from the coarse-grained ones, which plays an important role in sustainable and economic urban computing and traffic management. Previous models addressed the FUFI problem from spatial constraint, external factors, and memory cost. However, utilizing the new urban flow maps to calibrate the learned model is very challenging due to the \"catastrophic forgetting\" problem and is still under-explored. 
In this paper, we make the first step in FUFI and present CUFAR -- Continual Urban Flow inference with Adaptive knowledge Replay -- a novel framework for inferring the fine-grained citywide traffic flows. Specifically, (1) we design a spatial-temporal inference network that can extract better flow map features from both local and global levels; (2) then we present an adaptive knowledge replay (AKR) training algorithm to selectively replay the learned knowledge to facilitate the learning process of the model on new knowledge without forgetting. In addition, we also propose a knowledge discriminator to avoid \"negative replaying\" issue introduced by noisy urban flow maps. Extensive experiments on four large-scale real-world FUFI datasets demonstrate that our proposed model consistently outperforms strong baselines and effectively mitigates the forgetting problem. Source code is available at: https://github.com/PattonYu/CUFAR.", + "primary_area": "domain s of application", + "author": "Haoyang Yu; Xovee Xu; Ting Zhong; Fan Zhou", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; University of Electronic Science and Technology of China", + "bibtex": "@article{Yu_Xu_Zhong_Zhou_2023, title={Overcoming Forgetting in Fine-Grained Urban Flow Inference via Adaptive Knowledge Replay}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25671}, DOI={10.1609/aaai.v37i4.25671}, abstractNote={Fine-grained urban flow inference (FUFI) problem aims at inferring the high-resolution flow maps from the coarse-grained ones, which plays an important role in sustainable and economic urban computing and traffic management. Previous models addressed the FUFI problem from spatial constraint, external factors, and memory cost. 
However, utilizing the new urban flow maps to calibrate the learned model is very challenging due to the "catastrophic forgetting" problem and is still under-explored. In this paper, we make the first step in FUFI and present CUFAR -- Continual Urban Flow inference with Adaptive knowledge Replay -- a novel framework for inferring the fine-grained citywide traffic flows. Specifically, (1) we design a spatial-temporal inference network that can extract better flow map features from both local and global levels; (2) then we present an adaptive knowledge replay (AKR) training algorithm to selectively replay the learned knowledge to facilitate the learning process of the model on new knowledge without forgetting. In addition, we also propose a knowledge discriminator to avoid "negative replaying" issue introduced by noisy urban flow maps. Extensive experiments on four large-scale real-world FUFI datasets demonstrate that our proposed model consistently outperforms strong baselines and effectively mitigates the forgetting problem. 
Source code is available at: https://github.com/PattonYu/CUFAR.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Haoyang and Xu, Xovee and Zhong, Ting and Zhou, Fan}, year={2023}, month={Jun.}, pages={5393-5401} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25671/25443", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25671", + "pdf_size": 5694772, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8014130646673063458&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "outlook.com;live.com; fzhongting;uestc.edu.cn", + "email": "outlook.com;live.com; fzhongting;uestc.edu.cn", + "github": "https://github.com/PattonYu/CUFAR", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uestc.edu.cn", + "aff_unique_abbr": "UESTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25700", + "title": "PAC Learning and Stabilizing Hedonic Games: Towards a Unifying Approach.", + "track": "main", + "status": "Technical", + "abstract": "We study PAC learnability and PAC stabilizability of Hedonic Games (HGs), i.e., efficiently inferring preferences or core-stable partitions from samples. We first expand the known learnability/stabilizability landscape for some of the most prominent HGs classes, providing results for Friends and Enemies Games, Bottom Responsive, and Anonymous HGs. Then, having a broader view in mind, we attempt to shed light on the structural properties leading to learnability/stabilizability, or lack thereof, for specific HGs classes. Along this path, we focus on the fully expressive Hedonic Coalition Nets representation of HGs. 
We identify two sets of conditions that lead to efficient learnability, and which encompass all of the known positive learnability results. On the side of stability, we reveal that, while the freedom of choosing an ad hoc adversarial distribution is the most obvious hurdle to achieving PAC stability, it is not the only one. First, we show a distribution independent necessary condition for PAC stability. Then, we focus on W-games, where players have individual preferences over other players and evaluate coalitions based on the least preferred member. We prove that these games are PAC stabilizable under the class of bounded distributions, which assign positive probability mass to all coalitions. Finally, we discuss why such a result is not easily extendable to other HGs classes even in this promising scenario. Namely, we establish a purely computational property necessary for achieving PAC stability.", + "primary_area": "game theory and economic paradigms", + "author": "Simone Fioravanti; Michele Flammini; Bojana Kodric; Giovanna Varricchio", + "authorids": "", + "aff": "Gran Sasso Science Institute (GSSI), L\u2019Aquila, Italy + University of Calabria, Rende, Italy; Gran Sasso Science Institute (GSSI), L\u2019Aquila, Italy + University of Calabria, Rende, Italy; Ca\u2019 Foscari University of Venice, Venice, Italy + Gran Sasso Science Institute (GSSI), L\u2019Aquila, Italy; Goethe-Universit\u00e4t, Frankfurt am Main, Germany", + "bibtex": "@article{Fioravanti_Flammini_Kodric_Varricchio_2023, title={PAC Learning and Stabilizing Hedonic Games: Towards a Unifying Approach.}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25700}, DOI={10.1609/aaai.v37i5.25700}, abstractNote={We study PAC learnability and PAC stabilizability of Hedonic Games (HGs), i.e., efficiently inferring preferences or core-stable partitions from samples. 
We first expand the known learnability/stabilizability landscape for some of the most prominent HGs classes, providing results for Friends and Enemies Games, Bottom Responsive, and Anonymous HGs. Then, having a broader view in mind, we attempt to shed light on the structural properties leading to learnability/stabilizability, or lack thereof, for specific HGs classes. Along this path, we focus on the fully expressive Hedonic Coalition Nets representation of HGs. We identify two sets of conditions that lead to efficient learnability, and which encompass all of the known positive learnability results. On the side of stability, we reveal that, while the freedom of choosing an ad hoc adversarial distribution is the most obvious hurdle to achieving PAC stability, it is not the only one. First, we show a distribution independent necessary condition for PAC stability. Then, we focus on W-games, where players have individual preferences over other players and evaluate coalitions based on the least preferred member. We prove that these games are PAC stabilizable under the class of bounded distributions, which assign positive probability mass to all coalitions. Finally, we discuss why such a result is not easily extendable to other HGs classes even in this promising scenario. 
Namely, we establish a purely computational property necessary for achieving PAC stability.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fioravanti, Simone and Flammini, Michele and Kodric, Bojana and Varricchio, Giovanna}, year={2023}, month={Jun.}, pages={5641-5648} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25700/25472", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25700", + "pdf_size": 160649, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12409300334843753803&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "gssi.it;gssi.it;unive.it;em.uni-frankfurt.de", + "email": "gssi.it;gssi.it;unive.it;em.uni-frankfurt.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2+0;3", + "aff_unique_norm": "Gran Sasso Science Institute;University of Calabria;Ca\u2019 Foscari University of Venice;Goethe University Frankfurt", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.gssi.it;https://www.unical.it;https://www.unive.it;https://www.uni-frankfurt.de", + "aff_unique_abbr": "GSSI;;Ca\u2019 Foscari;GU Frankfurt", + "aff_campus_unique_index": "0+1;0+1;2+0;3", + "aff_campus_unique": "L\u2019Aquila;Rende;Venice;Frankfurt am Main", + "aff_country_unique_index": "0+0;0+0;0+0;1", + "aff_country_unique": "Italy;Germany" + }, + { + "id": "article-26873", + "title": "PARCS: A Deployment-Oriented AI System for Robust Parcel-Level Cropland Segmentation of Satellite Images", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Cropland segmentation of satellite images is an essential basis for crop area and yield estimation tasks in the remote sensing and computer vision interdisciplinary community. 
Instead of common pixel-level segmentation results with salt-and-pepper effects, a parcel-level output conforming to human recognition is required according to the clients' needs during the model deployment. However, leveraging CNN-based models requires fine-grained parcel-level labels, which is an unacceptable annotation burden. To cure these practical pain points, in this paper, we present PARCS, a holistic deployment-oriented AI system for PARcel-level Cropland Segmentation. By consolidating multi-disciplinary knowledge, PARCS has two algorithm branches. The first branch performs pixel-level crop segmentation by learning from limited labeled pixel samples with an active learning strategy to avoid parcel-level annotation costs. The second branch aims at generating the parcel regions without a learning procedure. The final parcel-level segmentation result is achieved by integrating the outputs of these two branches in tandem. The robust effectiveness of PARCS is demonstrated by its outstanding performance on public and in-house datasets (an overall accuracy of 85.3% and an mIoU of 61.7% on the public PASTIS dataset, and an mIoU of 65.16% on the in-house dataset). 
We also include subjective feedback from clients and discuss the lessons learned from deployment.", + "primary_area": "nnovative inter disciplinary ai integration", + "author": "Chen Du; Yiwei Wang; Zhicheng Yang; Hang Zhou; Mei Han; Jui-Hsin Lai", + "authorids": "", + "aff": "PAII Inc., Palo Alto, CA, USA; University of Science and Technology of China, Hefei, Anhui, China + Ping An Technology, Shenzhen, Guangdong, China; PAII Inc., Palo Alto, CA, USA; PAII Inc., Palo Alto, CA, USA; PAII Inc., Palo Alto, CA, USA; PAII Inc., Palo Alto, CA, USA", + "bibtex": "@article{Du_Wang_Yang_Zhou_Han_Lai_2024, title={PARCS: A Deployment-Oriented AI System for Robust Parcel-Level Cropland Segmentation of Satellite Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26873}, DOI={10.1609/aaai.v37i13.26873}, abstractNote={Cropland segmentation of satellite images is an essential basis for crop area and yield estimation tasks in the remote sensing and computer vision interdisciplinary community. Instead of common pixel-level segmentation results with salt-and-pepper effects, a parcel-level output conforming to human recognition is required according to the clients\u2019 needs during the model deployment. However, leveraging CNN-based models requires fine-grained parcel-level labels, which is an unacceptable annotation burden. To cure these practical pain points, in this paper, we present PARCS, a holistic deployment-oriented AI system for PARcel-level Cropland Segmentation. By consolidating multi-disciplinary knowledge, PARCS has two algorithm branches. The first branch performs pixel-level crop segmentation by learning from limited labeled pixel samples with an active learning strategy to avoid parcel-level annotation costs. The second branch aims at generating the parcel regions without a learning procedure. The final parcel-level segmentation result is achieved by integrating the outputs of these two branches in tandem. 
The robust effectiveness of PARCS is demonstrated by its outstanding performance on public and in-house datasets (an overall accuracy of 85.3% and an mIoU of 61.7% on the public PASTIS dataset, and an mIoU of 65.16% on the in-house dataset). We also include subjective feedback from clients and discuss the lessons learned from deployment.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Chen and Wang, Yiwei and Yang, Zhicheng and Zhou, Hang and Han, Mei and Lai, Jui-Hsin}, year={2024}, month={Jul.}, pages={15775-15781} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26873/26645", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26873", + "pdf_size": 2317318, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4780784033965140524&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com; ;gmail.com; ; ;gmail.com", + "email": "gmail.com; ;gmail.com; ; ;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+2;0;0;0;0", + "aff_unique_norm": "PAII Inc.;University of Science and Technology of China;Ping An Technology", + "aff_unique_dep": ";;", + "aff_unique_url": ";http://www.ustc.edu.cn;https://www.pingan.com", + "aff_unique_abbr": ";USTC;Ping An", + "aff_campus_unique_index": "0;1+2;0;0;0;0", + "aff_campus_unique": "Palo Alto;Hefei;Shenzhen", + "aff_country_unique_index": "0;1+1;0;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "article-25491", + "title": "PASS: Patch Automatic Skip Scheme for Efficient Real-Time Video Perception on Edge Devices", + "track": "main", + "status": "Technical", + "abstract": "Real-time video perception tasks are often challenging over the resource-constrained edge devices due to the concerns of accuracy drop and hardware overhead, where saving computations is the key to performance improvement. 
Existing methods either rely on domain-specific neural chips or priorly searched models, which require specialized optimization according to different task properties. In this work, we propose a general and task-independent Patch Automatic Skip Scheme (PASS), a novel end-to-end learning pipeline to support diverse video perception settings by decoupling acceleration and tasks. The gist is to capture the temporal similarity across video frames and skip the redundant computations at patch level, where the patch is a non-overlapping square block in visual. PASS equips each convolution layer with a learnable gate to selectively determine which patches could be safely skipped without degrading model accuracy. As to each layer, a desired gate needs to make flexible skip decisions based on intermediate features without any annotations, which cannot be achieved by conventional supervised learning paradigm. To address this challenge, we are the first to construct a tough self-supervisory procedure for optimizing these gates, which learns to extract contrastive representation, i.e., distinguishing similarity and difference, from frame sequence. These high-capacity gates can serve as a plug-and-play module for convolutional neural network (CNN) backbones to implement patch-skippable architectures, and automatically generate proper skip strategy to accelerate different video-based downstream tasks, e.g., outperforming the state-of-the-art MobileHumanPose (MHP) in 3D pose estimation and FairMOT in multiple object tracking, by up to 9.43 times and 12.19 times speedups, respectively. 
By directly processing the raw data of frames, PASS can generalize to real-time video streams on commodity edge devices, e.g., NVIDIA Jetson Nano, with efficient performance in realistic deployment.", + "primary_area": "computer vision iii", + "author": "Qihua Zhou; Song Guo; Jun Pan; Jiacheng Liang; Zhenda Xu; Jingren Zhou", + "authorids": "", + "aff": "The Hong Kong Polytechnic University; The Hong Kong Polytechnic University; The Hong Kong Polytechnic University; Pennsylvania State University; The Hong Kong Polytechnic University; Alibaba Group", + "bibtex": "@article{Zhou_Guo_Pan_Liang_Xu_Zhou_2023, title={PASS: Patch Automatic Skip Scheme for Efficient Real-Time Video Perception on Edge Devices}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25491}, DOI={10.1609/aaai.v37i3.25491}, abstractNote={Real-time video perception tasks are often challenging over the resource-constrained edge devices due to the concerns of accuracy drop and hardware overhead, where saving computations is the key to performance improvement. Existing methods either rely on domain-specific neural chips or priorly searched models, which require specialized optimization according to different task properties. In this work, we propose a general and task-independent Patch Automatic Skip Scheme (PASS), a novel end-to-end learning pipeline to support diverse video perception settings by decoupling acceleration and tasks. The gist is to capture the temporal similarity across video frames and skip the redundant computations at patch level, where the patch is a non-overlapping square block in visual. PASS equips each convolution layer with a learnable gate to selectively determine which patches could be safely skipped without degrading model accuracy. As to each layer, a desired gate needs to make flexible skip decisions based on intermediate features without any annotations, which cannot be achieved by conventional supervised learning paradigm. 
To address this challenge, we are the first to construct a tough self-supervisory procedure for optimizing these gates, which learns to extract contrastive representation, i.e., distinguishing similarity and difference, from frame sequence. These high-capacity gates can serve as a plug-and-play module for convolutional neural network (CNN) backbones to implement patch-skippable architectures, and automatically generate proper skip strategy to accelerate different video-based downstream tasks, e.g., outperforming the state-of-the-art MobileHumanPose (MHP) in 3D pose estimation and FairMOT in multiple object tracking, by up to 9.43 times and 12.19 times speedups, respectively. By directly processing the raw data of frames, PASS can generalize to real-time video streams on commodity edge devices, e.g., NVIDIA Jetson Nano, with efficient performance in realistic deployment.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Qihua and Guo, Song and Pan, Jun and Liang, Jiacheng and Xu, Zhenda and Zhou, Jingren}, year={2023}, month={Jun.}, pages={3787-3795} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25491/25263", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25491", + "pdf_size": 2076735, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16025037008608187124&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "comp.polyu.edu.hk;polyu.edu.hk;comp.polyu.edu.hk;psu.edu;comp.polyu.edu.hk;alibaba-inc.com", + "email": "comp.polyu.edu.hk;polyu.edu.hk;comp.polyu.edu.hk;psu.edu;comp.polyu.edu.hk;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;2", + "aff_unique_norm": "The Hong Kong Polytechnic University;Pennsylvania State University;Alibaba Group", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.polyu.edu.hk;https://www.psu.edu;https://www.alibaba.com", + "aff_unique_abbr": 
"PolyU;PSU;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25177", + "title": "PATRON: Perspective-Aware Multitask Model for Referring Expression Grounding Using Embodied Multimodal Cues", + "track": "main", + "status": "Technical", + "abstract": "Humans naturally use referring expressions with verbal utterances and nonverbal gestures to refer to objects and events. As these referring expressions can be interpreted differently from the speaker's or the observer's perspective, people effectively decide on the perspective in comprehending the expressions. However, existing models do not explicitly learn perspective grounding, which often causes the models to perform poorly in understanding embodied referring expressions. To make it exacerbate, these models are often trained on datasets collected in non-embodied settings without nonverbal gestures and curated from an exocentric perspective. To address these issues, in this paper, we present a perspective-aware multitask learning model, called PATRON, for relation and object grounding tasks in embodied settings by utilizing verbal utterances and nonverbal cues. In PATRON, we have developed a guided fusion approach, where a perspective grounding task guides the relation and object grounding task. Through this approach, PATRON learns disentangled task-specific and task-guidance representations, where task-guidance representations guide the extraction of salient multimodal features to ground the relation and object accurately. Furthermore, we have curated a synthetic dataset of embodied referring expressions with multimodal cues, called CAESAR-PRO. The experimental results suggest that PATRON outperforms the evaluated state-of-the-art visual-language models. 
Additionally, the results indicate that learning to ground perspective helps machine learning models to improve the performance of the relation and object grounding task. Furthermore, the insights from the extensive experimental results and the proposed dataset will enable researchers to evaluate visual-language models' effectiveness in understanding referring expressions in other embodied settings.", + "primary_area": "computer vision i", + "author": "Md Mofijul Islam; Alexi Gladstone; Tariq Iqbal", + "authorids": "", + "aff": "School of Engineering and Applied Science, University of Virginia; School of Engineering and Applied Science, University of Virginia; School of Engineering and Applied Science, University of Virginia", + "bibtex": "@article{Islam_Gladstone_Iqbal_2023, title={PATRON: Perspective-Aware Multitask Model for Referring Expression Grounding Using Embodied Multimodal Cues}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25177}, DOI={10.1609/aaai.v37i1.25177}, abstractNote={Humans naturally use referring expressions with verbal utterances and nonverbal gestures to refer to objects and events. As these referring expressions can be interpreted differently from the speaker\u2019s or the observer\u2019s perspective, people effectively decide on the perspective in comprehending the expressions. However, existing models do not explicitly learn perspective grounding, which often causes the models to perform poorly in understanding embodied referring expressions. To make it exacerbate, these models are often trained on datasets collected in non-embodied settings without nonverbal gestures and curated from an exocentric perspective. To address these issues, in this paper, we present a perspective-aware multitask learning model, called PATRON, for relation and object grounding tasks in embodied settings by utilizing verbal utterances and nonverbal cues. 
In PATRON, we have developed a guided fusion approach, where a perspective grounding task guides the relation and object grounding task. Through this approach, PATRON learns disentangled task-specific and task-guidance representations, where task-guidance representations guide the extraction of salient multimodal features to ground the relation and object accurately. Furthermore, we have curated a synthetic dataset of embodied referring expressions with multimodal cues, called CAESAR-PRO. The experimental results suggest that PATRON outperforms the evaluated state-of-the-art visual-language models. Additionally, the results indicate that learning to ground perspective helps machine learning models to improve the performance of the relation and object grounding task. Furthermore, the insights from the extensive experimental results and the proposed dataset will enable researchers to evaluate visual-language models\u2019 effectiveness in understanding referring expressions in other embodied settings.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Islam, Md Mofijul and Gladstone, Alexi and Iqbal, Tariq}, year={2023}, month={Jun.}, pages={971-979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25177/24949", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25177", + "pdf_size": 3394631, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6311642640349324551&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "virginia.edu;virginia.edu;virginia.edu", + "email": "virginia.edu;virginia.edu;virginia.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Virginia", + "aff_unique_dep": "School of Engineering and Applied Science", + "aff_unique_url": "https://www.virginia.edu", + "aff_unique_abbr": "UVA", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Charlottesville", 
+ "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25556", + "title": "PDFormer: Propagation Delay-Aware Dynamic Long-Range Transformer for Traffic Flow Prediction", + "track": "main", + "status": "Technical", + "abstract": "As a core technology of Intelligent Transportation System, traffic flow prediction has a wide range of applications. The fundamental challenge in traffic flow prediction is to effectively model the complex spatial-temporal dependencies in traffic data. Spatial-temporal Graph Neural Network (GNN) models have emerged as one of the most promising methods to solve this problem. However, GNN-based models have three major limitations for traffic prediction: i) Most methods model spatial dependencies in a static manner, which limits the ability to learn dynamic urban traffic patterns; ii) Most methods only consider short-range spatial information and are unable to capture long-range spatial dependencies; iii) These methods ignore the fact that the propagation of traffic conditions between locations has a time delay in traffic systems. To this end, we propose a novel Propagation Delay-aware dynamic long-range transFormer, namely PDFormer, for accurate traffic flow prediction. Specifically, we design a spatial self-attention module to capture the dynamic spatial dependencies. Then, two graph masking matrices are introduced to highlight spatial dependencies from short- and long-range views. Moreover, a traffic delay-aware feature transformation module is proposed to empower PDFormer with the capability of explicitly modeling the time delay of spatial information propagation. Extensive experimental results on six real-world public traffic datasets show that our method can not only achieve state-of-the-art performance but also exhibit competitive computational efficiency. 
Moreover, we visualize the learned spatial-temporal attention map to make our model highly interpretable.", + "primary_area": "data mining and knowledge management", + "author": "Jiawei Jiang; Chengkai Han; Wayne Xin Zhao; Jingyuan Wang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University, Beijing, China+Pengcheng Laboratory, Shenzhen, China+School of Economics and Management, Beihang University, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China+Pengcheng Laboratory, Shenzhen, China+School of Economics and Management, Beihang University, Beijing, China; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China+Pengcheng Laboratory, Shenzhen, China+School of Economics and Management, Beihang University, Beijing, China", + "bibtex": "@article{Jiang_Han_Zhao_Wang_2023, title={PDFormer: Propagation Delay-Aware Dynamic Long-Range Transformer for Traffic Flow Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25556}, DOI={10.1609/aaai.v37i4.25556}, abstractNote={As a core technology of Intelligent Transportation System, traffic flow prediction has a wide range of applications. The fundamental challenge in traffic flow prediction is to effectively model the complex spatial-temporal dependencies in traffic data. Spatial-temporal Graph Neural Network (GNN) models have emerged as one of the most promising methods to solve this problem. 
However, GNN-based models have three major limitations for traffic prediction: i) Most methods model spatial dependencies in a static manner, which limits the ability to learn dynamic urban traffic patterns; ii) Most methods only consider short-range spatial information and are unable to capture long-range spatial dependencies; iii) These methods ignore the fact that the propagation of traffic conditions between locations has a time delay in traffic systems. To this end, we propose a novel Propagation Delay-aware dynamic long-range transFormer, namely PDFormer, for accurate traffic flow prediction. Specifically, we design a spatial self-attention module to capture the dynamic spatial dependencies. Then, two graph masking matrices are introduced to highlight spatial dependencies from short- and long-range views. Moreover, a traffic delay-aware feature transformation module is proposed to empower PDFormer with the capability of explicitly modeling the time delay of spatial information propagation. Extensive experimental results on six real-world public traffic datasets show that our method can not only achieve state-of-the-art performance but also exhibit competitive computational efficiency. 
Moreover, we visualize the learned spatial-temporal attention map to make our model highly interpretable.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Jiawei and Han, Chengkai and Zhao, Wayne Xin and Wang, Jingyuan}, year={2023}, month={Jun.}, pages={4365-4373} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25556/25328", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25556", + "pdf_size": 5769022, + "gs_citation": 362, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7850349405513309497&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;ruc.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;ruc.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+0;0+1+0;2;0+1+0", + "aff_unique_norm": "Beihang University;Pengcheng Laboratory;Renmin University of China", + "aff_unique_dep": "School of Computer Science and Engineering;;Gaoling School of Artificial Intelligence", + "aff_unique_url": "http://www.buaa.edu.cn;;http://www.ruc.edu.cn", + "aff_unique_abbr": "BUAA;;RUC", + "aff_campus_unique_index": "0+1+0;0+1+0;0;0+1+0", + "aff_campus_unique": "Beijing;Shenzhen", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25295", + "title": "PDRF: Progressively Deblurring Radiance Field for Fast Scene Reconstruction from Blurry Images", + "track": "main", + "status": "Technical", + "abstract": "We present Progressively Deblurring Radiance Field (PDRF), a novel approach to efficiently reconstruct high quality radiance fields from blurry images. While current State-of-The-Art (SoTA) scene reconstruction methods achieve photo-realistic renderings from clean source views, their performances suffer when the source views are affected by blur, which is commonly observed in the wild. 
Previous deblurring methods either do not account for 3D geometry, or are computationally intense. To addresses these issues, PDRF uses a progressively deblurring scheme for radiance field modeling, which can accurately model blur with 3D scene context. PDRF further uses an efficient importance sampling scheme that results in fast scene optimization. We perform extensive experiments and show that PDRF is 15X faster than previous SoTA while achieving better performance on both synthetic and real scenes.", + "primary_area": "computer vision ii", + "author": "Cheng Peng; Rama Chellappa", + "authorids": "", + "aff": "Johns Hopkins University; Johns Hopkins University", + "bibtex": "@article{Peng_Chellappa_2023, title={PDRF: Progressively Deblurring Radiance Field for Fast Scene Reconstruction from Blurry Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25295}, DOI={10.1609/aaai.v37i2.25295}, abstractNote={We present Progressively Deblurring Radiance Field (PDRF), a novel approach to efficiently reconstruct high quality radiance fields from blurry images. While current State-of-The-Art (SoTA) scene reconstruction methods achieve photo-realistic renderings from clean source views, their performances suffer when the source views are affected by blur, which is commonly observed in the wild. Previous deblurring methods either do not account for 3D geometry, or are computationally intense. To addresses these issues, PDRF uses a progressively deblurring scheme for radiance field modeling, which can accurately model blur with 3D scene context. PDRF further uses an efficient importance sampling scheme that results in fast scene optimization. 
We perform extensive experiments and show that PDRF is 15X faster than previous SoTA while achieving better performance on both synthetic and real scenes.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Cheng and Chellappa, Rama}, year={2023}, month={Jun.}, pages={2029-2037} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25295/25067", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25295", + "pdf_size": 4419271, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3892559906216498409&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "jhu.edu; ", + "email": "jhu.edu; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25648", + "title": "PEN: Prediction-Explanation Network to Forecast Stock Price Movement with Better Explainability", + "track": "main", + "status": "Technical", + "abstract": "Nowadays explainability in stock price movement prediction is attracting increasing attention in banks, hedge funds and asset managers, primarily due to audit or regulatory reasons. Text data such as financial news and social media posts can be part of the reasons for stock price movement. To this end, we propose a novel framework of Prediction-Explanation Network (PEN) jointly modeling text streams and price streams with alignment. The key component of the PEN model is an shared representation learning module that learns which texts are possibly associated with the stock price movement by modeling the interaction between the text data and stock price data with a salient vector characterizing their correlation. 
In this way, the PEN model is able to predict the stock price movement by identifying and utilizing abundant messages while on the other hand, the selected text messages also explain the stock price movement. Experiments on real-world datasets demonstrate that we are able to kill two birds with one stone: in terms of accuracy, the proposed PEN model outperforms the state-of-art baseline; on explainability, the PEN model are demonstrated to be far superior to attention mechanism, capable of picking out the crucial texts with a very high confidence.", + "primary_area": "domain s of application", + "author": "Shuqi Li; Weiheng Liao; Yuhan Chen; Rui Yan", + "authorids": "", + "aff": "Gaoling School of Artificial Intelligence (GSAI), Renmin University of China; MADE by DATA; Gaoling School of Artificial Intelligence (GSAI), Renmin University of China; Gaoling School of Artificial Intelligence (GSAI), Renmin University of China + Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education", + "bibtex": "@article{Li_Liao_Chen_Yan_2023, title={PEN: Prediction-Explanation Network to Forecast Stock Price Movement with Better Explainability}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25648}, DOI={10.1609/aaai.v37i4.25648}, abstractNote={Nowadays explainability in stock price movement prediction is attracting increasing attention in banks, hedge funds and asset managers, primarily due to audit or regulatory reasons. Text data such as financial news and social media posts can be part of the reasons for stock price movement. To this end, we propose a novel framework of Prediction-Explanation Network (PEN) jointly modeling text streams and price streams with alignment. 
The key component of the PEN model is an shared representation learning module that learns which texts are possibly associated with the stock price movement by modeling the interaction between the text data and stock price data with a salient vector characterizing their correlation. In this way, the PEN model is able to predict the stock price movement by identifying and utilizing abundant messages while on the other hand, the selected text messages also explain the stock price movement. Experiments on real-world datasets demonstrate that we are able to kill two birds with one stone: in terms of accuracy, the proposed PEN model outperforms the state-of-art baseline; on explainability, the PEN model are demonstrated to be far superior to attention mechanism, capable of picking out the crucial texts with a very high confidence.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shuqi and Liao, Weiheng and Chen, Yuhan and Yan, Rui}, year={2023}, month={Jun.}, pages={5187-5194} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25648/25420", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25648", + "pdf_size": 2204563, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18273836381485498546&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ruc.edu.cn;gmail.com;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;gmail.com;ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "https://www.madebydata.com", + "author_num": 4, + "aff_unique_index": "0;1;0;0+2", + "aff_unique_norm": "Renmin University of China;MADE by DATA;Ministry of Education", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;;Engineering Research Center of Next-Generation Intelligent Search and Recommendation", + "aff_unique_url": "http://www.ruc.edu.cn;;", + "aff_unique_abbr": "RUC;;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + 
"aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "article-26542", + "title": "PGSS: Pitch-Guided Speech Separation", + "track": "main", + "status": "Technical", + "abstract": "Monaural speech separation aims to separate concurrent speakers from a single-microphone mixture recording. Inspired by the effect of pitch priming in auditory scene analysis (ASA) mechanisms, a novel pitch-guided speech separation framework is proposed in this work. The prominent advantage of this framework is that both the permutation problem and the unknown speaker number problem existing in general models can be avoided by using pitch contours as the primary means to guide the target speaker. In addition, adversarial training is applied, instead of a traditional time-frequency mask, to improve the perceptual quality of separated speech. Specifically, the proposed framework can be divided into two phases: pitch extraction and speech separation. The former aims to extract pitch contour candidates for each speaker from the mixture, modeling the bottom-up process in ASA mechanisms. Any pitch contour can be selected as the condition in the second phase to separate the corresponding speaker, where a conditional generative adversarial network (CGAN) is applied. The second phase models the effect of pitch priming in ASA. 
Experiments on the WSJ0-2mix corpus reveal that the proposed approaches can achieve higher pitch extraction accuracy and better separation performance, compared to the baseline models, and have the potential to be applied to SOTA architectures.", + "primary_area": "speech natural language processing", + "author": "Xiang Li; Yiwen Wang; Yifan Sun; Xihong Wu; Jing Chen", + "authorids": "", + "aff": "School of Intelligence Science and Technology, Peking University, Beijing, China; School of Intelligence Science and Technology, Peking University, Beijing, China; School of Intelligence Science and Technology, Peking University, Beijing, China; School of Intelligence Science and Technology, Peking University, Beijing, China; School of Intelligence Science and Technology, Peking University, Beijing, China", + "bibtex": "@article{Li_Wang_Sun_Wu_Chen_2023, title={PGSS: Pitch-Guided Speech Separation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26542}, DOI={10.1609/aaai.v37i11.26542}, abstractNote={Monaural speech separation aims to separate concurrent speakers from a single-microphone mixture recording. Inspired by the effect of pitch priming in auditory scene analysis (ASA) mechanisms, a novel pitch-guided speech separation framework is proposed in this work. The prominent advantage of this framework is that both the permutation problem and the unknown speaker number problem existing in general models can be avoided by using pitch contours as the primary means to guide the target speaker. In addition, adversarial training is applied, instead of a traditional time-frequency mask, to improve the perceptual quality of separated speech. Specifically, the proposed framework can be divided into two phases: pitch extraction and speech separation. The former aims to extract pitch contour candidates for each speaker from the mixture, modeling the bottom-up process in ASA mechanisms. 
Any pitch contour can be selected as the condition in the second phase to separate the corresponding speaker, where a conditional generative adversarial network (CGAN) is applied. The second phase models the effect of pitch priming in ASA. Experiments on the WSJ0-2mix corpus reveal that the proposed approaches can achieve higher pitch extraction accuracy and better separation performance, compared to the baseline models, and have the potential to be applied to SOTA architectures.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xiang and Wang, Yiwen and Sun, Yifan and Wu, Xihong and Chen, Jing}, year={2023}, month={Jun.}, pages={13130-13138} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26542/26314", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26542", + "pdf_size": 872977, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4024257243934608358&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;cis.pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;cis.pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Intelligence Science and Technology", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "Peking U", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26076", + "title": "PINAT: A Permutation INvariance Augmented Transformer for NAS Predictor", + "track": "main", + "status": "Technical", + "abstract": "Time-consuming performance evaluation is the bottleneck of traditional Neural Architecture Search (NAS) methods. 
Predictor-based NAS can speed up performance evaluation by directly predicting performance, rather than training a large number of sub-models and then validating their performance. Most predictor-based NAS approaches use a proxy dataset to train model-based predictors efficiently but suffer from performance degradation and generalization problems. We attribute these problems to the poor abilities of existing predictors to character the sub-models' structure, specifically the topology information extraction and the node feature representation of the input graph data. To address these problems, we propose a Transformer-like NAS predictor PINAT, consisting of a Permutation INvariance Augmentation module serving as both token embedding layer and self-attention head, as well as a Laplacian matrix to be the positional encoding. Our design produces more representative features of the encoded architecture and outperforms state-of-the-art NAS predictors on six search spaces: NAS-Bench-101, NAS-Bench-201, DARTS, ProxylessNAS, PPI, and ModelNet. 
The code is available at https://github.com/ShunLu91/PINAT.", + "primary_area": "machine learning ii", + "author": "Shun Lu; Yu Hu; Peihao Wang; Yan Han; Jianchao Tan; Jixiang Li; Sen Yang; Ji Liu", + "authorids": "", + "aff": "Research Center for Intelligent Computing Systems, Institute of Computing Technology, Chinese Academy of Sciences+School of Computer Science and Technology, University of Chinese Academy of Sciences; Research Center for Intelligent Computing Systems, Institute of Computing Technology, Chinese Academy of Sciences+School of Computer Science and Technology, University of Chinese Academy of Sciences; University of Texas at Austin; University of Texas at Austin; Kuaishou Technology.; Kuaishou Technology.; Snap Inc.; Meta Platforms, Inc.", + "bibtex": "@article{Lu_Hu_Wang_Han_Tan_Li_Yang_Liu_2023, title={PINAT: A Permutation INvariance Augmented Transformer for NAS Predictor}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26076}, DOI={10.1609/aaai.v37i7.26076}, abstractNote={Time-consuming performance evaluation is the bottleneck of traditional Neural Architecture Search (NAS) methods. Predictor-based NAS can speed up performance evaluation by directly predicting performance, rather than training a large number of sub-models and then validating their performance. Most predictor-based NAS approaches use a proxy dataset to train model-based predictors efficiently but suffer from performance degradation and generalization problems. We attribute these problems to the poor abilities of existing predictors to character the sub-models\u2019 structure, specifically the topology information extraction and the node feature representation of the input graph data. To address these problems, we propose a Transformer-like NAS predictor PINAT, consisting of a Permutation INvariance Augmentation module serving as both token embedding layer and self-attention head, as well as a Laplacian matrix to be the positional encoding. 
Our design produces more representative features of the encoded architecture and outperforms state-of-the-art NAS predictors on six search spaces: NAS-Bench-101, NAS-Bench-201, DARTS, ProxylessNAS, PPI, and ModelNet. The code is available at https://github.com/ShunLu91/PINAT.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Shun and Hu, Yu and Wang, Peihao and Han, Yan and Tan, Jianchao and Li, Jixiang and Yang, Sen and Liu, Ji}, year={2023}, month={Jun.}, pages={8957-8965} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26076/25848", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26076", + "pdf_size": 222454, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1100718635952607320&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "flushun19s;ict.ac.cn; fpeihaowang;utexas.edu; fjianchaotan;kuaishou.com;snap.com;gmail.com", + "email": "flushun19s;ict.ac.cn; fpeihaowang;utexas.edu; fjianchaotan;kuaishou.com;snap.com;gmail.com", + "github": "https://github.com/ShunLu91/PINAT", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;2;2;3;3;4;5", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;University of Texas at Austin;Kuaishou Technology;Snap Inc.;Meta Platforms, Inc.", + "aff_unique_dep": "Institute of Computing Technology;School of Computer Science and Technology;;;;", + "aff_unique_url": "http://www.cas.ac.cn;http://www.ucas.ac.cn;https://www.utexas.edu;https://www.kuaishou.com;https://www.snapinc.com;https://www.meta.com", + "aff_unique_abbr": "CAS;UCAS;UT Austin;Kuaishou;Snap;Meta", + "aff_campus_unique_index": ";;1;1", + "aff_campus_unique": ";Austin", + "aff_country_unique_index": "0+0;0+0;1;1;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25988", + "title": "PIXEL: Physics-Informed Cell Representations for Fast and Accurate PDE Solvers", 
+ "track": "main", + "status": "Technical", + "abstract": "With the increases in computational power and advances in machine learning, data-driven learning-based methods have gained significant attention in solving PDEs. Physics-informed neural networks (PINNs) have recently emerged and succeeded in various forward and inverse PDE problems thanks to their excellent properties, such as flexibility, mesh-free solutions, and unsupervised training. However, their slower convergence speed and relatively inaccurate solutions often limit their broader applicability in many science and engineering domains. This paper proposes a new kind of data-driven PDEs solver, physics-informed cell representations (PIXEL), elegantly combining classical numerical methods and learning-based approaches. We adopt a grid structure from the numerical methods to improve accuracy and convergence speed and overcome the spectral bias presented in PINNs. Moreover, the proposed method enjoys the same benefits in PINNs, e.g., using the same optimization frameworks to solve both forward and inverse PDE problems and readily enforcing PDE constraints with modern automatic differentiation techniques. We provide experimental results on various challenging PDEs that the original PINNs have struggled with and show that PIXEL achieves fast convergence speed and high accuracy. 
Project page: https://namgyukang.github.io/PIXEL/", + "primary_area": "machine learning ii", + "author": "Namgyu Kang; Byeonghyeon Lee; Youngjoon Hong; Seok-Bae Yun; Eunbyung Park", + "authorids": "", + "aff": "Department of Artificial Intelligence, Sungkyunkwan University; Department of Artificial Intelligence, Sungkyunkwan University; Department of Mathematics, Sungkyunkwan University; Department of Mathematics, Sungkyunkwan University; Department of Electrical and Computer Engineering, Sungkyunkwan University + Department of Artificial Intelligence, Sungkyunkwan University", + "bibtex": "@article{Kang_Lee_Hong_Yun_Park_2023, title={PIXEL: Physics-Informed Cell Representations for Fast and Accurate PDE Solvers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25988}, DOI={10.1609/aaai.v37i7.25988}, abstractNote={With the increases in computational power and advances in machine learning, data-driven learning-based methods have gained significant attention in solving PDEs. Physics-informed neural networks (PINNs) have recently emerged and succeeded in various forward and inverse PDE problems thanks to their excellent properties, such as flexibility, mesh-free solutions, and unsupervised training. However, their slower convergence speed and relatively inaccurate solutions often limit their broader applicability in many science and engineering domains. This paper proposes a new kind of data-driven PDEs solver, physics-informed cell representations (PIXEL), elegantly combining classical numerical methods and learning-based approaches. We adopt a grid structure from the numerical methods to improve accuracy and convergence speed and overcome the spectral bias presented in PINNs. Moreover, the proposed method enjoys the same benefits in PINNs, e.g., using the same optimization frameworks to solve both forward and inverse PDE problems and readily enforcing PDE constraints with modern automatic differentiation techniques. 
We provide experimental results on various challenging PDEs that the original PINNs have struggled with and show that PIXEL achieves fast convergence speed and high accuracy. Project page: https://namgyukang.github.io/PIXEL/}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kang, Namgyu and Lee, Byeonghyeon and Hong, Youngjoon and Yun, Seok-Bae and Park, Eunbyung}, year={2023}, month={Jun.}, pages={8186-8194} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25988/25760", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25988", + "pdf_size": 11739825, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1926492097660979827&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 8, + "aff_domain": "skku.edu;skku.edu;skku.edu;skku.edu;skku.edu", + "email": "skku.edu;skku.edu;skku.edu;skku.edu;skku.edu", + "github": "", + "project": "https://namgyukang.github.io/PIXEL/", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+0", + "aff_unique_norm": "Sungkyunkwan University", + "aff_unique_dep": "Department of Artificial Intelligence", + "aff_unique_url": "https://www.sungkyunkwan.ac.kr", + "aff_unique_abbr": "SKKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26750", + "title": "PLMmark: A Secure and Robust Black-Box Watermarking Framework for Pre-trained Language Models", + "track": "aaai special track", + "status": "Technical", + "abstract": "The huge training overhead, considerable commercial value, and various potential security risks make it urgent to protect the intellectual property (IP) of Deep Neural Networks (DNNs). DNN watermarking has become a plausible method to meet this need. However, most of the existing watermarking schemes focus on image classification tasks. The schemes designed for the textual domain lack security and reliability. 
Moreover, how to protect the IP of widely-used pre-trained language models (PLMs) remains a blank. \nTo fill these gaps, we propose PLMmark, the first secure and robust black-box watermarking framework for PLMs. It consists of three phases: (1) In order to generate watermarks that contain owners\u2019 identity information, we propose a novel encoding method to establish a strong link between a digital signature and trigger words by leveraging the original vocabulary tables of PLMs. Combining this with public key cryptography ensures the security of our scheme. (2) To embed robust, task-agnostic, and highly transferable watermarks in PLMs, we introduce a supervised contrastive loss to deviate the output representations of trigger sets from that of clean samples. In this way, the watermarked models will respond to the trigger sets anomaly and thus can identify the ownership. (3) To make the model ownership verification results reliable, we perform double verification, which guarantees the unforgeability of ownership. Extensive experiments on text classification tasks demonstrate that the embedded watermark can transfer to all the downstream tasks and can be effectively extracted and verified. 
The watermarking scheme is robust to watermark removing attacks (fine-pruning and re-initializing) and is secure enough to resist forgery attacks.", + "primary_area": "safe and robust ai", + "author": "Peixuan Li; Pengzhou Cheng; Fangqi Li; Wei Du; Haodong Zhao; Gongshen Liu", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Li_Cheng_Li_Du_Zhao_Liu_2023, title={PLMmark: A Secure and Robust Black-Box Watermarking Framework for Pre-trained Language Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26750}, DOI={10.1609/aaai.v37i12.26750}, abstractNote={The huge training overhead, considerable commercial value, and various potential security risks make it urgent to protect the intellectual property (IP) of Deep Neural Networks (DNNs). DNN watermarking has become a plausible method to meet this need. However, most of the existing watermarking schemes focus on image classification tasks. The schemes designed for the textual domain lack security and reliability. Moreover, how to protect the IP of widely-used pre-trained language models (PLMs) remains a blank. To fill these gaps, we propose PLMmark, the first secure and robust black-box watermarking framework for PLMs. It consists of three phases: (1) In order to generate watermarks that contain owners\u2019 identity information, we propose a novel encoding method to establish a strong link between a digital signature and trigger words by leveraging the original vocabulary tables of PLMs. Combining this with public key cryptography ensures the security of our scheme. (2) To embed robust, task-agnostic, and highly transferable watermarks in PLMs, we introduce a supervised contrastive loss to deviate the output representations of trigger sets from that of clean samples. 
In this way, the watermarked models will respond to the trigger sets anomaly and thus can identify the ownership. (3) To make the model ownership verification results reliable, we perform double verification, which guarantees the unforgeability of ownership. Extensive experiments on text classification tasks demonstrate that the embedded watermark can transfer to all the downstream tasks and can be effectively extracted and verified. The watermarking scheme is robust to watermark removing attacks (fine-pruning and re-initializing) and is secure enough to resist forgery attacks.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Peixuan and Cheng, Pengzhou and Li, Fangqi and Du, Wei and Zhao, Haodong and Liu, Gongshen}, year={2023}, month={Jun.}, pages={14991-14999} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26750/26522", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26750", + "pdf_size": 1072620, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2404990639696337868&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;gmail.com;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;gmail.com;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25984", + "title": "POEM: Polarization of Embeddings for Domain-Invariant Representations", + "track": "main", + "status": "Technical", + "abstract": "Handling out-of-distribution samples is a long-lasting challenge for deep visual models. 
In particular, domain generalization (DG) is one of the most relevant tasks that aims to train a model with a generalization capability on novel domains. Most existing DG approaches share the same philosophy to minimize the discrepancy between domains by finding the domain-invariant representations. On the contrary, our proposed method called POEM acquires a strong DG capability by learning domain-invariant and domain-specific representations and polarizing them. Specifically, POEM co-trains category-classifying and domain-classifying embeddings while regularizing them to be orthogonal via minimizing the cosine-similarity between their features, i.e., the polarization of embeddings. The clear separation of embeddings suppresses domain-specific features in the domain-invariant embeddings. The concept of POEM shows a unique direction to enhance the domain robustness of representations that brings considerable and consistent performance gains when combined with existing DG methods. Extensive simulation results in popular DG benchmarks with the PACS, VLCS, OfficeHome, TerraInc, and DomainNet datasets show that POEM indeed facilitates the category-classifying embedding to be more domain-invariant.", + "primary_area": "machine learning ii", + "author": "Sang-Yeong Jo; Sung Whan Yoon", + "authorids": "", + "aff": "Graduate School of Artificial Intelligence, Ulsan National Institute of Science and Technology (UNIST), Republic of Korea; Graduate School of Artificial Intelligence, Ulsan National Institute of Science and Technology (UNIST), Republic of Korea", + "bibtex": "@article{Jo_Yoon_2023, title={POEM: Polarization of Embeddings for Domain-Invariant Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25984}, DOI={10.1609/aaai.v37i7.25984}, abstractNote={Handling out-of-distribution samples is a long-lasting challenge for deep visual models. 
In particular, domain generalization (DG) is one of the most relevant tasks that aims to train a model with a generalization capability on novel domains. Most existing DG approaches share the same philosophy to minimize the discrepancy between domains by finding the domain-invariant representations. On the contrary, our proposed method called POEM acquires a strong DG capability by learning domain-invariant and domain-specific representations and polarizing them. Specifically, POEM co-trains category-classifying and domain-classifying embeddings while regularizing them to be orthogonal via minimizing the cosine-similarity between their features, i.e., the polarization of embeddings. The clear separation of embeddings suppresses domain-specific features in the domain-invariant embeddings. The concept of POEM shows a unique direction to enhance the domain robustness of representations that brings considerable and consistent performance gains when combined with existing DG methods. Extensive simulation results in popular DG benchmarks with the PACS, VLCS, OfficeHome, TerraInc, and DomainNet datasets show that POEM indeed facilitates the category-classifying embedding to be more domain-invariant.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jo, Sang-Yeong and Yoon, Sung Whan}, year={2023}, month={Jun.}, pages={8150-8158} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25984/25756", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25984", + "pdf_size": 2662253, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4891899689208632141&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "unist.ac.kr;unist.ac.kr", + "email": "unist.ac.kr;unist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Ulsan National Institute of Science and Technology", + "aff_unique_dep": "Graduate School of 
Artificial Intelligence", + "aff_unique_url": "https://www.unist.ac.kr", + "aff_unique_abbr": "UNIST", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Ulsan", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-25566", + "title": "PPGenCDR: A Stable and Robust Framework for Privacy-Preserving Cross-Domain Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Privacy-preserving cross-domain recommendation (PPCDR) refers to preserving the privacy of users when transferring the knowledge from source domain to target domain for better performance, which is vital for the long-term development of recommender systems. Existing work on cross-domain recommendation (CDR) reaches advanced and satisfying recommendation performance, but mostly neglects preserving privacy. To fill this gap, we propose a privacy-preserving generative cross-domain recommendation (PPGenCDR) framework for PPCDR. PPGenCDR includes two main modules, i.e., stable privacy-preserving generator module, and robust cross-domain recommendation module. Specifically, the former isolates data from different domains with a generative adversarial network (GAN) based model, which stably estimates the distribution of private data in the source domain with \u0301Renyi differential privacy (RDP) technique. Then the latter aims to robustly leverage the perturbed but effective knowledge from the source domain with the raw data in target domain to improve recommendation performance. Three key modules, i.e., (1) selective privacy preserver, (2) GAN stabilizer, and (3) robustness conductor, guarantee the cost-effective trade-off between utility and privacy, the stability of GAN when using RDP, and the robustness of leveraging transferable knowledge accordingly. 
The extensive empirical studies on Douban and Amazon datasets demonstrate that PPGenCDR significantly outperforms the state-of-the-art recommendation models while preserving privacy.", + "primary_area": "data mining and knowledge management", + "author": "Xinting Liao; Weiming Liu; Xiaolin Zheng; Binhui Yao; Chaochao Chen", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University, China; College of Computer Science and Technology, Zhejiang University, China; College of Computer Science and Technology, Zhejiang University, China; Midea Group, Foshan, China; College of Computer Science and Technology, Zhejiang University, China", + "bibtex": "@article{Liao_Liu_Zheng_Yao_Chen_2023, title={PPGenCDR: A Stable and Robust Framework for Privacy-Preserving Cross-Domain Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25566}, DOI={10.1609/aaai.v37i4.25566}, abstractNote={Privacy-preserving cross-domain recommendation (PPCDR) refers to preserving the privacy of users when transferring the knowledge from source domain to target domain for better performance, which is vital for the long-term development of recommender systems. Existing work on cross-domain recommendation (CDR) reaches advanced and satisfying recommendation performance, but mostly neglects preserving privacy. To fill this gap, we propose a privacy-preserving generative cross-domain recommendation (PPGenCDR) framework for PPCDR. PPGenCDR includes two main modules, i.e., stable privacy-preserving generator module, and robust cross-domain recommendation module. Specifically, the former isolates data from different domains with a generative adversarial network (GAN) based model, which stably estimates the distribution of private data in the source domain with \u0301Renyi differential privacy (RDP) technique. 
Then the latter aims to robustly leverage the perturbed but effective knowledge from the source domain with the raw data in target domain to improve recommendation performance. Three key modules, i.e., (1) selective privacy preserver, (2) GAN stabilizer, and (3) robustness conductor, guarantee the cost-effective trade-off between utility and privacy, the stability of GAN when using RDP, and the robustness of leveraging transferable knowledge accordingly. The extensive empirical studies on Douban and Amazon datasets demonstrate that PPGenCDR significantly outperforms the state-of-the-art recommendation models while preserving privacy.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liao, Xinting and Liu, Weiming and Zheng, Xiaolin and Yao, Binhui and Chen, Chaochao}, year={2023}, month={Jun.}, pages={4453-4461} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25566/25338", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25566", + "pdf_size": 1167284, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15609037205577360807&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;midea.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;midea.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Zhejiang University;Midea Group", + "aff_unique_dep": "College of Computer Science and Technology;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.mideaglobal.com", + "aff_unique_abbr": "ZJU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25329", + "title": "PUPS: Point Cloud Unified Panoptic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Point cloud panoptic segmentation is a challenging task 
that seeks a holistic solution for both semantic and instance segmentation to predict groupings of coherent points. Previous approaches treat semantic and instance segmentation as surrogate tasks, and they either use clustering methods or bounding boxes to gather instance groupings with costly computation and hand-craft designs in the instance segmentation task. In this paper, we propose a simple but effective point cloud unified panoptic segmentation (PUPS) framework, which use a set of point-level classifiers to directly predict semantic and instance groupings in an end-to-end manner. To realize PUPS, we introduce bipartite matching to our training pipeline so that our classifiers are able to exclusively predict groupings of instances, getting rid of hand-crafted designs, e.g. anchors and Non-Maximum Suppression (NMS). In order to achieve better grouping results, we utilize a transformer decoder to iteratively refine the point classifiers and develop a context-aware CutMix augmentation to overcome the class imbalance problem. 
As a result, PUPS achieves 1st place on the leader board of SemanticKITTI panoptic segmentation task and state-of-the-art results on nuScenes.", + "primary_area": "computer vision ii", + "author": "Shihao Su; Jianyun Xu; Huanyu Wang; Zhenwei Miao; Xin Zhan; Dayang Hao; Xi Li", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; Alibaba Group; College of Computer Science and Technology, Zhejiang University; Alibaba Group; Alibaba Group; Alibaba Group; College of Computer Science and Technology, Zhejiang University + Shanghai Institute for Advanced Study, Zhejiang University + Shanghai AI Laboratory", + "bibtex": "@article{Su_Xu_Wang_Miao_Zhan_Hao_Li_2023, title={PUPS: Point Cloud Unified Panoptic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25329}, DOI={10.1609/aaai.v37i2.25329}, abstractNote={Point cloud panoptic segmentation is a challenging task that seeks a holistic solution for both semantic and instance segmentation to predict groupings of coherent points. Previous approaches treat semantic and instance segmentation as surrogate tasks, and they either use clustering methods or bounding boxes to gather instance groupings with costly computation and hand-craft designs in the instance segmentation task. In this paper, we propose a simple but effective point cloud unified panoptic segmentation (PUPS) framework, which use a set of point-level classifiers to directly predict semantic and instance groupings in an end-to-end manner. To realize PUPS, we introduce bipartite matching to our training pipeline so that our classifiers are able to exclusively predict groupings of instances, getting rid of hand-crafted designs, e.g. anchors and Non-Maximum Suppression (NMS). In order to achieve better grouping results, we utilize a transformer decoder to iteratively refine the point classifiers and develop a context-aware CutMix augmentation to overcome the class imbalance problem. 
As a result, PUPS achieves 1st place on the leader board of SemanticKITTI panoptic segmentation task and state-of-the-art results on nuScenes.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Su, Shihao and Xu, Jianyun and Wang, Huanyu and Miao, Zhenwei and Zhan, Xin and Hao, Dayang and Li, Xi}, year={2023}, month={Jun.}, pages={2339-2347} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25329/25101", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25329", + "pdf_size": 2250906, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1488406341136507145&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;alibaba-inc.com;zju.edu.cn;alibaba-inc.com;alibaba-inc.com;gmail.com;zju.edu.cn", + "email": "zju.edu.cn;alibaba-inc.com;zju.edu.cn;alibaba-inc.com;alibaba-inc.com;gmail.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;1;1;1;0+0+2", + "aff_unique_norm": "Zhejiang University;Alibaba Group;Shanghai AI Laboratory", + "aff_unique_dep": "College of Computer Science and Technology;;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com;https://www.shanghai-ai-lab.com", + "aff_unique_abbr": "ZJU;Alibaba;SAIL", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0;0;0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26564", + "title": "PUnifiedNER: A Prompting-Based Unified NER System for Diverse Datasets", + "track": "main", + "status": "Technical", + "abstract": "Much of named entity recognition (NER) research focuses on developing dataset-specific models based on data from the domain of interest, and a limited set of related entity types. This is frustrating as each new dataset requires a new model to be trained and stored. 
In this work, we present a ``versatile'' model---the Prompting-based Unified NER system (PUnifiedNER)---that works with data from different domains and can recognise up to 37 entity types simultaneously, and theoretically it could be as many as possible. By using prompt learning, PUnifiedNER is a novel approach that is able to jointly train across multiple corpora, implementing intelligent on-demand entity recognition. Experimental results show that PUnifiedNER leads to significant prediction benefits compared to dataset-specific models with impressively reduced model deployment costs. Furthermore, the performance of PUnifiedNER can achieve competitive or even better performance than state-of-the-art domain-specific methods for some datasets. We also perform comprehensive pilot and ablation studies to support in-depth analysis of each component in PUnifiedNER.", + "primary_area": "speech natural language processing", + "author": "Jinghui Lu; Rui Zhao; Brian Mac Namee; Fei Tan", + "authorids": "", + "aff": "SenseTime Research; SenseTime Research; The Insight Centre for Data Analytics, University College Dublin + School of Computer Science, University College Dublin; SenseTime Research", + "bibtex": "@article{Lu_Zhao_Mac Namee_Tan_2023, title={PUnifiedNER: A Prompting-Based Unified NER System for Diverse Datasets}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26564}, DOI={10.1609/aaai.v37i11.26564}, abstractNote={Much of named entity recognition (NER) research focuses on developing dataset-specific models based on data from the domain of interest, and a limited set of related entity types. This is frustrating as each new dataset requires a new model to be trained and stored. In this work, we present a ``versatile\u2019\u2019 model---the Prompting-based Unified NER system (PUnifiedNER)---that works with data from different domains and can recognise up to 37 entity types simultaneously, and theoretically it could be as many as possible. 
By using prompt learning, PUnifiedNER is a novel approach that is able to jointly train across multiple corpora, implementing intelligent on-demand entity recognition. Experimental results show that PUnifiedNER leads to significant prediction benefits compared to dataset-specific models with impressively reduced model deployment costs. Furthermore, the performance of PUnifiedNER can achieve competitive or even better performance than state-of-the-art domain-specific methods for some datasets. We also perform comprehensive pilot and ablation studies to support in-depth analysis of each component in PUnifiedNER.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Jinghui and Zhao, Rui and Mac Namee, Brian and Tan, Fei}, year={2023}, month={Jun.}, pages={13327-13335} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26564/26336", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26564", + "pdf_size": 303800, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12948776437219960218&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "sensetime.com;sensetime.com;ucd.ie;sensetime.com", + "email": "sensetime.com;sensetime.com;ucd.ie;sensetime.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+1;0", + "aff_unique_norm": "SenseTime;University College Dublin", + "aff_unique_dep": "SenseTime Research;The Insight Centre for Data Analytics", + "aff_unique_url": "https://www.sensetime.com;https://www.ucd.ie", + "aff_unique_abbr": "SenseTime;UCD", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Dublin", + "aff_country_unique_index": "0;0;1+1;0", + "aff_country_unique": "China;Ireland" + }, + { + "id": "article-25450", + "title": "PaRot: Patch-Wise Rotation-Invariant Network via Feature Disentanglement and Pose Restoration", + "track": "main", + "status": "Technical", + "abstract": "Recent interest in point cloud 
analysis has led rapid progress in designing deep learning methods for 3D models. However, state-of-the-art models are not robust to rotations, which remains an unknown prior to real applications and harms the model performance. In this work, we introduce a novel Patch-wise Rotation-invariant network (PaRot), which achieves rotation invariance via feature disentanglement and produces consistent predictions for samples with arbitrary rotations. Specifically, we design a siamese training module which disentangles rotation invariance and equivariance from patches defined over different scales, e.g., the local geometry and global shape, via a pair of rotations. However, our disentangled invariant feature loses the intrinsic pose information of each patch. To solve this problem, we propose a rotation-invariant geometric relation to restore the relative pose with equivariant information for patches defined over different scales. Utilising the pose information, we propose a hierarchical module which implements intra-scale and inter-scale feature aggregation for 3D shape learning. Moreover, we introduce a pose-aware feature propagation process with the rotation-invariant relative pose information embedded. 
Experiments show that our disentanglement module extracts high-quality rotation-robust features and the proposed lightweight model achieves competitive results in rotated 3D object classification and part segmentation tasks.", + "primary_area": "computer vision iii", + "author": "Dingxin Zhang; Jianhui Yu; Chaoyi Zhang; Weidong Cai", + "authorids": "", + "aff": "School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia", + "bibtex": "@article{Zhang_Yu_Zhang_Cai_2023, title={PaRot: Patch-Wise Rotation-Invariant Network via Feature Disentanglement and Pose Restoration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25450}, DOI={10.1609/aaai.v37i3.25450}, abstractNote={Recent interest in point cloud analysis has led rapid progress in designing deep learning methods for 3D models. However, state-of-the-art models are not robust to rotations, which remains an unknown prior to real applications and harms the model performance. In this work, we introduce a novel Patch-wise Rotation-invariant network (PaRot), which achieves rotation invariance via feature disentanglement and produces consistent predictions for samples with arbitrary rotations. Specifically, we design a siamese training module which disentangles rotation invariance and equivariance from patches defined over different scales, e.g., the local geometry and global shape, via a pair of rotations. However, our disentangled invariant feature loses the intrinsic pose information of each patch. To solve this problem, we propose a rotation-invariant geometric relation to restore the relative pose with equivariant information for patches defined over different scales. 
Utilising the pose information, we propose a hierarchical module which implements intra-scale and inter-scale feature aggregation for 3D shape learning. Moreover, we introduce a pose-aware feature propagation process with the rotation-invariant relative pose information embedded. Experiments show that our disentanglement module extracts high-quality rotation-robust features and the proposed lightweight model achieves competitive results in rotated 3D object classification and part segmentation tasks.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Dingxin and Yu, Jianhui and Zhang, Chaoyi and Cai, Weidong}, year={2023}, month={Jun.}, pages={3418-3426} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25450/25222", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25450", + "pdf_size": 1597388, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8560305201155180741&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "uni.sydney.edu.au;sydney.edu.au;sydney.edu.au;sydney.edu.au", + "email": "uni.sydney.edu.au;sydney.edu.au;sydney.edu.au;sydney.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Sydney", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.sydney.edu.au", + "aff_unique_abbr": "USYD", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Sydney", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25533", + "title": "PaTeCon: A Pattern-Based Temporal Constraint Mining Method for Conflict Detection on Knowledge Graphs", + "track": "main", + "status": "Technical", + "abstract": "Temporal facts, the facts for characterizing events that hold in specific time periods, are attracting rising attention in the knowledge graph (KG) research communities. 
In terms of quality management, the introduction of time restrictions brings new challenges to maintaining the temporal consistency of KGs and detecting potential temporal conflicts. Previous studies rely on manually enumerated temporal constraints to detect conflicts, which are labor-intensive and may have granularity issues. We start from the common pattern of temporal facts and constraints and propose a pattern-based temporal constraint mining method, PaTeCon. PaTeCon uses automatically determined graph patterns and their relevant statistical information over the given KG instead of human experts to generate time constraints. Specifically, PaTeCon dynamically attaches type restriction to candidate constraints according to their measuring scores. We evaluate PaTeCon on two large-scale datasets based on Wikidata and Freebase respectively, the experimental results show that pattern-based automatic constraint mining is powerful in generating valuable temporal constraints.", + "primary_area": "data mining and knowledge management", + "author": "Jianhao Chen; Junyang Ren; Wentao Ding; Yuzhong Qu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Chen_Ren_Ding_Qu_2023, title={PaTeCon: A Pattern-Based Temporal Constraint Mining Method for Conflict Detection on Knowledge Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25533}, DOI={10.1609/aaai.v37i4.25533}, abstractNote={Temporal facts, the facts for characterizing events that hold in specific time periods, are attracting rising attention in the knowledge graph (KG) research communities. 
In terms of quality management, the introduction of time restrictions brings new challenges to maintaining the temporal consistency of KGs and detecting potential temporal conflicts. Previous studies rely on manually enumerated temporal constraints to detect conflicts, which are labor-intensive and may have granularity issues. We start from the common pattern of temporal facts and constraints and propose a pattern-based temporal constraint mining method, PaTeCon. PaTeCon uses automatically determined graph patterns and their relevant statistical information over the given KG instead of human experts to generate time constraints. Specifically, PaTeCon dynamically attaches type restriction to candidate constraints according to their measuring scores. We evaluate PaTeCon on two large-scale datasets based on Wikidata and Freebase respectively, the experimental results show that pattern-based automatic constraint mining is powerful in generating valuable temporal constraints.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jianhao and Ren, Junyang and Ding, Wentao and Qu, Yuzhong}, year={2023}, month={Jun.}, pages={4166-4172} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25533/25305", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25533", + "pdf_size": 274690, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15619685335119925983&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25099", + "title": "Painterly Image Harmonization in Dual Domains", + "track": "main", + "status": "Technical", + "abstract": "Image harmonization aims to produce visually harmonious composite images by adjusting the foreground appearance to be compatible with the background. When the composite image has photographic foreground and painterly background, the task is called painterly image harmonization. There are only few works on this task, which are either time-consuming or weak in generating well-harmonized results. In this work, we propose a novel painterly harmonization network consisting of a dual-domain generator and a dual-domain discriminator, which harmonizes the composite image in both spatial domain and frequency domain. The dual-domain generator performs harmonization by using AdaIN modules in the spatial domain and our proposed ResFFT modules in the frequency domain. The dual-domain discriminator attempts to distinguish the inharmonious patches based on the spatial feature and frequency feature of each patch, which can enhance the ability of generator in an adversarial manner. Extensive experiments on the benchmark dataset show the effectiveness of our method. 
Our code and model are available at https://github.com/bcmi/PHDNet-Painterly-Image-Harmonization.", + "primary_area": "computer vision i", + "author": "Junyan Cao; Yan Hong; Li Niu", + "authorids": "", + "aff": "MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University", + "bibtex": "@article{Cao_Hong_Niu_2023, title={Painterly Image Harmonization in Dual Domains}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25099}, DOI={10.1609/aaai.v37i1.25099}, abstractNote={Image harmonization aims to produce visually harmonious composite images by adjusting the foreground appearance to be compatible with the background. When the composite image has photographic foreground and painterly background, the task is called painterly image harmonization. There are only few works on this task, which are either time-consuming or weak in generating well-harmonized results. In this work, we propose a novel painterly harmonization network consisting of a dual-domain generator and a dual-domain discriminator, which harmonizes the composite image in both spatial domain and frequency domain. The dual-domain generator performs harmonization by using AdaIN modules in the spatial domain and our proposed ResFFT modules in the frequency domain. The dual-domain discriminator attempts to distinguish the inharmonious patches based on the spatial feature and frequency feature of each patch, which can enhance the ability of generator in an adversarial manner. Extensive experiments on the benchmark dataset show the effectiveness of our method. 
Our code and model are available at https://github.com/bcmi/PHDNet-Painterly-Image-Harmonization.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Junyan and Hong, Yan and Niu, Li}, year={2023}, month={Jun.}, pages={268-276} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25099/24871", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25099", + "pdf_size": 8240785, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16280072046896140076&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;gmail.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;gmail.com", + "github": "https://github.com/bcmi/PHDNet-Painterly-Image-Harmonization", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "MoE Key Lab of Artificial Intelligence", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26939", + "title": "PanTop: Pandemic Topic Detection and Monitoring System (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Diverse efforts to combat the COVID-19 pandemic have continued throughout the past two years. Governments have announced plans for unprecedentedly rapid vaccine development, quarantine measures, and economic revitalization. They contribute to a more effective pandemic response by determining the precise opinions of individuals regarding these mitigation measures. In this paper, we propose a deep learning-based topic monitoring and storyline extraction system for COVID-19 that is capable of analyzing public sentiment and pandemic trends. 
The proposed method is able to retrieve Twitter data related to COVID-19 and conduct spatiotemporal analysis. Furthermore, a deep learning component of the system provides monitoring and modeling capabilities for topics based on advanced natural language processing models. A variety of visualization methods are applied to the project to show the distribution of each topic. Our proposed system accurately reflects how public reactions change over time along with pandemic topics.", + "primary_area": "", + "author": "Yangxiao Bai; Kaiqun Fu", + "authorids": "", + "aff": "Department of Electrical Engineering and Computer Science, South Dakota State University; Department of Electrical Engineering and Computer Science, South Dakota State University", + "bibtex": "@article{Bai_Fu_2024, title={PanTop: Pandemic Topic Detection and Monitoring System (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26939}, DOI={10.1609/aaai.v37i13.26939}, abstractNote={Diverse efforts to combat the COVID-19 pandemic have continued throughout the past two years. Governments have announced plans for unprecedentedly rapid vaccine development, quarantine measures, and economic revitalization. They contribute to a more effective pandemic response by determining the precise opinions of individuals regarding these mitigation measures. In this paper, we propose a deep learning-based topic monitoring and storyline extraction system for COVID-19 that is capable of analyzing public sentiment and pandemic trends. The proposed method is able to retrieve Twitter data related to COVID-19 and conduct spatiotemporal analysis. Furthermore, a deep learning component of the system provides monitoring and modeling capabilities for topics based on advanced natural language processing models. A variety of visualization methods are applied to the project to show the distribution of each topic. 
Our proposed system accurately reflects how public reactions change over time along with pandemic topics.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bai, Yangxiao and Fu, Kaiqun}, year={2024}, month={Jul.}, pages={16158-16159} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26939/26711", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26939", + "pdf_size": 1329353, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:cg6yzGxtr0AJ:scholar.google.com/&scioq=PanTop:+Pandemic+Topic+Detection+and+Monitoring+System+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "sdstate.edu;sdstate.edu", + "email": "sdstate.edu;sdstate.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "South Dakota State University", + "aff_unique_dep": "Department of Electrical Engineering and Computer Science", + "aff_unique_url": "https://www.sdstate.edu", + "aff_unique_abbr": "SDSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25227", + "title": "Panoramic Video Salient Object Detection with Ambisonic Audio Guidance", + "track": "main", + "status": "Technical", + "abstract": "Video salient object detection (VSOD), as a fundamental computer vision problem, has been extensively discussed in the last decade. However, all existing works focus on addressing the VSOD problem in 2D scenarios. With the rapid development of VR devices, panoramic videos have been a promising alternative to 2D videos to provide immersive feelings of the real world. In this paper, we aim to tackle the video salient object detection problem for panoramic videos, with their corresponding ambisonic audios. 
A multimodal fusion module equipped with two pseudo-siamese audio-visual context fusion (ACF) blocks is proposed to effectively conduct audio-visual interaction. The ACF block equipped with spherical positional encoding enables the fusion in the 3D context to capture the spatial correspondence between pixels and sound sources from the equirectangular frames and ambisonic audios. Experimental results verify the effectiveness of our proposed components and demonstrate that our method achieves state-of-the-art performance on the ASOD60K dataset.", + "primary_area": "computer vision ii", + "author": "Xiang Li; Haoyuan Cao; Shijie Zhao; Junlin Li; Li Zhang; Bhiksha Raj", + "authorids": "", + "aff": "Carnegie Mellon University, PA, USA; Bytedance Inc., San Diego, CA, USA; Bytedance Inc., Shenzhen, China; Bytedance Inc., San Diego, CA, USA; Bytedance Inc., San Diego, CA, USA; Carnegie Mellon University, PA, USA + Mohammed bin Zayed University of AI, Abu Dhabi, UAE", + "bibtex": "@article{Li_Cao_Zhao_Li_Zhang_Raj_2023, title={Panoramic Video Salient Object Detection with Ambisonic Audio Guidance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25227}, DOI={10.1609/aaai.v37i2.25227}, abstractNote={Video salient object detection (VSOD), as a fundamental computer vision problem, has been extensively discussed in the last decade. However, all existing works focus on addressing the VSOD problem in 2D scenarios. With the rapid development of VR devices, panoramic videos have been a promising alternative to 2D videos to provide immersive feelings of the real world. In this paper, we aim to tackle the video salient object detection problem for panoramic videos, with their corresponding ambisonic audios. A multimodal fusion module equipped with two pseudo-siamese audio-visual context fusion (ACF) blocks is proposed to effectively conduct audio-visual interaction. 
The ACF block equipped with spherical positional encoding enables the fusion in the 3D context to capture the spatial correspondence between pixels and sound sources from the equirectangular frames and ambisonic audios. Experimental results verify the effectiveness of our proposed components and demonstrate that our method achieves state-of-the-art performance on the ASOD60K dataset.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xiang and Cao, Haoyuan and Zhao, Shijie and Li, Junlin and Zhang, Li and Raj, Bhiksha}, year={2023}, month={Jun.}, pages={1424-1432} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25227/24999", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25227", + "pdf_size": 6396510, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16900389821938730044&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "andrew.cmu.edu;bytedance.com;bytedance.com;bytedance.com;bytedance.com;andrew.cmu.edu", + "email": "andrew.cmu.edu;bytedance.com;bytedance.com;bytedance.com;bytedance.com;andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;0+2", + "aff_unique_norm": "Carnegie Mellon University;Bytedance Inc.;Mohammed bin Zayed University of AI", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cmu.edu;https://www.bytedance.com;https://mbzuai.ac.ae", + "aff_unique_abbr": "CMU;Bytedance;MBZUAI", + "aff_campus_unique_index": "0;1;2;1;1;0+3", + "aff_campus_unique": "Pittsburgh;San Diego;Shenzhen;Abu Dhabi", + "aff_country_unique_index": "0;0;1;0;0;0+2", + "aff_country_unique": "United States;China;United Arab Emirates" + }, + { + "id": "article-25275", + "title": "ParaFormer: Parallel Attention Transformer for Efficient Feature Matching", + "track": "main", + "status": "Technical", + "abstract": "Heavy computation is a bottleneck limiting deep-learning-based feature matching 
algorithms to be applied in many real-time applications. However, existing lightweight networks optimized for Euclidean data cannot address classical feature matching tasks, since sparse keypoint based descriptors are expected to be matched. This paper tackles this problem and proposes two concepts: 1) a novel parallel attention model entitled ParaFormer and 2) a graph based U-Net architecture with attentional pooling. First, ParaFormer fuses features and keypoint positions through the concept of amplitude and phase, and integrates self- and cross-attention in a parallel manner which achieves a win-win performance in terms of accuracy and efficiency. Second, with U-Net architecture and proposed attentional pooling, the ParaFormer-U variant significantly reduces computational complexity, and minimize performance loss caused by downsampling. Sufficient experiments on various applications, including homography estimation, pose estimation, and image matching, demonstrate that ParaFormer achieves state-of-the-art performance while maintaining high efficiency. The efficient ParaFormer-U variant achieves comparable performance with less than 50% FLOPs of the existing attention-based models.", + "primary_area": "computer vision ii", + "author": "Xiaoyong Lu; Yaping Yan; Bin Kang; Songlin Du", + "authorids": "", + "aff": "Southeast University; Southeast University; Nanjing University of Posts and Telecommunication; Southeast University", + "bibtex": "@article{Lu_Yan_Kang_Du_2023, title={ParaFormer: Parallel Attention Transformer for Efficient Feature Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25275}, DOI={10.1609/aaai.v37i2.25275}, abstractNote={Heavy computation is a bottleneck limiting deep-learning-based feature matching algorithms to be applied in many real-time applications. 
However, existing lightweight networks optimized for Euclidean data cannot address classical feature matching tasks, since sparse keypoint based descriptors are expected to be matched. This paper tackles this problem and proposes two concepts: 1) a novel parallel attention model entitled ParaFormer and 2) a graph based U-Net architecture with attentional pooling. First, ParaFormer fuses features and keypoint positions through the concept of amplitude and phase, and integrates self- and cross-attention in a parallel manner which achieves a win-win performance in terms of accuracy and efficiency. Second, with U-Net architecture and proposed attentional pooling, the ParaFormer-U variant significantly reduces computational complexity, and minimize performance loss caused by downsampling. Sufficient experiments on various applications, including homography estimation, pose estimation, and image matching, demonstrate that ParaFormer achieves state-of-the-art performance while maintaining high efficiency. 
The efficient ParaFormer-U variant achieves comparable performance with less than 50% FLOPs of the existing attention-based models.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Xiaoyong and Yan, Yaping and Kang, Bin and Du, Songlin}, year={2023}, month={Jun.}, pages={1853-1860} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25275/25047", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25275", + "pdf_size": 3900571, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13785477522495331795&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "seu.edu.cn;seu.edu.cn;njupt.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;njupt.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Southeast University;Nanjing University of Posts and Telecommunications", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.seu.edu.cn/;http://www.njupt.edu.cn", + "aff_unique_abbr": "SEU;NUPT", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Nanjing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27033", + "title": "Parallel Index-Based Search Algorithm for Coalition Structure Generation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this paper, we propose a novel algorithm to address the Coalition Structure Generation (CSG) problem. Specifically, we use a novel representation of the search space that enables it to be explored in a new way. We introduce an index-based exact algorithm. Our algorithm is anytime, produces optimal solutions, and can be run on large-scale problems with hundreds of agents. 
Our experimental evaluation on a benchmark with several value distributions shows that our representation of the search space that we combined with the proposed algorithm provides high-quality results for the CSG problem and outperforms existing state-of-the-art algorithms.", + "primary_area": "", + "author": "Redha Taguelmimt; Samir Aknine; Djamila Boukredera; Narayan Changder", + "authorids": "", + "aff": "LIRIS, Lyon 1 University, France; LIRIS, Lyon 1 University, France; Laboratory of Applied Mathematics, Faculty of Exact Sciences, University of Bejaia, 06000 Bejaia, Algeria; TCG Centres for Research and Education in Science and Technology, Kolkata, India", + "bibtex": "@article{Taguelmimt_Aknine_Boukredera_Changder_2024, title={Parallel Index-Based Search Algorithm for Coalition Structure Generation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27033}, DOI={10.1609/aaai.v37i13.27033}, abstractNote={In this paper, we propose a novel algorithm to address the Coalition Structure Generation (CSG) problem. Specifically, we use a novel representation of the search space that enables it to be explored in a new way. We introduce an index-based exact algorithm. Our algorithm is anytime, produces optimal solutions, and can be run on large-scale problems with hundreds of agents. 
Our experimental evaluation on a benchmark with several value distributions shows that our representation of the search space that we combined with the proposed algorithm provides high-quality results for the CSG problem and outperforms existing state-of-the-art algorithms.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Taguelmimt, Redha and Aknine, Samir and Boukredera, Djamila and Changder, Narayan}, year={2024}, month={Jul.}, pages={16346-16347} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27033/26805", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27033", + "pdf_size": 170749, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14739353352960323994&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;univ-lyon1.fr;univ-bejaia.dz;tcgcrest.org", + "email": "gmail.com;univ-lyon1.fr;univ-bejaia.dz;tcgcrest.org", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "Lyon 1 University;University of Bejaia;TCG Centres for Research and Education in Science and Technology", + "aff_unique_dep": "LIRIS;Faculty of Exact Sciences;", + "aff_unique_url": "https://www.universite-lyon1.fr;;", + "aff_unique_abbr": ";;", + "aff_campus_unique_index": "0;0;1;2", + "aff_campus_unique": "Lyon;Bejaia;Kolkata", + "aff_country_unique_index": "0;0;1;2", + "aff_country_unique": "France;Algeria;India" + }, + { + "id": "article-25160", + "title": "Parameter-Efficient Model Adaptation for Vision Transformers", + "track": "main", + "status": "Technical", + "abstract": "In computer vision, it has achieved great transfer learning performance via adapting large-scale pretrained vision models (e.g., vision transformers) to downstream tasks. Common approaches for model adaptation either update all model parameters or leverage linear probes. 
In this paper, we aim to study parameter-efficient model adaptation strategies for vision transformers on the image classification task. We formulate efficient model adaptation as a subspace training problem and perform a comprehensive benchmarking over different efficient adaptation methods. We conduct an empirical study on each efficient model adaptation method focusing on its performance alongside parameter cost. Furthermore, we propose a parameter-efficient model adaptation framework, which first selects submodules by measuring local intrinsic dimensions and then projects them into subspace for further decomposition via a novel Kronecker Adaptation method. We analyze and compare our method with a diverse set of baseline model adaptation methods (including state-of-the-art methods for pretrained language models). Our method performs the best in terms of the tradeoff between accuracy and parameter efficiency across 20 datasets under the few-shot setting and 7 image classification datasets under the full-shot setting.", + "primary_area": "computer vision i", + "author": "Xuehai He; Chunyuan Li; Pengchuan Zhang; Jianwei Yang; Xin Eric Wang", + "authorids": "", + "aff": "UC Santa Cruz; Microsoft Research at Redmond; Microsoft Research at Redmond; Microsoft Research at Redmond; UC Santa Cruz", + "bibtex": "@article{He_Li_Zhang_Yang_Wang_2023, title={Parameter-Efficient Model Adaptation for Vision Transformers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25160}, DOI={10.1609/aaai.v37i1.25160}, abstractNote={In computer vision, it has achieved great transfer learning performance via adapting large-scale pretrained vision models (e.g., vision transformers) to downstream tasks. Common approaches for model adaptation either update all model parameters or leverage linear probes. In this paper, we aim to study parameter-efficient model adaptation strategies for vision transformers on the image classification task. 
We formulate efficient model adaptation as a subspace training problem and perform a comprehensive benchmarking over different efficient adaptation methods. We conduct an empirical study on each efficient model adaptation method focusing on its performance alongside parameter cost. Furthermore, we propose a parameter-efficient model adaptation framework, which first selects submodules by measuring local intrinsic dimensions and then projects them into subspace for further decomposition via a novel Kronecker Adaptation method. We analyze and compare our method with a diverse set of baseline model adaptation methods (including state-of-the-art methods for pretrained language models). Our method performs the best in terms of the tradeoff between accuracy and parameter efficiency across 20 datasets under the few-shot setting and 7 image classification datasets under the full-shot setting.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Xuehai and Li, Chunyuan and Zhang, Pengchuan and Yang, Jianwei and Wang, Xin Eric}, year={2023}, month={Jun.}, pages={817-825} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25160/24932", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25160", + "pdf_size": 448920, + "gs_citation": 106, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14159298308920154307&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ucsc.edu;microsoft.com;microsoft.com;microsoft.com;ucsc.edu", + "email": "ucsc.edu;microsoft.com;microsoft.com;microsoft.com;ucsc.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "University of California, Santa Cruz;Microsoft Research", + "aff_unique_dep": ";Research", + "aff_unique_url": "https://www.ucsc.edu;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "UCSC;MSR", + "aff_campus_unique_index": "0;1;1;1;0", + "aff_campus_unique": "Santa 
Cruz;Redmond", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25560", + "title": "Parameterized Algorithms for Colored Clustering", + "track": "main", + "status": "Technical", + "abstract": "In the Colored Clustering problem, one is asked to cluster edge-colored (hyper-)graphs whose colors represent interaction types. More specifically, the goal is to select as many edges as possible without choosing two edges that share an endpoint and are colored differently. Equivalently, the goal can also be described as assigning colors to the vertices in a way that fits the edge-coloring as well as possible.\n\nAs this problem is NP-hard, we build on previous work by studying its parameterized complexity. We give a 2\u1d3c\u207d\u1d4f\u207e\u00b7n\u1d3c\u207d\u00b9\u207e-time algorithm where k is the number of edges to be selected and n the number of vertices. We also prove the existence of a problem kernel of size O(k\u2075\u141f\u00b2), resolving an open problem posed in the literature. We consider parameters that are smaller than k, the number of edges to be selected, and r, the number of edges that can be deleted. Such smaller parameters are obtained by considering the difference between k or r and some lower bound on these values. We give both algorithms and lower bounds for Colored Clustering with such parameterizations. 
Finally, we settle the parameterized complexity of Colored Clustering with respect to structural graph parameters by showing that it is W[1]-hard with respect to both vertex cover number and tree-cut width, but fixed-parameter tractable with respect to local feedback edge number.", + "primary_area": "data mining and knowledge management", + "author": "Leon Kellerhals; Tomohiro Koana; Pascal Kunz; Rolf Niedermeier", + "authorids": "", + "aff": "Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity, Berlin, Germany; Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity, Berlin, Germany; Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity, Berlin, Germany + Humboldt-Universit\u00e4t zu Berlin, Algorithm Engineering, Berlin, Germany; Technische Universit\u00e4t Berlin, Algorithmics and Computational Complexity, Berlin, Germany", + "bibtex": "@article{Kellerhals_Koana_Kunz_Niedermeier_2023, title={Parameterized Algorithms for Colored Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25560}, DOI={10.1609/aaai.v37i4.25560}, abstractNote={In the Colored Clustering problem, one is asked to cluster edge-colored (hyper-)graphs whose colors represent interaction types. More specifically, the goal is to select as many edges as possible without choosing two edges that share an endpoint and are colored differently. Equivalently, the goal can also be described as assigning colors to the vertices in a way that fits the edge-coloring as well as possible. As this problem is NP-hard, we build on previous work by studying its parameterized complexity. We give a 2\u1d3c\u207d\u1d4f\u207e\u00b7n\u1d3c\u207d\u00b9\u207e-time algorithm where k is the number of edges to be selected and n the number of vertices. We also prove the existence of a problem kernel of size O(k\u2075\u141f\u00b2), resolving an open problem posed in the literature. 
We consider parameters that are smaller than k, the number of edges to be selected, and r, the number of edges that can be deleted. Such smaller parameters are obtained by considering the difference between k or r and some lower bound on these values. We give both algorithms and lower bounds for Colored Clustering with such parameterizations. Finally, we settle the parameterized complexity of Colored Clustering with respect to structural graph parameters by showing that it is W[1]-hard with respect to both vertex cover number and tree-cut width, but fixed-parameter tractable with respect to local feedback edge number.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kellerhals, Leon and Koana, Tomohiro and Kunz, Pascal and Niedermeier, Rolf}, year={2023}, month={Jun.}, pages={4400-4408} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25560/25332", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25560", + "pdf_size": 166821, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12197110662905801969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "tu-berlin.de;tu-berlin.de;tu-berlin.de; ", + "email": "tu-berlin.de;tu-berlin.de;tu-berlin.de; ", + "github": "", + "project": "https://www.samhsa.gov/data/data-we-collect/dawn-drug-abuse-warning-network", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;Humboldt-Universit\u00e4t zu Berlin", + "aff_unique_dep": "Algorithmics and Computational Complexity;Algorithm Engineering", + "aff_unique_url": "https://www.tu-berlin.de;https://www.hu-berlin.de", + "aff_unique_abbr": "TU Berlin;HU Berlin", + "aff_campus_unique_index": "0;0;0+0;0", + "aff_campus_unique": "Berlin", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25097", + "title": "Parametric Surface Constrained Upsampler Network for 
Point Cloud", + "track": "main", + "status": "Technical", + "abstract": "Designing a point cloud upsampler, which aims to generate a clean and dense point cloud given a sparse point representation, is a fundamental and challenging problem in computer vision. A line of attempts achieves this goal by establishing a point-to-point mapping function via deep neural networks. However, these approaches are prone to produce outlier points due to the lack of explicit surface-level constraints. To solve this problem, we introduce a novel surface regularizer into the upsampler network by forcing the neural network to learn the underlying parametric surface represented by bicubic functions and rotation functions, where the new generated points are then constrained on the underlying surface. These designs are integrated into two different networks for two tasks that take advantages of upsampling layers -- point cloud upsampling and point cloud completion for evaluation. The state-of-the-art experimental results on both tasks demonstrate the effectiveness of the proposed method. The implementation code will be available at https://github.com/corecai163/PSCU.", + "primary_area": "computer vision i", + "author": "Pingping Cai; Zhenyao Wu; Xinyi Wu; Song Wang", + "authorids": "", + "aff": "University of South Carolina, USA; University of South Carolina, USA; University of South Carolina, USA; University of South Carolina, USA", + "bibtex": "@article{Cai_Wu_Wu_Wang_2023, title={Parametric Surface Constrained Upsampler Network for Point Cloud}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25097}, DOI={10.1609/aaai.v37i1.25097}, abstractNote={Designing a point cloud upsampler, which aims to generate a clean and dense point cloud given a sparse point representation, is a fundamental and challenging problem in computer vision. A line of attempts achieves this goal by establishing a point-to-point mapping function via deep neural networks. 
However, these approaches are prone to produce outlier points due to the lack of explicit surface-level constraints. To solve this problem, we introduce a novel surface regularizer into the upsampler network by forcing the neural network to learn the underlying parametric surface represented by bicubic functions and rotation functions, where the new generated points are then constrained on the underlying surface. These designs are integrated into two different networks for two tasks that take advantages of upsampling layers -- point cloud upsampling and point cloud completion for evaluation. The state-of-the-art experimental results on both tasks demonstrate the effectiveness of the proposed method. The implementation code will be available at https://github.com/corecai163/PSCU.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cai, Pingping and Wu, Zhenyao and Wu, Xinyi and Wang, Song}, year={2023}, month={Jun.}, pages={250-258} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25097/24869", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25097", + "pdf_size": 1514177, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8727147256667484101&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "email.sc.edu;email.sc.edu;email.sc.edu;cec.sc.edu", + "email": "email.sc.edu;email.sc.edu;email.sc.edu;cec.sc.edu", + "github": "https://github.com/corecai163/PSCU", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of South Carolina", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25871", + "title": "Partial-Label Regression", + "track": "main", + "status": "Technical", + "abstract": 
"Partial-label learning is a popular weakly supervised learning setting that allows each training example to be annotated with a set of candidate labels. Previous studies on partial-label learning only focused on the classification setting where candidate labels are all discrete, which cannot handle continuous labels with real values. In this paper, we provide the first attempt to investigate partial-label regression, where each training example is annotated with a set of real-valued candidate labels. To solve this problem, we first propose a simple baseline method that takes the average loss incurred by candidate labels as the predictive loss. The drawback of this method lies in that the loss incurred by the true label may be overwhelmed by other false labels. To overcome this drawback, we propose an identification method that takes the least loss incurred by candidate labels as the predictive loss. We further improve it by proposing a progressive identification method to differentiate candidate labels using progressively updated weights for incurred losses. We prove that the latter two methods are model-consistent and provide convergence analysis showing the optimal parametric convergence rate. Our proposed methods are theoretically grounded and can be compatible with any models, optimizers, and losses. 
Experiments validate the effectiveness of our proposed methods.", + "primary_area": "machine learning i", + "author": "Xin Cheng; Deng-Bao Wang; Lei Feng; Min-Ling Zhang; Bo An", + "authorids": "", + "aff": "College of Computer Science, Chongqing University, Chongqing, China; School of Computer Science and Engineering, Southeast University, Nanjing, China; College of Computer Science, Chongqing University, Chongqing, China; School of Computer Science and Engineering, Southeast University, Nanjing, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore", + "bibtex": "@article{Cheng_Wang_Feng_Zhang_An_2023, title={Partial-Label Regression}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25871}, DOI={10.1609/aaai.v37i6.25871}, abstractNote={Partial-label learning is a popular weakly supervised learning setting that allows each training example to be annotated with a set of candidate labels. Previous studies on partial-label learning only focused on the classification setting where candidate labels are all discrete, which cannot handle continuous labels with real values. In this paper, we provide the first attempt to investigate partial-label regression, where each training example is annotated with a set of real-valued candidate labels. To solve this problem, we first propose a simple baseline method that takes the average loss incurred by candidate labels as the predictive loss. The drawback of this method lies in that the loss incurred by the true label may be overwhelmed by other false labels. To overcome this drawback, we propose an identification method that takes the least loss incurred by candidate labels as the predictive loss. We further improve it by proposing a progressive identification method to differentiate candidate labels using progressively updated weights for incurred losses. 
We prove that the latter two methods are model-consistent and provide convergence analysis showing the optimal parametric convergence rate. Our proposed methods are theoretically grounded and can be compatible with any models, optimizers, and losses. Experiments validate the effectiveness of our proposed methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Xin and Wang, Deng-Bao and Feng, Lei and Zhang, Min-Ling and An, Bo}, year={2023}, month={Jun.}, pages={7140-7147} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25871/25643", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25871", + "pdf_size": 192099, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7989825527359106707&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "stu.cqu.edu.cn;seu.edu.cn;cqu.edu.cn;seu.edu.cn;ntu.edu.sg", + "email": "stu.cqu.edu.cn;seu.edu.cn;cqu.edu.cn;seu.edu.cn;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;2", + "aff_unique_norm": "Chongqing University;Southeast University;Nanyang Technological University", + "aff_unique_dep": "College of Computer Science;School of Computer Science and Engineering;School of Computer Science and Engineering", + "aff_unique_url": "http://en.cqu.edu.cn/;https://www.seu.edu.cn/;https://www.ntu.edu.sg", + "aff_unique_abbr": "CQU;SEU;NTU", + "aff_campus_unique_index": "0;1;0;1;2", + "aff_campus_unique": "Chongqing;Nanjing;Singapore", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25699", + "title": "Participatory Budgeting Designs for the Real World", + "track": "main", + "status": "Technical", + "abstract": "Participatory budgeting engages the public in the process of allocating public money to different types of projects. 
PB designs differ in how voters are asked to express their preferences over candidate projects and how these preferences are aggregated to determine which projects to fund. This paper studies two fundamental questions in PB design. Which voting format and aggregation method to use, and how to evaluate the outcomes of these design decisions? We conduct an extensive empirical study in which 1 800 participants vote in four participatory budgeting elections in a controlled setting to evaluate the practical effects of the choice of voting format and aggregation rule. We find that k-approval leads to the best user experience. With respect to the aggregation rule, greedy aggregation leads to outcomes that are highly sensitive to the input format used and the fraction of the population that participates. The method of equal shares, in contrast, leads to outcomes that are not sensitive to the type of voting format used, and these outcomes are remarkably stable even when the majority of the population does not participate in the election. These results carry valuable insights for PB practitioners and social choice researchers.", + "primary_area": "game theory and economic paradigms", + "author": "Roy Fairstein; Gerdus Benad\u00e8; Kobi Gal", + "authorids": "", + "aff": "Ben-Gurion University of the Negev, Israel; Boston University, USA; Ben-Gurion University of the Negev, Israel + University of Edinburgh, UK", + "bibtex": "@article{Fairstein_Benad\u00e8_Gal_2023, title={Participatory Budgeting Designs for the Real World}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25699}, DOI={10.1609/aaai.v37i5.25699}, abstractNote={Participatory budgeting engages the public in the process of allocating public money to different types of projects. PB designs differ in how voters are asked to express their preferences over candidate projects and how these preferences are aggregated to determine which projects to fund. 
This paper studies two fundamental questions in PB design. Which voting format and aggregation method to use, and how to evaluate the outcomes of these design decisions? We conduct an extensive empirical study in which 1 800 participants vote in four participatory budgeting elections in a controlled setting to evaluate the practical effects of the choice of voting format and aggregation rule. We find that k-approval leads to the best user experience. With respect to the aggregation rule, greedy aggregation leads to outcomes that are highly sensitive to the input format used and the fraction of the population that participates. The method of equal shares, in contrast, leads to outcomes that are not sensitive to the type of voting format used, and these outcomes are remarkably stable even when the majority of the population does not participate in the election. These results carry valuable insights for PB practitioners and social choice researchers.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fairstein, Roy and Benad\u00e8, Gerdus and Gal, Kobi}, year={2023}, month={Jun.}, pages={5633-5640} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25699/25471", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25699", + "pdf_size": 4018177, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=44948566716702011&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "post.bgu.ac.il;bu.edu;bgu.ac.il", + "email": "post.bgu.ac.il;bu.edu;bgu.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "Ben-Gurion University of the Negev;Boston University;University of Edinburgh", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.bgu.ac.il;https://www.bu.edu;https://www.ed.ac.uk", + "aff_unique_abbr": "BGU;BU;Edinburgh", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;1;0+2", + "aff_country_unique": "Israel;United States;United Kingdom" + }, + { + "id": "article-25713", + "title": "Partitioning Friends Fairly", + "track": "main", + "status": "Technical", + "abstract": "We consider the problem of partitioning n agents in an undirected social network into k almost equal in size (differing by at most one) groups, where the utility of an agent for a group is the number of her neighbors in the group. The core and envy-freeness are two compelling axiomatic fairness guarantees in such settings. The former demands that there be no coalition of agents such that each agent in the coalition has more utility for that coalition than for her own group, while the latter demands that no agent envy another agent for the group they are in. We provide (often tight) approximations to both fairness guarantees, and many of our positive results are obtained via efficient algorithms.", + "primary_area": "game theory and economic paradigms", + "author": "Lily Li; Evi Micha; Aleksandar Nikolov; Nisarg Shah", + "authorids": "", + "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", + "bibtex": "@article{Li_Micha_Nikolov_Shah_2023, title={Partitioning Friends Fairly}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25713}, DOI={10.1609/aaai.v37i5.25713}, abstractNote={We consider the problem of partitioning n agents in an undirected social network into k almost equal in size (differing by at most one) groups, where the utility of an agent for a group is the number of her neighbors in the group. The core and envy-freeness are two compelling axiomatic fairness guarantees in such settings. 
The former demands that there be no coalition of agents such that each agent in the coalition has more utility for that coalition than for her own group, while the latter demands that no agent envy another agent for the group they are in. We provide (often tight) approximations to both fairness guarantees, and many of our positive results are obtained via efficient algorithms.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Lily and Micha, Evi and Nikolov, Aleksandar and Shah, Nisarg}, year={2023}, month={Jun.}, pages={5747-5754} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25713/25485", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25713", + "pdf_size": 160079, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10627097819446979075&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", + "email": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Toronto", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utoronto.ca", + "aff_unique_abbr": "U of T", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Toronto", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26730", + "title": "PatchNAS: Repairing DNNs in Deployment with Patched Network Architecture Search", + "track": "aaai special track", + "status": "Technical", + "abstract": "Despite being widely deployed in safety-critical applications such as autonomous driving and health care, deep neural networks (DNNs) still suffer from non-negligible reliability issues. Numerous works had reported that DNNs were vulnerable to either natural environmental noises or man-made adversarial noises. 
How to repair DNNs in deployment with noisy samples is a crucial topic for the robustness of neural networks. While many network repairing methods based on data argumentation and weight adjustment have been proposed, they require retraining and redeploying the whole model, which causes high overhead and is infeasible for varying faulty cases on different deployment environments. In this paper, we propose a novel network repairing framework called PatchNAS from the architecture perspective, where we freeze the pretrained DNNs and introduce a small patch network to deal with failure samples at runtime. PatchNAS introduces a novel network instrumentation method to determine the faulty stage of the network structure given the collected failure samples. A small patch network structure is searched unsupervisedly using neural architecture search (NAS) technique with data samples from deployment environment. The patch network repairs the DNNs by correcting the output feature maps of the faulty stage, which helps to maintain network performance on normal samples and enhance robustness in noisy environments. 
Extensive experiments based on several DNNs across 15 types of natural noises show that the proposed PatchNAS outperforms the state-of-the-arts with significant performance improvement as well as much lower deployment overhead.", + "primary_area": "safe and robust ai", + "author": "Yuchu Fang; Wenzhong Li; Yao Zeng; Yang Zheng; Zheng Hu; Sanglu Lu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; State Key Laboratory for Novel Software Technology, Nanjing University; TTE Lab, Huawei Technologies Co., Ltd.; TTE Lab, Huawei Technologies Co., Ltd.; State Key Laboratory for Novel Software Technology, Nanjing University", + "bibtex": "@article{Fang_Li_Zeng_Zheng_Hu_Lu_2023, title={PatchNAS: Repairing DNNs in Deployment with Patched Network Architecture Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26730}, DOI={10.1609/aaai.v37i12.26730}, abstractNote={Despite being widely deployed in safety-critical applications such as autonomous driving and health care, deep neural networks (DNNs) still suffer from non-negligible reliability issues. Numerous works had reported that DNNs were vulnerable to either natural environmental noises or man-made adversarial noises. How to repair DNNs in deployment with noisy samples is a crucial topic for the robustness of neural networks. While many network repairing methods based on data argumentation and weight adjustment have been proposed, they require retraining and redeploying the whole model, which causes high overhead and is infeasible for varying faulty cases on different deployment environments. In this paper, we propose a novel network repairing framework called PatchNAS from the architecture perspective, where we freeze the pretrained DNNs and introduce a small patch network to deal with failure samples at runtime. 
PatchNAS introduces a novel network instrumentation method to determine the faulty stage of the network structure given the collected failure samples. A small patch network structure is searched unsupervisedly using neural architecture search (NAS) technique with data samples from deployment environment. The patch network repairs the DNNs by correcting the output feature maps of the faulty stage, which helps to maintain network performance on normal samples and enhance robustness in noisy environments. Extensive experiments based on several DNNs across 15 types of natural noises show that the proposed PatchNAS outperforms the state-of-the-arts with significant performance improvement as well as much lower deployment overhead.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Yuchu and Li, Wenzhong and Zeng, Yao and Zheng, Yang and Hu, Zheng and Lu, Sanglu}, year={2023}, month={Jun.}, pages={14811-14819} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26730/26502", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26730", + "pdf_size": 732720, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11149498372871276606&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;huawei.com;huawei.com;nju.edu.cn", + "email": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;huawei.com;huawei.com;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;0", + "aff_unique_norm": "Nanjing University;Huawei Technologies Co., Ltd.", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology;TTE Lab", + "aff_unique_url": "http://www.nju.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "Nanjing University;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + 
"id": "article-26700", + "title": "PateGail: A Privacy-Preserving Mobility Trajectory Generator with Imitation Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Generating human mobility trajectories is of great importance to solve the lack of large-scale trajectory data in numerous applications, which is caused by privacy concerns. However, existing mobility trajectory generation methods still require real-world human trajectories centrally collected as the training data, where there exists an inescapable risk of privacy leakage. To overcome this limitation, in this paper, we propose PateGail, a privacy-preserving imitation learning model to generate mobility trajectories, which utilizes the powerful generative adversary imitation learning model to simulate the decision-making process of humans. Further, in order to protect user privacy, we train this model collectively based on decentralized mobility data stored in user devices, where personal discriminators are trained locally to distinguish and reward the real and generated human trajectories. In the training process, only the generated trajectories and their rewards obtained based on personal discriminators are shared between the server and devices, whose privacy is further preserved by our proposed perturbation mechanisms with theoretical proof to satisfy differential privacy. Further, to better model the human decision-making process, we propose a novel aggregation mechanism of the rewards obtained from personal discriminators. We theoretically prove that under the reward obtained based on the aggregation mechanism, our proposed model maximizes the lower bound of the discounted total rewards of users. Extensive experiments show that the trajectories generated by our model are able to resemble real-world trajectories in terms of five key statistical metrics, outperforming state-of-the-art algorithms by over 48.03%. 
Furthermore, we demonstrate that the synthetic trajectories are able to efficiently support practical applications, including mobility prediction and location recommendation.", + "primary_area": "ai for social impact", + "author": "Huandong Wang; Changzheng Gao; Yuchen Wu; Depeng Jin; Lina Yao; Yong Li", + "authorids": "", + "aff": "Beijing National Research Center for Information Science and Technology (BNRist), Department of Electronic Engineering, Tsinghua University, China; Beijing National Research Center for Information Science and Technology (BNRist), Department of Electronic Engineering, Tsinghua University, China; Carnegie Mellon University, USA; Beijing National Research Center for Information Science and Technology (BNRist), Department of Electronic Engineering, Tsinghua University, China; CSIRO\u2019s Data61 and University of New South Wales, Australia; Beijing National Research Center for Information Science and Technology (BNRist), Department of Electronic Engineering, Tsinghua University, China", + "bibtex": "@article{Wang_Gao_Wu_Jin_Yao_Li_2023, title={PateGail: A Privacy-Preserving Mobility Trajectory Generator with Imitation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26700}, DOI={10.1609/aaai.v37i12.26700}, abstractNote={Generating human mobility trajectories is of great importance to solve the lack of large-scale trajectory data in numerous applications, which is caused by privacy concerns. However, existing mobility trajectory generation methods still require real-world human trajectories centrally collected as the training data, where there exists an inescapable risk of privacy leakage. To overcome this limitation, in this paper, we propose PateGail, a privacy-preserving imitation learning model to generate mobility trajectories, which utilizes the powerful generative adversary imitation learning model to simulate the decision-making process of humans. 
Further, in order to protect user privacy, we train this model collectively based on decentralized mobility data stored in user devices, where personal discriminators are trained locally to distinguish and reward the real and generated human trajectories. In the training process, only the generated trajectories and their rewards obtained based on personal discriminators are shared between the server and devices, whose privacy is further preserved by our proposed perturbation mechanisms with theoretical proof to satisfy differential privacy. Further, to better model the human decision-making process, we propose a novel aggregation mechanism of the rewards obtained from personal discriminators. We theoretically prove that under the reward obtained based on the aggregation mechanism, our proposed model maximizes the lower bound of the discounted total rewards of users. Extensive experiments show that the trajectories generated by our model are able to resemble real-world trajectories in terms of five key statistical metrics, outperforming state-of-the-art algorithms by over 48.03%. 
Furthermore, we demonstrate that the synthetic trajectories are able to efficiently support practical applications, including mobility prediction and location recommendation.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Huandong and Gao, Changzheng and Wu, Yuchen and Jin, Depeng and Yao, Lina and Li, Yong}, year={2023}, month={Jun.}, pages={14539-14547} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26700/26472", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26700", + "pdf_size": 499151, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17107136291368167635&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "tsinghua.edu.cn;tsinghua.edu.cn; ; ; ; ", + "email": "tsinghua.edu.cn;tsinghua.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;0", + "aff_unique_norm": "Tsinghua University;Carnegie Mellon University;CSIRO\u2019s Data61", + "aff_unique_dep": "Department of Electronic Engineering;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.cmu.edu;https://www.csiro.au/en/Research/Data61", + "aff_unique_abbr": "Tsinghua;CMU;Data61", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;1;0;2;0", + "aff_country_unique": "China;United States;Australia" + }, + { + "id": "article-25130", + "title": "PeCo: Perceptual Codebook for BERT Pre-training of Vision Transformers", + "track": "main", + "status": "Technical", + "abstract": "This paper explores a better prediction target for BERT pre-training of vision transformers. We observe that current prediction targets disagree with human perception judgment. This contradiction motivates us to learn a perceptual prediction target. We argue that perceptually similar images should stay close to each other in the prediction target space. 
We surprisingly find one simple yet effective idea: enforcing perceptual similarity during the dVAE training. Moreover, we adopt a self-supervised transformer model for deep feature extraction and show that it works well for calculating perceptual similarity. We demonstrate that such learned visual tokens indeed exhibit better semantic meanings, and help pre-training achieve superior transfer performance in various downstream tasks. For example, we achieve 84.5% Top-1 accuracy on ImageNet-1K with ViT-B backbone, outperforming the competitive method BEiT by +1.3% under the same pre-training epochs. Our approach also gets significant improvement on object detection and segmentation on COCO and semantic segmentation on ADE20K. Equipped with a larger backbone ViT-H, we achieve the state-of-the-art ImageNet accuracy (88.3%) among methods using only ImageNet-1K data.", + "primary_area": "computer vision i", + "author": "Xiaoyi Dong; Jianmin Bao; Ting Zhang; Dongdong Chen; Weiming Zhang; Lu Yuan; Dong Chen; Fang Wen; Nenghai Yu; Baining Guo", + "authorids": "", + "aff": "University of Science and Technology of China; Microsoft Research Asia; Microsoft Research Asia; Microsoft Cloud + AI; University of Science and Technology of China; Microsoft Cloud + AI; Microsoft Research Asia; Microsoft Research Asia; University of Science and Technology of China; Microsoft Research Asia", + "bibtex": "@article{Dong_Bao_Zhang_Chen_Zhang_Yuan_Chen_Wen_Yu_Guo_2023, title={PeCo: Perceptual Codebook for BERT Pre-training of Vision Transformers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25130}, DOI={10.1609/aaai.v37i1.25130}, abstractNote={This paper explores a better prediction target for BERT pre-training of vision transformers. We observe that current prediction targets disagree with human perception judgment. This contradiction motivates us to learn a perceptual prediction target. 
We argue that perceptually similar images should stay close to each other in the prediction target space. We surprisingly find one simple yet effective idea: enforcing perceptual similarity during the dVAE training. Moreover, we adopt a self-supervised transformer model for deep feature extraction and show that it works well for calculating perceptual similarity. We demonstrate that such learned visual tokens indeed exhibit better semantic meanings, and help pre-training achieve superior transfer performance in various downstream tasks. For example, we achieve 84.5% Top-1 accuracy on ImageNet-1K with ViT-B backbone, outperforming the competitive method BEiT by +1.3% under the same pre-training epochs. Our approach also gets significant improvement on object detection and segmentation on COCO and semantic segmentation on ADE20K. Equipped with a larger backbone ViT-H, we achieve the state-of-the-art ImageNet accuracy (88.3%) among methods using only ImageNet-1K data.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Xiaoyi and Bao, Jianmin and Zhang, Ting and Chen, Dongdong and Zhang, Weiming and Yuan, Lu and Chen, Dong and Wen, Fang and Yu, Nenghai and Guo, Baining}, year={2023}, month={Jun.}, pages={552-560} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25130/24902", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25130", + "pdf_size": 659891, + "gs_citation": 273, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11984225630922461341&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.ustc.edu.cn;microsoft.com;microsoft.com;gmail.com;ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com", + "email": "mail.ustc.edu.cn;microsoft.com;microsoft.com;gmail.com;ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": 
"0;1;1;2+3;0;2+3;1;1;0;1", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research;Microsoft Corporation;AI", + "aff_unique_dep": ";Research;Cloud Computing;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.microsoft.com/en-us/cloud;", + "aff_unique_abbr": "USTC;MSR Asia;Microsoft;", + "aff_campus_unique_index": "1;1;;;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;1;0;1;0;0;0;0", + "aff_country_unique": "China;United States;" + }, + { + "id": "article-26008", + "title": "Peeling the Onion: Hierarchical Reduction of Data Redundancy for Efficient Vision Transformer Training", + "track": "main", + "status": "Technical", + "abstract": "Vision transformers (ViTs) have recently obtained success in many applications, but their intensive computation and heavy memory usage at both training and inference time limit their generalization. Previous compression algorithms usually start from the pre-trained dense models and only focus on efficient inference, while time-consuming training is still unavoidable. In contrast, this paper points out that the million-scale training data is redundant, which is the fundamental reason for the tedious training. To address the issue, this paper aims to introduce sparsity into data and proposes an end-to-end efficient training framework from three sparse perspectives, dubbed Tri-Level E-ViT. Specifically, we leverage a hierarchical data redundancy reduction scheme, by exploring the sparsity under three levels: number of training examples in the dataset, number of patches (tokens) in each example, and number of connections between tokens that lie in attention weights. With extensive experiments, we demonstrate that our proposed technique can noticeably accelerate training for various ViT architectures while maintaining accuracy. 
Remarkably, under certain ratios, we are able to improve the ViT accuracy rather than compromising it. For example, we can achieve 15.2% speedup with 72.6% (+0.4) Top-1 accuracy on Deit-T, and 15.7% speedup with 79.9% (+0.1) Top-1 accuracy on Deit-S. This proves the existence of data redundancy in ViT. Our code is released at https://github.com/ZLKong/Tri-Level-ViT", + "primary_area": "machine learning ii", + "author": "Zhenglun Kong; Haoyu Ma; Geng Yuan; Mengshu Sun; Yanyue Xie; Peiyan Dong; Xin Meng; Xuan Shen; Hao Tang; Minghai Qin; Tianlong Chen; Xiaolong Ma; Xiaohui Xie; Zhangyang Wang; Yanzhi Wang", + "authorids": "", + "aff": "Northeastern University; University of California, Irvine; Northeastern University; Northeastern University; Northeastern University; Northeastern University; Peking university; Northeastern University; CVL, ETH Zurich; Western Digital Research; University of Texas at Austin; Clemson University; University of California, Irvine; University of Texas at Austin; Northeastern University", + "bibtex": "@article{Kong_Ma_Yuan_Sun_Xie_Dong_Meng_Shen_Tang_Qin_Chen_Ma_Xie_Wang_Wang_2023, title={Peeling the Onion: Hierarchical Reduction of Data Redundancy for Efficient Vision Transformer Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26008}, DOI={10.1609/aaai.v37i7.26008}, abstractNote={Vision transformers (ViTs) have recently obtained success in many applications, but their intensive computation and heavy memory usage at both training and inference time limit their generalization. Previous compression algorithms usually start from the pre-trained dense models and only focus on efficient inference, while time-consuming training is still unavoidable. In contrast, this paper points out that the million-scale training data is redundant, which is the fundamental reason for the tedious training. 
To address the issue, this paper aims to introduce sparsity into data and proposes an end-to-end efficient training framework from three sparse perspectives, dubbed Tri-Level E-ViT. Specifically, we leverage a hierarchical data redundancy reduction scheme, by exploring the sparsity under three levels: number of training examples in the dataset, number of patches (tokens) in each example, and number of connections between tokens that lie in attention weights. With extensive experiments, we demonstrate that our proposed technique can noticeably accelerate training for various ViT architectures while maintaining accuracy. Remarkably, under certain ratios, we are able to improve the ViT accuracy rather than compromising it. For example, we can achieve 15.2% speedup with 72.6% (+0.4) Top-1 accuracy on Deit-T, and 15.7% speedup with 79.9% (+0.1) Top-1 accuracy on Deit-S. This proves the existence of data redundancy in ViT. Our code is released at https://github.com/ZLKong/Tri-Level-ViT}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kong, Zhenglun and Ma, Haoyu and Yuan, Geng and Sun, Mengshu and Xie, Yanyue and Dong, Peiyan and Meng, Xin and Shen, Xuan and Tang, Hao and Qin, Minghai and Chen, Tianlong and Ma, Xiaolong and Xie, Xiaohui and Wang, Zhangyang and Wang, Yanzhi}, year={2023}, month={Jun.}, pages={8360-8368} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26008/25780", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26008", + "pdf_size": 3191896, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10732222633899521799&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "northeastern.edu;uci.edu;northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;pku.edu.cn;northeastern.edu;vision.ee.ethz.ch;gmail.com;utexas.edu;clemson.edu;uci.edu;utexas.edu;northeastern.edu", + "email": 
"northeastern.edu;uci.edu;northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;pku.edu.cn;northeastern.edu;vision.ee.ethz.ch;gmail.com;utexas.edu;clemson.edu;uci.edu;utexas.edu;northeastern.edu", + "github": "https://github.com/ZLKong/Tri-Level-ViT", + "project": "", + "author_num": 15, + "aff_unique_index": "0;1;0;0;0;0;2;0;3;4;5;6;1;5;0", + "aff_unique_norm": "Northeastern University;University of California, Irvine;Peking University;ETH Zurich;Western Digital Corporation;University of Texas at Austin;Clemson University", + "aff_unique_dep": ";;;Computer Vision Laboratory;Research;;", + "aff_unique_url": "https://www.northeastern.edu;https://www.uci.edu;http://www.pku.edu.cn;https://www.ethz.ch;https://www.westerndigital.com;https://www.utexas.edu;https://www.clemson.edu", + "aff_unique_abbr": "NEU;UCI;Peking U;ETHZ;WDC;UT Austin;Clemson", + "aff_campus_unique_index": "1;2;1;2", + "aff_campus_unique": ";Irvine;Austin", + "aff_country_unique_index": "0;0;0;0;0;0;1;0;2;0;0;0;0;0;0", + "aff_country_unique": "United States;China;Switzerland" + }, + { + "id": "article-26712", + "title": "People Taking Photos That Faces Never Share: Privacy Protection and Fairness Enhancement from Camera to User", + "track": "aaai special track", + "status": "Technical", + "abstract": "The soaring number of personal mobile devices and public cameras poses a threat to fundamental human rights and ethical principles. For example, the stolen of private information such as face image by malicious third parties will lead to catastrophic consequences. By manipulating appearance of face in the image, most of existing protection algorithms are effective but irreversible. Here, we propose a practical and systematic solution to invertiblely protect face information in the full-process pipeline from camera to final users. 
Specifically, We design a novel lightweight Flow-based Face Encryption Method (FFEM) on the local embedded system privately connected to the camera, minimizing the risk of eavesdropping during data transmission. FFEM uses a flow-based face encoder to encode each face to a Gaussian distribution and encrypts the encoded face feature by random rotating the Gaussian distribution with the rotation matrix is as the password. While encrypted latent-variable face images are sent to users through public but less reliable channels, password will be protected through more secure channels through technologies such as asymmetric encryption, blockchain, or other sophisticated security schemes. User could select to decode an image with fake faces from the encrypted image on the public channel. Only trusted users are able to recover the original face using the encrypted matrix transmitted in secure channel. More interestingly, by tuning Gaussian ball in latent space, we could control the fairness of the replaced face on attributes such as gender and race. Extensive experiments demonstrate that our solution could protect privacy and enhance fairness with minimal effect on high-level downstream task.", + "primary_area": "ai for social impact", + "author": "Junjie Zhu; Lin Gu; Xiaoxiao Wu; Zheng Li; Tatsuya Harada; Yingying Zhu", + "authorids": "", + "aff": "Shenzhen University; RIKEN, The University of Tokyo; Shenzhen University; Stockton University; The University of Tokyo, RIKEN; University of Texas, Arlington", + "bibtex": "@article{Zhu_Gu_Wu_Li_Harada_Zhu_2023, title={People Taking Photos That Faces Never Share: Privacy Protection and Fairness Enhancement from Camera to User}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26712}, DOI={10.1609/aaai.v37i12.26712}, abstractNote={The soaring number of personal mobile devices and public cameras poses a threat to fundamental human rights and ethical principles. 
For example, the stolen of private information such as face image by malicious third parties will lead to catastrophic consequences. By manipulating appearance of face in the image, most of existing protection algorithms are effective but irreversible. Here, we propose a practical and systematic solution to invertiblely protect face information in the full-process pipeline from camera to final users. Specifically, We design a novel lightweight Flow-based Face Encryption Method (FFEM) on the local embedded system privately connected to the camera, minimizing the risk of eavesdropping during data transmission. FFEM uses a flow-based face encoder to encode each face to a Gaussian distribution and encrypts the encoded face feature by random rotating the Gaussian distribution with the rotation matrix is as the password. While encrypted latent-variable face images are sent to users through public but less reliable channels, password will be protected through more secure channels through technologies such as asymmetric encryption, blockchain, or other sophisticated security schemes. User could select to decode an image with fake faces from the encrypted image on the public channel. Only trusted users are able to recover the original face using the encrypted matrix transmitted in secure channel. More interestingly, by tuning Gaussian ball in latent space, we could control the fairness of the replaced face on attributes such as gender and race. 
Extensive experiments demonstrate that our solution could protect privacy and enhance fairness with minimal effect on high-level downstream task.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Junjie and Gu, Lin and Wu, Xiaoxiao and Li, Zheng and Harada, Tatsuya and Zhu, Yingying}, year={2023}, month={Jun.}, pages={14646-14654} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26712/26484", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26712", + "pdf_size": 3611048, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5963752712719500332&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "email.szu.edu.cn; ;szu.edu.cn; ; ;uta.edu", + "email": "email.szu.edu.cn; ;szu.edu.cn; ; ;uta.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2;1;3", + "aff_unique_norm": "Shenzhen University;The University of Tokyo;Stockton University;University of Texas at Arlington", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.szu.edu.cn;https://www.u-tokyo.ac.jp;https://www.stockton.edu;https://www.uta.edu", + "aff_unique_abbr": "SZU;UTokyo;Stockton;UTA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Arlington", + "aff_country_unique_index": "0;1;0;2;1;2", + "aff_country_unique": "China;Japan;United States" + }, + { + "id": "article-26802", + "title": "Perception for General-purpose Robot Manipulation", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "To autonomously perform tasks, a robot should continually perceive the state of its environment, reason with the task at hand, plan and execute appropriate actions. In this pipeline, perception is largely unsolved and one of the more challenging problems. 
Common indoor environments typically pose two main problems: 1) inherent occlusions leading to unreliable observations of objects, and 2) the presence and involvement of a wide range of objects with varying physical and visual attributes (i.e., rigid, articulated, deformable, granular, transparent, etc.). Thus, we need algorithms that can accommodate perceptual uncertainty in the state estimation and generalize to a wide range of objects. Probabilistic inference methods have been highly suitable for modeling perceptual uncertainty, and data-driven approaches using deep learning techniques have shown promising advancements toward generalization. Perception for manipulation is a more intricate setting requiring the best from both worlds. My research aims to develop robot perception algorithms that can generalize over objects and tasks while accommodating perceptual uncertainty to support robust task execution in the real world. In this presentation, I will briefly highlight my research in these two research threads.", + "primary_area": "", + "author": "Karthik Desingh", + "authorids": "", + "aff": "Department of Computer Science & Engineering, Minnesota Robotics Institute (MnRI), University of Minnesota - Twin Cities", + "bibtex": "@article{Desingh_2024, title={Perception for General-purpose Robot Manipulation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26802}, DOI={10.1609/aaai.v37i13.26802}, abstractNote={To autonomously perform tasks, a robot should continually perceive the state of its environment, reason with the task at hand, plan and execute appropriate actions. In this pipeline, perception is largely unsolved and one of the more challenging problems. 
Common indoor environments typically pose two main problems: 1) inherent occlusions leading to unreliable observations of objects, and 2) the presence and involvement of a wide range of objects with varying physical and visual attributes (i.e., rigid, articulated, deformable, granular, transparent, etc.). Thus, we need algorithms that can accommodate perceptual uncertainty in the state estimation and generalize to a wide range of objects. Probabilistic inference methods have been highly suitable for modeling perceptual uncertainty, and data-driven approaches using deep learning techniques have shown promising advancements toward generalization. Perception for manipulation is a more intricate setting requiring the best from both worlds. My research aims to develop robot perception algorithms that can generalize over objects and tasks while accommodating perceptual uncertainty to support robust task execution in the real world. In this presentation, I will briefly highlight my research in these two research threads.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Desingh, Karthik}, year={2024}, month={Jul.}, pages={15435-15435} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26802/26574", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26802", + "pdf_size": 385165, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:_sdCnPl-WDMJ:scholar.google.com/&scioq=Perception+for+General-purpose+Robot+Manipulation&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "umn.edu", + "email": "umn.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Minnesota - Twin Cities", + "aff_unique_dep": "Department of Computer Science & Engineering", + "aff_unique_url": "https://www.umn.edu", + "aff_unique_abbr": "UMN", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Twin Cities", + 
"aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26960", + "title": "Performance Disparities between Accents in Automatic Speech Recognition (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this work, we expand the discussion of bias in Automatic Speech Recognition (ASR) through a large-scale audit. Using a large and global data set of speech, we perform an audit of some of the most popular English ASR services. We show that, even when controlling for multiple linguistic covariates, ASR service performance has a statistically significant relationship to the political alignment of the speaker's birth country with respect to the United States' geopolitical power.", + "primary_area": "", + "author": "Alex DiChristofano; Henry Shuster; Shefali Chandra; Neal Patwari", + "authorids": "", + "aff": "Division of Computational & Data Sciences, Washington University in St. Louis; Department of Computer Science & Engineering, Washington University in St. Louis; Department of Women, Gender, and Sexuality Studies, Washington University in St. Louis+Department of History, Washington University in St. Louis; Department of Computational & Data Sciences, Washington University in St. Louis+Department of Computer Science & Engineering, Washington University in St. Louis+Department of Electrical & Systems Engineering, Washington University in St. Louis", + "bibtex": "@article{DiChristofano_Shuster_Chandra_Patwari_2024, title={Performance Disparities between Accents in Automatic Speech Recognition (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26960}, DOI={10.1609/aaai.v37i13.26960}, abstractNote={In this work, we expand the discussion of bias in Automatic Speech Recognition (ASR) through a large-scale audit. Using a large and global data set of speech, we perform an audit of some of the most popular English ASR services. 
We show that, even when controlling for multiple linguistic covariates, ASR service performance has a statistically significant relationship to the political alignment of the speaker\u2019s birth country with respect to the United States\u2019 geopolitical power.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={DiChristofano, Alex and Shuster, Henry and Chandra, Shefali and Patwari, Neal}, year={2024}, month={Jul.}, pages={16200-16201} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26960/26732", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26960", + "pdf_size": 881119, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15580956419106759572&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "wustl.edu;wustl.edu;wustl.edu;wustl.edu", + "email": "wustl.edu;wustl.edu;wustl.edu;wustl.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+0;0+0+0", + "aff_unique_norm": "Washington University in St. Louis", + "aff_unique_dep": "Division of Computational & Data Sciences", + "aff_unique_url": "https://wustl.edu", + "aff_unique_abbr": "WUSTL", + "aff_campus_unique_index": "0;0;0+0;0+0+0", + "aff_campus_unique": "St. Louis", + "aff_country_unique_index": "0;0;0+0;0+0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25762", + "title": "Periodic Multi-Agent Path Planning", + "track": "main", + "status": "Technical", + "abstract": "Multi-agent path planning (MAPP) is the problem of planning collision-free trajectories from start to goal locations for a team of agents. This work explores a relatively unexplored setting of MAPP where streams of agents have to go through the starts and goals with high throughput. We tackle this problem by formulating a new variant of MAPP called periodic MAPP in which the timing of agent appearances is periodic. 
The objective with periodic MAPP is to find a periodic plan, a set of collision-free trajectories that the agent streams can use repeatedly over periods, with periods that are as small as possible. To meet this objective, we propose a solution method that is based on constraint relaxation and optimization. We show that the periodic plans once found can be used for a more practical case in which agents in a stream can appear at random times. We confirm the effectiveness of our method compared with baseline methods in terms of throughput in several scenarios that abstract autonomous intersection management tasks.", + "primary_area": "intelligent robotics", + "author": "Kazumi Kasaura; Ryo Yonetani; Mai Nishimura", + "authorids": "", + "aff": "OMRON SINIC X Corporation Hongo 5-24-5, Bunkyo-ku, Tokyo, Japan; OMRON SINIC X Corporation Hongo 5-24-5, Bunkyo-ku, Tokyo, Japan; OMRON SINIC X Corporation Hongo 5-24-5, Bunkyo-ku, Tokyo, Japan", + "bibtex": "@article{Kasaura_Yonetani_Nishimura_2023, title={Periodic Multi-Agent Path Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25762}, DOI={10.1609/aaai.v37i5.25762}, abstractNote={Multi-agent path planning (MAPP) is the problem of planning collision-free trajectories from start to goal locations for a team of agents. This work explores a relatively unexplored setting of MAPP where streams of agents have to go through the starts and goals with high throughput. We tackle this problem by formulating a new variant of MAPP called periodic MAPP in which the timing of agent appearances is periodic. The objective with periodic MAPP is to find a periodic plan, a set of collision-free trajectories that the agent streams can use repeatedly over periods, with periods that are as small as possible. To meet this objective, we propose a solution method that is based on constraint relaxation and optimization. 
We show that the periodic plans once found can be used for a more practical case in which agents in a stream can appear at random times. We confirm the effectiveness of our method compared with baseline methods in terms of throughput in several scenarios that abstract autonomous intersection management tasks.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kasaura, Kazumi and Yonetani, Ryo and Nishimura, Mai}, year={2023}, month={Jun.}, pages={6183-6191} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25762/25534", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25762", + "pdf_size": 380787, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10590580550650780170&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sinicx.com;sinicx.com;sinicx.com", + "email": "sinicx.com;sinicx.com;sinicx.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "OMRON SINIC X Corporation", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-27026", + "title": "Persistent Homology through Image Segmentation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The efficacy of topological data analysis (TDA) has been demonstrated in many different machine learning pipelines, particularly those in which structural characteristics of data are highly relevant. However, TDA's usability in large scale machine learning applications is hindered by the significant computational cost of generating persistence diagrams. In this work, a method that allows this computationally expensive process to be approximated by deep neural networks is proposed. 
Moreover, the method's practicality in estimating 0-dimensional persistence diagrams across a diverse range of images is shown.", + "primary_area": "", + "author": "Joshua Slater; Thomas Weighill", + "authorids": "", + "aff": "University of North Carolina at Greensboro; University of North Carolina at Greensboro", + "bibtex": "@article{Slater_Weighill_2024, title={Persistent Homology through Image Segmentation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27026}, DOI={10.1609/aaai.v37i13.27026}, abstractNote={The efficacy of topological data analysis (TDA) has been demonstrated in many different machine learning pipelines, particularly those in which structural characteristics of data are highly relevant. However, TDA\u2019s usability in large scale machine learning applications is hindered by the significant computational cost of generating persistence diagrams. In this work, a method that allows this computationally expensive process to be approximated by deep neural networks is proposed. 
Moreover, the method\u2019s practicality in estimating 0-dimensional persistence diagrams across a diverse range of images is shown.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Slater, Joshua and Weighill, Thomas}, year={2024}, month={Jul.}, pages={16332-16333} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27026/26798", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27026", + "pdf_size": 121743, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:feOMXZE1fXIJ:scholar.google.com/&scioq=Persistent+Homology+through+Image+Segmentation+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "uncg.edu;uncg.edu", + "email": "uncg.edu;uncg.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of North Carolina at Greensboro", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uncg.edu", + "aff_unique_abbr": "UNCG", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Greensboro", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26518", + "title": "Personalized Dialogue Generation with Persona-Adaptive Attention", + "track": "main", + "status": "Technical", + "abstract": "Persona-based dialogue systems aim to generate consistent responses based on historical context and predefined persona. Unlike conventional dialogue generation, the persona-based dialogue needs to consider both dialogue context and persona, posing a challenge for coherent training. Specifically, this requires a delicate weight balance between context and persona. To achieve that, in this paper, we propose an effective framework with Persona-Adaptive Attention (PAA), which adaptively integrates the weights from the persona and context information via our designed attention. 
In addition, a dynamic masking mechanism is applied to the PAA to not only drop redundant information in context and persona but also serve as a regularization mechanism to avoid overfitting. Experimental results demonstrate the superiority of the proposed PAA framework compared to the strong baselines in both automatic and human evaluation. Moreover, the proposed PAA approach can perform equivalently well in a low-resource regime compared to models trained in a full-data setting, which achieve a similar result with only 20% to 30% of data compared to the larger models trained in the full-data setting. To fully exploit the effectiveness of our design, we designed several variants for handling the weighted information in different ways, showing the necessity and sufficiency of our weighting and masking designs.", + "primary_area": "speech natural language processing", + "author": "Qiushi Huang; Yu Zhang; Tom Ko; Xubo Liu; Bo Wu; Wenwu Wang; H Tang", + "authorids": "", + "aff": "University of Surrey + Southern University of Science and Technology; Southern University of Science and Technology; ByteDance AI Lab; University of Surrey; MIT-IBM Watson AI Lab; University of Surrey; University of Surrey + Southern University of Science and Technology", + "bibtex": "@article{Huang_Zhang_Ko_Liu_Wu_Wang_Tang_2023, title={Personalized Dialogue Generation with Persona-Adaptive Attention}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26518}, DOI={10.1609/aaai.v37i11.26518}, abstractNote={Persona-based dialogue systems aim to generate consistent responses based on historical context and predefined persona. Unlike conventional dialogue generation, the persona-based dialogue needs to consider both dialogue context and persona, posing a challenge for coherent training. Specifically, this requires a delicate weight balance between context and persona. 
To achieve that, in this paper, we propose an effective framework with Persona-Adaptive Attention (PAA), which adaptively integrates the weights from the persona and context information via our designed attention. In addition, a dynamic masking mechanism is applied to the PAA to not only drop redundant information in context and persona but also serve as a regularization mechanism to avoid overfitting. Experimental results demonstrate the superiority of the proposed PAA framework compared to the strong baselines in both automatic and human evaluation. Moreover, the proposed PAA approach can perform equivalently well in a low-resource regime compared to models trained in a full-data setting, which achieve a similar result with only 20% to 30% of data compared to the larger models trained in the full-data setting. To fully exploit the effectiveness of our design, we designed several variants for handling the weighted information in different ways, showing the necessity and sufficiency of our weighting and masking designs.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Qiushi and Zhang, Yu and Ko, Tom and Liu, Xubo and Wu, Bo and Wang, Wenwu and Tang, H}, year={2023}, month={Jun.}, pages={12916-12923} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26518/26290", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26518", + "pdf_size": 255111, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14196358981225571069&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "surrey.ac.uk;gmail.com;gmail.com;surrey.ac.uk;ibm.com;surrey.ac.uk;surrey.ac.uk", + "email": "surrey.ac.uk;gmail.com;gmail.com;surrey.ac.uk;ibm.com;surrey.ac.uk;surrey.ac.uk", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;2;0;3;0;0+1", + "aff_unique_norm": "University of Surrey;Southern University of Science and 
Technology;ByteDance;Massachusetts Institute of Technology", + "aff_unique_dep": ";;AI Lab;MIT-IBM Watson AI Lab", + "aff_unique_url": "https://www.surrey.ac.uk;https://www.sustech.edu.cn;https://www.bytedance.com;https://www.mitibmwatsonailab.org", + "aff_unique_abbr": "Surrey;SUSTech;ByteDance;MIT-IBM AI Lab", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;1;0;2;0;0+1", + "aff_country_unique": "United Kingdom;China;United States" + }, + { + "id": "article-25076", + "title": "Persuasion Strategies in Advertisements", + "track": "main", + "status": "Technical", + "abstract": "Modeling what makes an advertisement persuasive, i.e., eliciting the desired response from consumer, is critical to the\nstudy of propaganda, social psychology, and marketing. Despite its importance, computational modeling of persuasion\nin computer vision is still in its infancy, primarily due to\nthe lack of benchmark datasets that can provide persuasion-strategy labels associated with ads. Motivated by persuasion literature in social psychology and marketing, we introduce an extensive vocabulary of persuasion strategies and\nbuild the first ad image corpus annotated with persuasion\nstrategies. We then formulate the task of persuasion strategy prediction with multi-modal learning, where we design\na multi-task attention fusion model that can leverage other\nad-understanding tasks to predict persuasion strategies. The\ndataset also provides image segmentation masks, which labels persuasion strategies in the corresponding ad images on\nthe test split. 
We publicly release our code and dataset at https://midas-research.github.io/persuasion-advertisements/.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Yaman Kumar; Rajat Jha; Arunim Gupta; Milan Aggarwal; Aditya Garg; Tushar Malyan; Ayush Bhardwaj; Rajiv Ratn Shah; Balaji Krishnamurthy; Changyou Chen", + "authorids": "", + "aff": "IIIT-Delhi; IIIT-Delhi; IIIT-Delhi; Adobe Media and Data Science Research (MDSR); IIIT-Delhi; IIIT-Delhi; IIIT-Delhi; IIIT-Delhi; Adobe Media and Data Science Research (MDSR); University at Buffalo", + "bibtex": "@article{Kumar_Jha_Gupta_Aggarwal_Garg_Malyan_Bhardwaj_Ratn Shah_Krishnamurthy_Chen_2023, title={Persuasion Strategies in Advertisements}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25076}, DOI={10.1609/aaai.v37i1.25076}, abstractNote={Modeling what makes an advertisement persuasive, i.e., eliciting the desired response from consumer, is critical to the\nstudy of propaganda, social psychology, and marketing. Despite its importance, computational modeling of persuasion\nin computer vision is still in its infancy, primarily due to\nthe lack of benchmark datasets that can provide persuasion-strategy labels associated with ads. Motivated by persuasion literature in social psychology and marketing, we introduce an extensive vocabulary of persuasion strategies and\nbuild the first ad image corpus annotated with persuasion\nstrategies. We then formulate the task of persuasion strategy prediction with multi-modal learning, where we design\na multi-task attention fusion model that can leverage other\nad-understanding tasks to predict persuasion strategies. The\ndataset also provides image segmentation masks, which labels persuasion strategies in the corresponding ad images on\nthe test split. 
We publicly release our code and dataset at https://midas-research.github.io/persuasion-advertisements/.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kumar, Yaman and Jha, Rajat and Gupta, Arunim and Aggarwal, Milan and Garg, Aditya and Malyan, Tushar and Bhardwaj, Ayush and Ratn Shah, Rajiv and Krishnamurthy, Balaji and Chen, Changyou}, year={2023}, month={Jun.}, pages={57-66} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25076/24848", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25076", + "pdf_size": 7982489, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1724422076151237323&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "iiitd.ac.in; ; ; ; ; ; ; ; ;adobe.com", + "email": "iiitd.ac.in; ; ; ; ; ; ; ; ;adobe.com", + "github": "https://midas-research.github.io/persuasion-advertisements/", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;1;0;0;0;0;1;2", + "aff_unique_norm": "Indraprastha Institute of Information Technology, Delhi;Adobe;University at Buffalo", + "aff_unique_dep": ";Media and Data Science Research;", + "aff_unique_url": "https://www.iiitdelhi.ac.in;https://www.adobe.com;https://www.buffalo.edu", + "aff_unique_abbr": "IIIT-D;Adobe;UB", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Delhi;", + "aff_country_unique_index": "0;0;0;1;0;0;0;0;1;1", + "aff_country_unique": "India;United States" + }, + { + "id": "article-26855", + "title": "Phase-Informed Bayesian Ensemble Models Improve Performance of COVID-19 Forecasts", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Despite hundreds of methods published in the literature, forecasting epidemic dynamics remains challenging yet important. 
The challenges stem from multiple sources, including: the need for timely data, co-evolution of epidemic dynamics with behavioral and immunological adaptations, and the evolution of new pathogen strains. The ongoing COVID-19 pandemic highlighted these challenges; in an important article, Reich et al. did a comprehensive analysis highlighting many of these challenges.\n\nIn this paper, we take another step in critically evaluating existing epidemic forecasting methods. Our methods are based on a simple yet crucial observation - epidemic dynamics go through a number of phases (waves). Armed with this understanding, we propose a modification to our deployed Bayesian ensembling case time series forecasting framework. We show that ensembling methods employing the phase information and using different weighting schemes for each phase can produce improved forecasts. We evaluate our proposed method with both the currently deployed model and the COVID-19 forecasthub models. The overall performance of the proposed model is consistent across the pandemic but more importantly, it is ranked third and first during two critical rapid growth phases in cases, regimes where the performance of most models from the CDC forecasting hub dropped significantly.", + "primary_area": "emerging applications of ai", + "author": "Aniruddha Adiga; Gursharn Kaur; Lijing Wang; Benjamin Hurt; Przemyslaw Porebski; Srinivasan Venkatramanan; Bryan Lewis; Madhav V. Marathe", + "authorids": "", + "aff": "Biocomplexity Institute, University of Virginia; Biocomplexity Institute, University of Virginia; Boston Children\u2019s Hospital and Harvard Medical School; Biocomplexity Institute, University of Virginia; Biocomplexity Institute, University of Virginia; Biocomplexity Institute, University of Virginia; Biocomplexity Institute, University of Virginia; Biocomplexity Institute, University of Virginia + Dept. 
of Computer Science, University of Virginia", + "bibtex": "@article{Adiga_Kaur_Wang_Hurt_Porebski_Venkatramanan_Lewis_Marathe_2024, title={Phase-Informed Bayesian Ensemble Models Improve Performance of COVID-19 Forecasts}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26855}, DOI={10.1609/aaai.v37i13.26855}, abstractNote={Despite hundreds of methods published in the literature, forecasting epidemic dynamics remains challenging yet important. The challenges stem from multiple sources, including: the need for timely data, co-evolution of epidemic dynamics with behavioral and immunological adaptations, and the evolution of new pathogen strains. The ongoing COVID-19 pandemic highlighted these challenges; in an important article, Reich et al. did a comprehensive analysis highlighting many of these challenges. In this paper, we take another step in critically evaluating existing epidemic forecasting methods. Our methods are based on a simple yet crucial observation - epidemic dynamics go through a number of phases (waves). Armed with this understanding, we propose a modification to our deployed Bayesian ensembling case time series forecasting framework. We show that ensembling methods employing the phase information and using different weighting schemes for each phase can produce improved forecasts. We evaluate our proposed method with both the currently deployed model and the COVID-19 forecasthub models. 
The overall performance of the proposed model is consistent across the pandemic but more importantly, it is ranked third and first during two critical rapid growth phases in cases, regimes where the performance of most models from the CDC forecasting hub dropped significantly.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Adiga, Aniruddha and Kaur, Gursharn and Wang, Lijing and Hurt, Benjamin and Porebski, Przemyslaw and Venkatramanan, Srinivasan and Lewis, Bryan and Marathe, Madhav V.}, year={2024}, month={Jul.}, pages={15647-15653} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26855/26627", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26855", + "pdf_size": 528473, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14762374594781614360&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "virginia.edu;virginia.edu;virginia.edu; ; ; ; ; ", + "email": "virginia.edu;virginia.edu;virginia.edu; ; ; ; ; ", + "github": "", + "project": "https://viz.covid19forecasthub.org/", + "author_num": 8, + "aff_unique_index": "0;0;1;0;0;0;0;0+0", + "aff_unique_norm": "University of Virginia;Boston Children's Hospital", + "aff_unique_dep": "Biocomplexity Institute;", + "aff_unique_url": "https://www.virginia.edu;https://www.childrenshospital.org", + "aff_unique_abbr": "UVA;BCH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-27016", + "title": "Photogrammetry and VR for Comparing 2D and Immersive Linguistic Data Collection (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The overarching goal of this work is to enable the collection of language describing a wide variety of objects viewed in virtual reality. 
We aim to create full 3D models from a small number of \u2018keyframe\u2019 images of objects found in the publicly available Grounded Language Dataset (GoLD) using photogrammetry. We will then collect linguistic descriptions by placing our models in virtual reality and having volunteers describe them. To evaluate the impact of virtual reality immersion on linguistic descriptions of the objects, we intend to apply contrastive learning to perform grounded language learning, then compare the descriptions collected from images (in GoLD) versus our models.", + "primary_area": "", + "author": "Jacob Rubinstein; Cynthia Matuszek; Don Engel", + "authorids": "", + "aff": "University of Maryland, Baltimore County \u2013 Baltimore, MD 21250; University of Maryland, Baltimore County \u2013 Baltimore, MD 21250; University of Maryland, Baltimore County \u2013 Baltimore, MD 21250", + "bibtex": "@article{Rubinstein_Matuszek_Engel_2024, title={Photogrammetry and VR for Comparing 2D and Immersive Linguistic Data Collection (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27016}, DOI={10.1609/aaai.v37i13.27016}, abstractNote={The overarching goal of this work is to enable the collection of language describing a wide variety of objects viewed in virtual reality. We aim to create full 3D models from a small number of \u2018keyframe\u2019 images of objects found in the publicly available Grounded Language Dataset (GoLD) using photogrammetry. We will then collect linguistic descriptions by placing our models in virtual reality and having volunteers describe them. 
To evaluate the impact of virtual reality immersion on linguistic descriptions of the objects, we intend to apply contrastive learning to perform grounded language learning, then compare the descriptions collected from images (in GoLD) versus our models.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rubinstein, Jacob and Matuszek, Cynthia and Engel, Don}, year={2024}, month={Jul.}, pages={16312-16313} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27016/26788", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27016", + "pdf_size": 2864347, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:PcD2a5SbL4YJ:scholar.google.com/&scioq=Photogrammetry+and+VR+for+Comparing+2D+and+Immersive+Linguistic+Data+Collection+(Student+Abstract)&hl=en&as_sdt=0,14", + "gs_version_total": 4, + "aff_domain": "umbc.edu;umbc.edu;umbc.edu", + "email": "umbc.edu;umbc.edu;umbc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Maryland, Baltimore County", + "aff_unique_dep": "", + "aff_unique_url": "https://www.umbc.edu", + "aff_unique_abbr": "UMBC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Baltimore", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25478", + "title": "Phrase-Level Temporal Relationship Mining for Temporal Sentence Localization", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we address the problem of video temporal sentence localization, which aims to localize a target moment from videos according to a given language query. We observe that existing models suffer from a sheer performance drop when dealing with simple phrases contained in the sentence. 
It reveals the limitation that existing models only capture the annotation bias of the datasets but lack sufficient understanding of the semantic phrases in the query. To address this problem, we propose a phrase-level Temporal Relationship Mining (TRM) framework employing the temporal relationship relevant to the phrase and the whole sentence to have a better understanding of each semantic entity in the sentence. Specifically, we use phrase-level predictions to refine the sentence-level prediction, and use Multiple Instance Learning to improve the quality of phrase-level predictions. We also exploit the consistency and exclusiveness constraints of phrase-level and sentence-level predictions to regularize the training process, thus alleviating the ambiguity of each phrase prediction. The proposed approach sheds light on how machines can understand detailed phrases in a sentence and their compositions in their generality rather than learning the annotation biases. Experiments on the ActivityNet Captions and Charades-STA datasets show the effectiveness of our method on both phrase and sentence temporal localization and enable better model interpretability and generalization when dealing with unseen compositions of seen concepts. 
Code can be found at https://github.com/minghangz/TRM.", + "primary_area": "computer vision iii", + "author": "Minghang Zheng; Sizhe Li; Qingchao Chen; Yuxin Peng; Yang Liu", + "authorids": "", + "aff": "Wangxuan Institute of Computer Technology, Peking University, Beijing, China; Wangxuan Institute of Computer Technology, Peking University, Beijing, China; National Institute of Health Data Science, Peking University, Beijing, China; Wangxuan Institute of Computer Technology, Peking University, Beijing, China + Peng Cheng Laboratory, Shenzhen, China + National Key Laboratory of General Artificial Intelligence, BIGAI, Beijing, China; Wangxuan Institute of Computer Technology, Peking University, Beijing, China + National Key Laboratory of General Artificial Intelligence, BIGAI, Beijing, China", + "bibtex": "@article{Zheng_Li_Chen_Peng_Liu_2023, title={Phrase-Level Temporal Relationship Mining for Temporal Sentence Localization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25478}, DOI={10.1609/aaai.v37i3.25478}, abstractNote={In this paper, we address the problem of video temporal sentence localization, which aims to localize a target moment from videos according to a given language query. We observe that existing models suffer from a sheer performance drop when dealing with simple phrases contained in the sentence. It reveals the limitation that existing models only capture the annotation bias of the datasets but lack sufficient understanding of the semantic phrases in the query. To address this problem, we propose a phrase-level Temporal Relationship Mining (TRM) framework employing the temporal relationship relevant to the phrase and the whole sentence to have a better understanding of each semantic entity in the sentence. Specifically, we use phrase-level predictions to refine the sentence-level prediction, and use Multiple Instance Learning to improve the quality of phrase-level predictions. 
We also exploit the consistency and exclusiveness constraints of phrase-level and sentence-level predictions to regularize the training process, thus alleviating the ambiguity of each phrase prediction. The proposed approach sheds light on how machines can understand detailed phrases in a sentence and their compositions in their generality rather than learning the annotation biases. Experiments on the ActivityNet Captions and Charades-STA datasets show the effectiveness of our method on both phrase and sentence temporal localization and enable better model interpretability and generalization when dealing with unseen compositions of seen concepts. Code can be found at https://github.com/minghangz/TRM.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Minghang and Li, Sizhe and Chen, Qingchao and Peng, Yuxin and Liu, Yang}, year={2023}, month={Jun.}, pages={3669-3677} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25478/25250", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25478", + "pdf_size": 1077687, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=233673431558001774&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/minghangz/TRM", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0+1+2;0+2", + "aff_unique_norm": "Peking University;Peng Cheng Laboratory;National Key Laboratory of General Artificial Intelligence", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;;BIGAI", + "aff_unique_url": "http://www.pku.edu.cn;;", + "aff_unique_abbr": "PKU;;", + "aff_campus_unique_index": "0;0;0;0+1;0", + "aff_campus_unique": "Beijing;Shenzhen;", + "aff_country_unique_index": "0;0;0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": 
"article-26664", + "title": "Physics Guided Neural Networks for Time-Aware Fairness: An Application in Crop Yield Prediction", + "track": "aaai special track", + "status": "Technical", + "abstract": "This paper proposes a physics-guided neural network model to predict crop yield and maintain the fairness over space. Failures to preserve the spatial fairness in predicted maps of crop yields can result in biased policies and intervention strategies in the distribution of assistance or subsidies in supporting individuals at risk. Existing methods for fairness enforcement are not designed for capturing the complex physical processes that underlie the crop growing process, and thus are unable to produce good predictions over large regions under different weather conditions and soil properties. More importantly, the fairness is often degraded when existing methods are applied to different years due to the change of weather conditions and farming practices. To address these issues, we propose a physics-guided neural network model, which leverages the physical knowledge from existing physics-based models to guide the extraction of representative physical information and discover the temporal data shift across years. In particular, we use a reweighting strategy to discover the relationship between training years and testing years using the physics-aware representation. Then the physics-guided neural network will be refined via a bi-level optimization process based on the reweighted fairness objective. The proposed method has been evaluated using real county-level crop yield data and simulated data produced by a physics-based model. 
The results demonstrate that this method can significantly improve the predictive performance and preserve the spatial fairness when generalized to different years.", + "primary_area": "ai for social impact", + "author": "Erhu He; Yiqun Xie; Licheng Liu; Weiye Chen; Zhenong Jin; Xiaowei Jia", + "authorids": "", + "aff": "Department of Computer Science, University of Pittsburgh; Department of Geographical Sciences, University of Maryland; Department of Bioproducts and Biosystems Engineering, University of Minnesota; Department of Geographical Sciences, University of Maryland; Department of Bioproducts and Biosystems Engineering, University of Minnesota; Department of Computer Science, University of Pittsburgh", + "bibtex": "@article{He_Xie_Liu_Chen_Jin_Jia_2023, title={Physics Guided Neural Networks for Time-Aware Fairness: An Application in Crop Yield Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26664}, DOI={10.1609/aaai.v37i12.26664}, abstractNote={This paper proposes a physics-guided neural network model to predict crop yield and maintain the fairness over space. Failures to preserve the spatial fairness in predicted maps of crop yields can result in biased policies and intervention strategies in the distribution of assistance or subsidies in supporting individuals at risk. Existing methods for fairness enforcement are not designed for capturing the complex physical processes that underlie the crop growing process, and thus are unable to produce good predictions over large regions under different weather conditions and soil properties. More importantly, the fairness is often degraded when existing methods are applied to different years due to the change of weather conditions and farming practices. 
To address these issues, we propose a physics-guided neural network model, which leverages the physical knowledge from existing physics-based models to guide the extraction of representative physical information and discover the temporal data shift across years. In particular, we use a reweighting strategy to discover the relationship between training years and testing years using the physics-aware representation. Then the physics-guided neural network will be refined via a bi-level optimization process based on the reweighted fairness objective. The proposed method has been evaluated using real county-level crop yield data and simulated data produced by a physics-based model. The results demonstrate that this method can significantly improve the predictive performance and preserve the spatial fairness when generalized to different years.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Erhu and Xie, Yiqun and Liu, Licheng and Chen, Weiye and Jin, Zhenong and Jia, Xiaowei}, year={2023}, month={Jun.}, pages={14223-14231} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26664/26436", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26664", + "pdf_size": 1034834, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=567340394981051855&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "pitt.edu;umd.edu;umn.edu;umd.edu;umn.edu;pitt.edu", + "email": "pitt.edu;umd.edu;umn.edu;umd.edu;umn.edu;pitt.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;1;2;0", + "aff_unique_norm": "University of Pittsburgh;University of Maryland;University of Minnesota", + "aff_unique_dep": "Department of Computer Science;Department of Geographical Sciences;Department of Bioproducts and Biosystems Engineering", + "aff_unique_url": "https://www.pitt.edu;https://www.umd.edu;https://www.umn.edu", + "aff_unique_abbr": "Pitt;UMD;UMN", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25825", + "title": "PiCor: Multi-Task Deep Reinforcement Learning with Policy Correction", + "track": "main", + "status": "Technical", + "abstract": "Multi-task deep reinforcement learning (DRL) ambitiously aims to train a general agent that masters multiple tasks simultaneously. However, varying learning speeds of different tasks compounding with negative gradients interference makes policy learning inefficient. In this work, we propose PiCor, an efficient multi-task DRL framework that splits learning into policy optimization and policy correction phases. The policy optimization phase improves the policy by any DRL algothrim on the sampled single task without considering other tasks. The policy correction phase first constructs an adaptive adjusted performance constraint set. Then the intermediate policy learned by the first phase is constrained to the set, which controls the negative interference and balances the learning speeds across tasks. Empirically, we demonstrate that PiCor outperforms previous methods and significantly improves sample efficiency on simulated robotic manipulation and continuous control tasks. 
We additionally show that adaptive weight adjusting can further improve data efficiency and performance.", + "primary_area": "machine learning i", + "author": "Fengshuo Bai; Hongming Zhang; Tianyang Tao; Zhiheng Wu; Yanna Wang; Bo Xu", + "authorids": "", + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences+Institute of Automation, Chinese Academy of Sciences; University of Alberta; Universit\u00e9 Paris-Saclay; School of Artificial Intelligence, University of Chinese Academy of Sciences+Institute of Automation, Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences+Nanjing Artificial Intelligence Research of IA; Institute of Automation, Chinese Academy of Sciences+Nanjing Artificial Intelligence Research of IA", + "bibtex": "@article{Bai_Zhang_Tao_Wu_Wang_Xu_2023, title={PiCor: Multi-Task Deep Reinforcement Learning with Policy Correction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25825}, DOI={10.1609/aaai.v37i6.25825}, abstractNote={Multi-task deep reinforcement learning (DRL) ambitiously aims to train a general agent that masters multiple tasks simultaneously. However, varying learning speeds of different tasks compounding with negative gradients interference makes policy learning inefficient. In this work, we propose PiCor, an efficient multi-task DRL framework that splits learning into policy optimization and policy correction phases. The policy optimization phase improves the policy by any DRL algothrim on the sampled single task without considering other tasks. The policy correction phase first constructs an adaptive adjusted performance constraint set. Then the intermediate policy learned by the first phase is constrained to the set, which controls the negative interference and balances the learning speeds across tasks. 
Empirically, we demonstrate that PiCor outperforms previous methods and significantly improves sample efficiency on simulated robotic manipulation and continuous control tasks. We additionally show that adaptive weight adjusting can further improve data efficiency and performance.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bai, Fengshuo and Zhang, Hongming and Tao, Tianyang and Wu, Zhiheng and Wang, Yanna and Xu, Bo}, year={2023}, month={Jun.}, pages={6728-6736} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25825/25597", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25825", + "pdf_size": 1702991, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4860479104381694383&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "ia.ac.cn; ; ;ia.ac.cn; ;ia.ac.cn", + "email": "ia.ac.cn; ; ;ia.ac.cn; ;ia.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;3;0+1;1+4;1+4", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;University of Alberta;Universit\u00e9 Paris-Saclay;Nanjing Artificial Intelligence Research Institute", + "aff_unique_dep": "School of Artificial Intelligence;Institute of Automation;;;Artificial Intelligence Research", + "aff_unique_url": "http://www.ucas.ac.cn;http://www.ia.cas.cn;https://www.ualberta.ca;https://www.universite-paris-saclay.fr;", + "aff_unique_abbr": "UCAS;CAS;UAlberta;UPS;", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;2;0+0;0+0;0+0", + "aff_country_unique": "China;Canada;France" + }, + { + "id": "article-25390", + "title": "Pixel Is All You Need: Adversarial Trajectory-Ensemble Active Learning for Salient Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Although weakly-supervised techniques can reduce the labeling effort, it is unclear whether a 
saliency model trained with weakly-supervised data (e.g., point annotation) can achieve the equivalent performance of its fully-supervised version. This paper attempts to answer this unexplored question by proving a hypothesis: there is a point-labeled dataset where saliency models trained on it can achieve equivalent performance when trained on the densely annotated dataset. To prove this conjecture, we proposed a novel yet effective adversarial trajectory-ensemble active learning (ATAL). Our contributions are three-fold: 1) Our proposed adversarial attack triggering uncertainty can conquer the overconfidence of existing active learning methods and accurately locate these uncertain pixels. 2) Our proposed trajectory-ensemble uncertainty estimation method maintains the advantages of the ensemble networks while significantly reducing the computational cost. 3) Our proposed relationship-aware diversity sampling algorithm can conquer oversampling while boosting performance. Experimental results show that our ATAL can find such a point-labeled dataset, where a saliency model trained on it obtained 97%-99% performance of its fully-supervised version with only 10 annotated points per image.", + "primary_area": "computer vision iii", + "author": "Zhenyu Wu; Lin Wang; Wei Wang; Qing Xia; Chenglizhao Chen; Aimin Hao; Shuo Li", + "authorids": "", + "aff": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University; School of Transportation Science and Engineering, Beihang University; Harbin Institute of Technology; SenseTime Group Limited; China University of Petroleum; Peng Cheng Laboratory; Case Western Reserve University", + "bibtex": "@article{Wu_Wang_Wang_Xia_Chen_Hao_Li_2023, title={Pixel Is All You Need: Adversarial Trajectory-Ensemble Active Learning for Salient Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25390}, DOI={10.1609/aaai.v37i3.25390}, abstractNote={Although weakly-supervised techniques can 
reduce the labeling effort, it is unclear whether a saliency model trained with weakly-supervised data (e.g., point annotation) can achieve the equivalent performance of its fully-supervised version. This paper attempts to answer this unexplored question by proving a hypothesis: there is a point-labeled dataset where saliency models trained on it can achieve equivalent performance when trained on the densely annotated dataset. To prove this conjecture, we proposed a novel yet effective adversarial trajectory-ensemble active learning (ATAL). Our contributions are three-fold: 1) Our proposed adversarial attack triggering uncertainty can conquer the overconfidence of existing active learning methods and accurately locate these uncertain pixels. 2) Our proposed trajectory-ensemble uncertainty estimation method maintains the advantages of the ensemble networks while significantly reducing the computational cost. 3) Our proposed relationship-aware diversity sampling algorithm can conquer oversampling while boosting performance. 
Experimental results show that our ATAL can find such a point-labeled dataset, where a saliency model trained on it obtained 97%-99% performance of its fully-supervised version with only 10 annotated points per image.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Zhenyu and Wang, Lin and Wang, Wei and Xia, Qing and Chen, Chenglizhao and Hao, Aimin and Li, Shuo}, year={2023}, month={Jun.}, pages={2883-2891} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25390/25162", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25390", + "pdf_size": 1465392, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8645435969580490008&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "163.com; ; ; ; ; ; ", + "email": "163.com; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;3;4;5", + "aff_unique_norm": "Beihang University;Harbin Institute of Technology;SenseTime Group Limited;China University of Petroleum;Peng Cheng Laboratory;Case Western Reserve University", + "aff_unique_dep": "State Key Laboratory of Virtual Reality Technology and Systems;;;;;", + "aff_unique_url": "http://www.buaa.edu.cn;http://www.hit.edu.cn/;https://www.sensetime.com;http://www.cup.edu.cn;http://www.pcl.ac.cn;https://www.case.edu", + "aff_unique_abbr": "Beihang;HIT;SenseTime;CUP;PCL;CWRU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0;0;0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25202", + "title": "Pixel-Wise Warping for Deep Image Stitching", + "track": "main", + "status": "Technical", + "abstract": "Existing image stitching approaches based on global or local homography estimation are not free from the parallax problem and suffer from undesired artifacts. 
In this paper, instead of relying on the homography-based warp, we propose a novel deep image stitching framework exploiting the pixel-wise warp field to handle the large-parallax problem. The proposed deep image stitching framework consists of a Pixel-wise Warping Module (PWM) and a Stitched Image Generating Module (SIGMo). For PWM, we obtain pixel-wise warp in a similar manner as estimating an optical flow (OF).\nIn the stitching scenario, the input images usually include non-overlap (NOV) regions of which warp cannot be directly estimated, unlike the overlap (OV) regions. To help the PWM predict a reasonable warp on the NOV region, we impose two geometrical constraints: an epipolar loss and a line-preservation loss. With the obtained warp field, we relocate the pixels of the target image using forward warping. Finally, the SIGMo is trained by the proposed multi-branch training framework to generate a stitched image from a reference image and a warped target image. For training and evaluating the proposed framework, we build and publish a novel dataset including image pairs with corresponding pixel-wise ground truth warp and stitched result images. 
We show that the results of the proposed framework are quantitatively and qualitatively superior to those of the conventional methods.", + "primary_area": "computer vision i", + "author": "Hyeokjun Kweon; Hyeonseong Kim; Yoonsu Kang; Youngho Yoon; WooSeong Jeong; Kuk-Jin Yoon", + "authorids": "", + "aff": "Korean Advanced Institute of Science and Technology; Korean Advanced Institute of Science and Technology; Korean Advanced Institute of Science and Technology; Korean Advanced Institute of Science and Technology; Korean Advanced Institute of Science and Technology; Korean Advanced Institute of Science and Technology", + "bibtex": "@article{Kweon_Kim_Kang_Yoon_Jeong_Yoon_2023, title={Pixel-Wise Warping for Deep Image Stitching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25202}, DOI={10.1609/aaai.v37i1.25202}, abstractNote={Existing image stitching approaches based on global or local homography estimation are not free from the parallax problem and suffer from undesired artifacts. In this paper, instead of relying on the homography-based warp, we propose a novel deep image stitching framework exploiting the pixel-wise warp field to handle the large-parallax problem. The proposed deep image stitching framework consists of a Pixel-wise Warping Module (PWM) and a Stitched Image Generating Module (SIGMo). For PWM, we obtain pixel-wise warp in a similar manner as estimating an optical flow (OF).\nIn the stitching scenario, the input images usually include non-overlap (NOV) regions of which warp cannot be directly estimated, unlike the overlap (OV) regions. To help the PWM predict a reasonable warp on the NOV region, we impose two geometrical constraints: an epipolar loss and a line-preservation loss. With the obtained warp field, we relocate the pixels of the target image using forward warping. 
Finally, the SIGMo is trained by the proposed multi-branch training framework to generate a stitched image from a reference image and a warped target image. For training and evaluating the proposed framework, we build and publish a novel dataset including image pairs with corresponding pixel-wise ground truth warp and stitched result images. We show that the results of the proposed framework are quantitatively and qualitatively superior to those of the conventional methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kweon, Hyeokjun and Kim, Hyeonseong and Kang, Yoonsu and Yoon, Youngho and Jeong, WooSeong and Yoon, Kuk-Jin}, year={2023}, month={Jun.}, pages={1196-1204} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25202/24974", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25202", + "pdf_size": 4797871, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8775683017674945441&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Korean Advanced Institute of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26767", + "title": "Planning and Learning for Non-markovian Negative Side Effects Using Finite State Controllers", + "track": "aaai special track", + "status": "Technical", + "abstract": "Autonomous systems are often deployed in the open world where it is hard to obtain complete specifications of objectives and constraints. 
Operating based on an incomplete model can produce negative side effects (NSEs), which affect the safety and reliability of the system. We focus on mitigating NSEs in environments modeled as Markov decision processes (MDPs). First, we learn a model of NSEs using observed data that contains state-action trajectories and severity of associated NSEs. Unlike previous works that associate NSEs with state-action pairs, our framework associates NSEs with entire trajectories, which is more general and captures non-Markovian dependence on states and actions. Second, we learn finite state controllers (FSCs) that predict NSE severity for a given trajectory and generalize well to unseen data. Finally, we develop a constrained MDP model that uses information from the underlying MDP and the learned FSC for planning while avoiding NSEs. Our empirical evaluation demonstrates the effectiveness of our approach in learning and mitigating Markovian and non-Markovian NSEs.", + "primary_area": "safe and robust ai", + "author": "Aishwarya Srivastava; Sandhya Saisubramanian; Praveen Paruchuri; Akshat Kumar; Shlomo Zilberstein", + "authorids": "", + "aff": "IIIT Hyderabad; Oregon State University; IIIT Hyderabad; Singapore Management University; University of Massachusetts Amherst", + "bibtex": "@article{Srivastava_Saisubramanian_Paruchuri_Kumar_Zilberstein_2023, title={Planning and Learning for Non-markovian Negative Side Effects Using Finite State Controllers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26767}, DOI={10.1609/aaai.v37i12.26767}, abstractNote={Autonomous systems are often deployed in the open world where it is hard to obtain complete specifications of objectives and constraints. Operating based on an incomplete model can produce negative side effects (NSEs), which affect the safety and reliability of the system. We focus on mitigating NSEs in environments modeled as Markov decision processes (MDPs). 
First, we learn a model of NSEs using observed data that contains state-action trajectories and severity of associated NSEs. Unlike previous works that associate NSEs with state-action pairs, our framework associates NSEs with entire trajectories, which is more general and captures non-Markovian dependence on states and actions. Second, we learn finite state controllers (FSCs) that predict NSE severity for a given trajectory and generalize well to unseen data. Finally, we develop a constrained MDP model that uses information from the underlying MDP and the learned FSC for planning while avoiding NSEs. Our empirical evaluation demonstrates the effectiveness of our approach in learning and mitigating Markovian and non-Markovian NSEs.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Srivastava, Aishwarya and Saisubramanian, Sandhya and Paruchuri, Praveen and Kumar, Akshat and Zilberstein, Shlomo}, year={2023}, month={Jun.}, pages={15144-15151} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26767/26539", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26767", + "pdf_size": 404844, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8974087915106125561&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "research.iiit.ac.in;oregonstate.edu;iiit.ac.in;smu.edu.sg;cs.umass.edu", + "email": "research.iiit.ac.in;oregonstate.edu;iiit.ac.in;smu.edu.sg;cs.umass.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;2;3", + "aff_unique_norm": "International Institute of Information Technology, Hyderabad;Oregon State University;Singapore Management University;University of Massachusetts Amherst", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.iiit.ac.in;https://oregonstate.edu;https://www.smu.edu.sg;https://www.umass.edu", + "aff_unique_abbr": "IIIT-H;OSU;SMU;UMass Amherst", + "aff_campus_unique_index": 
"0;0;2", + "aff_campus_unique": "Hyderabad;;Amherst", + "aff_country_unique_index": "0;1;0;2;1", + "aff_country_unique": "India;United States;Singapore" + }, + { + "id": "article-26819", + "title": "Planning and Learning for Reliable Autonomy in the Open World", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Safe and reliable decision-making is critical for long-term deployment of autonomous systems. Despite the recent advances in artificial intelligence, ensuring safe and reliable operation of human-aligned autonomous systems in open-world environments remains a challenge. My research focuses on developing planning and learning algorithms that support reliable autonomy in fully and partially observable environments, in the presence of uncertainty, limited information, and limited resources. This talk covers a summary of my recent research towards reliable autonomy.", + "primary_area": "", + "author": "Sandhya Saisubramanian", + "authorids": "", + "aff": "Assistant Professor, Oregon State University, Oregon, USA", + "bibtex": "@article{Saisubramanian_2024, title={Planning and Learning for Reliable Autonomy in the Open World}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26819}, DOI={10.1609/aaai.v37i13.26819}, abstractNote={Safe and reliable decision-making is critical for long-term deployment of autonomous systems. Despite the recent advances in artificial intelligence, ensuring safe and reliable operation of human-aligned autonomous systems in open-world environments remains a challenge. My research focuses on developing planning and learning algorithms that support reliable autonomy in fully and partially observable environments, in the presence of uncertainty, limited information, and limited resources. 
This talk covers a summary of my recent research towards reliable autonomy.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Saisubramanian, Sandhya}, year={2024}, month={Jul.}, pages={15452-15452} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26819/26591", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26819", + "pdf_size": 44328, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:cUVJ_yHzk8cJ:scholar.google.com/&scioq=Planning+and+Learning+for+Reliable+Autonomy+in+the+Open+World&hl=en&as_sdt=0,31", + "gs_version_total": 3, + "aff_domain": "oregonstate.edu", + "email": "oregonstate.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Oregon State University", + "aff_unique_dep": "", + "aff_unique_url": "https://oregonstate.edu", + "aff_unique_abbr": "OSU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26149", + "title": "Planning and Learning with Adaptive Lookahead", + "track": "main", + "status": "Technical", + "abstract": "Some of the most powerful reinforcement learning frameworks use planning for action selection. Interestingly, their planning horizon is either fixed or determined arbitrarily by the state visitation history. Here, we expand beyond the naive fixed horizon and propose a theoretically justified strategy for adaptive selection of the planning horizon as a function of the state-dependent value estimate. We propose two variants for lookahead selection and analyze the trade-off between iteration count and computational complexity per iteration. We then devise a corresponding deep Q-network algorithm with an adaptive tree search horizon. 
We separate the value estimation per depth to compensate for the off-policy discrepancy between depths. Lastly, we demonstrate the efficacy of our adaptive lookahead method in a maze environment and Atari.", + "primary_area": "machine learning iii", + "author": "Aviv Rosenberg; Assaf Hallak; Shie Mannor; Gal Chechik; Gal Dalal", + "authorids": "", + "aff": "Amazon Science; Nvidia Research; Nvidia Research + Technion; Nvidia Research + Bar-Ilan University; Nvidia Research", + "bibtex": "@article{Rosenberg_Hallak_Mannor_Chechik_Dalal_2023, title={Planning and Learning with Adaptive Lookahead}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26149}, DOI={10.1609/aaai.v37i8.26149}, abstractNote={Some of the most powerful reinforcement learning frameworks use planning for action selection. Interestingly, their planning horizon is either fixed or determined arbitrarily by the state visitation history. Here, we expand beyond the naive fixed horizon and propose a theoretically justified strategy for adaptive selection of the planning horizon as a function of the state-dependent value estimate. We propose two variants for lookahead selection and analyze the trade-off between iteration count and computational complexity per iteration. We then devise a corresponding deep Q-network algorithm with an adaptive tree search horizon. We separate the value estimation per depth to compensate for the off-policy discrepancy between depths. 
Lastly, we demonstrate the efficacy of our adaptive lookahead method in a maze environment and Atari.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Rosenberg, Aviv and Hallak, Assaf and Mannor, Shie and Chechik, Gal and Dalal, Gal}, year={2023}, month={Jun.}, pages={9606-9613} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26149/25921", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26149", + "pdf_size": 1353912, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16008815704193864465&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff_domain": "amazon.com; ; ; ; ", + "email": "amazon.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1+2;1+3;1", + "aff_unique_norm": "Amazon;NVIDIA Corporation;Technion - Israel Institute of Technology;Bar-Ilan University", + "aff_unique_dep": "Amazon Science;NVIDIA Research;;", + "aff_unique_url": "https://www.amazon.science;https://www.nvidia.com/research;https://www.technion.ac.il/en/;https://www.biu.ac.il", + "aff_unique_abbr": "Amazon Science;NVIDIA;Technion;BIU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1;0+1;0", + "aff_country_unique": "United States;Israel" + }, + { + "id": "article-26416", + "title": "Planning for Learning Object Properties", + "track": "main", + "status": "Technical", + "abstract": "Autonomous agents embedded in a physical environment need the ability to recognize objects and their properties from sensory data. Such a perceptual ability is often implemented by supervised machine learning models, which are pre-trained using a set of labelled data. In real-world, open-ended deployments, however, it is unrealistic to assume to have a pre-trained model for all possible environments. 
Therefore, agents need to dynamically learn/adapt/extend their perceptual abilities online, in an autonomous way, by exploring and interacting with the environment where they operate. This paper describes a way to do so, by exploiting symbolic planning. Specifically, we formalize the problem of automatically training a neural network to recognize object properties as a symbolic planning problem (using PDDL). We use planning techniques to produce a strategy for automating the training dataset creation and the learning process. Finally, we provide an experimental evaluation in both a simulated and a real environment, which shows that the proposed approach is able to successfully learn how to recognize new object properties.", + "primary_area": "planning routing and scheduling", + "author": "Leonardo Lamanna; Luciano Serafini; Mohamadreza Faridghasemnia; Alessandro Saffiotti; Alessandro Saetti; Alfonso Gerevini; Paolo Traverso", + "authorids": "", + "aff": "Fondazione Bruno Kessler, Trento, Italy+Department of Information Engineering, University of Brescia, Italy; Fondazione Bruno Kessler, Trento, Italy; Center for Applied Autonomous Sensor Systems, University of \u00d6rebro, Sweden; Center for Applied Autonomous Sensor Systems, University of \u00d6rebro, Sweden; Department of Information Engineering, University of Brescia, Italy; Department of Information Engineering, University of Brescia, Italy; Fondazione Bruno Kessler, Trento, Italy", + "bibtex": "@article{Lamanna_Serafini_Faridghasemnia_Saffiotti_Saetti_Gerevini_Traverso_2023, title={Planning for Learning Object Properties}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26416}, DOI={10.1609/aaai.v37i10.26416}, abstractNote={Autonomous agents embedded in a physical environment need the ability to recognize objects and their properties from sensory data. Such a perceptual ability is often implemented by supervised machine learning models, which are pre-trained using a set of labelled data. 
In real-world, open-ended deployments, however, it is unrealistic to assume to have a pre-trained model for all possible environments. Therefore, agents need to dynamically learn/adapt/extend their perceptual abilities online, in an autonomous way, by exploring and interacting with the environment where they operate. This paper describes a way to do so, by exploiting symbolic planning. Specifically, we formalize the problem of automatically training a neural network to recognize object properties as a symbolic planning problem (using PDDL). We use planning techniques to produce a strategy for automating the training dataset creation and the learning process. Finally, we provide an experimental evaluation in both a simulated and a real environment, which shows that the proposed approach is able to successfully learn how to recognize new object properties.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lamanna, Leonardo and Serafini, Luciano and Faridghasemnia, Mohamadreza and Saffiotti, Alessandro and Saetti, Alessandro and Gerevini, Alfonso and Traverso, Paolo}, year={2023}, month={Jun.}, pages={12005-12013} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26416/26188", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26416", + "pdf_size": 8691764, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1812481685066011623&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "fbk.eu;fbk.eu;oru.se;aass.oru.se;unibs.it;unibs.it;fbk.eu", + "email": "fbk.eu;fbk.eu;oru.se;aass.oru.se;unibs.it;unibs.it;fbk.eu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;2;2;1;1;0", + "aff_unique_norm": "Fondazione Bruno Kessler;University of Brescia;University of Orebro", + "aff_unique_dep": ";Department of Information Engineering;Center for Applied Autonomous Sensor Systems", + "aff_unique_url": 
"https://www.fbk.eu;https://www.unibs.it;https://www.oru.se", + "aff_unique_abbr": "FBK;;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Trento;", + "aff_country_unique_index": "0+0;0;1;1;0;0;0", + "aff_country_unique": "Italy;Sweden" + }, + { + "id": "article-26411", + "title": "Planning with Hidden Parameter Polynomial MDPs", + "track": "main", + "status": "Technical", + "abstract": "For many applications of Markov Decision Processes (MDPs), the transition function cannot be specified exactly. Bayes-Adaptive MDPs (BAMDPs) extend MDPs to consider transition probabilities governed by latent parameters. To act optimally in BAMDPs, one must maintain a belief distribution over the latent parameters. Typically, this distribution is described by a set of sample (particle) MDPs, and associated weights which represent the likelihood of a sample MDP being the true underlying MDP. However, as the number of dimensions of the latent parameter space increases, the number of sample MDPs required to sufficiently represent the belief distribution grows exponentially. Thus, maintaining an accurate belief in the form of a set of sample MDPs over complex latent spaces is computationally intensive, which in turn affects the performance of planning for these models. In this paper, we propose an alternative approach for maintaining the belief over the latent parameters. We consider a class of BAMDPs where the transition probabilities can be expressed in closed form as a polynomial of the latent parameters, and outline a method to maintain a closed-form belief distribution for the latent parameters which results in an accurate belief representation. Furthermore, the closed-form representation does away with the need to tune the number of sample MDPs required to represent the belief. 
We evaluate two domains and empirically show that the polynomial, closed-form, belief representation results in better plans than a sampling-based belief representation.", + "primary_area": "planning routing and scheduling", + "author": "Clarissa Costen; Marc Rigter; Bruno Lacerda; Nick Hawes", + "authorids": "", + "aff": "Oxford Robotics Institute, University of Oxford; Oxford Robotics Institute, University of Oxford; Oxford Robotics Institute, University of Oxford; Oxford Robotics Institute, University of Oxford", + "bibtex": "@article{Costen_Rigter_Lacerda_Hawes_2023, title={Planning with Hidden Parameter Polynomial MDPs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26411}, DOI={10.1609/aaai.v37i10.26411}, abstractNote={For many applications of Markov Decision Processes (MDPs), the transition function cannot be specified exactly. Bayes-Adaptive MDPs (BAMDPs) extend MDPs to consider transition probabilities governed by latent parameters. To act optimally in BAMDPs, one must maintain a belief distribution over the latent parameters. Typically, this distribution is described by a set of sample (particle) MDPs, and associated weights which represent the likelihood of a sample MDP being the true underlying MDP. However, as the number of dimensions of the latent parameter space increases, the number of sample MDPs required to sufficiently represent the belief distribution grows exponentially. Thus, maintaining an accurate belief in the form of a set of sample MDPs over complex latent spaces is computationally intensive, which in turn affects the performance of planning for these models. In this paper, we propose an alternative approach for maintaining the belief over the latent parameters. 
We consider a class of BAMDPs where the transition probabilities can be expressed in closed form as a polynomial of the latent parameters, and outline a method to maintain a closed-form belief distribution for the latent parameters which results in an accurate belief representation. Furthermore, the closed-form representation does away with the need to tune the number of sample MDPs required to represent the belief. We evaluate two domains and empirically show that the polynomial, closed-form, belief representation results in better plans than a sampling-based belief representation.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Costen, Clarissa and Rigter, Marc and Lacerda, Bruno and Hawes, Nick}, year={2023}, month={Jun.}, pages={11963-11971} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26411/26183", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26411", + "pdf_size": 352408, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10601914119293712148&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk", + "email": "robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "Oxford Robotics Institute", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Oxford", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25143", + "title": "Point-Teaching: Weakly Semi-supervised Object Detection with Point Annotations", + "track": "main", + "status": "Technical", + "abstract": "Point annotations are considerably more time-efficient than bounding box annotations. 
However, how to use cheap point annotations to boost the performance of semi-supervised object detection is still an open question. In this work, we present Point-Teaching, a weakly- and semi-supervised object detection framework to fully utilize the point annotations. Specifically, we propose a Hungarian-based point-matching method to generate pseudo labels for point-annotated images. We further propose multiple instance learning (MIL) approaches at the level of images and points to supervise the object detector with point annotations. Finally, we propose a simple data augmentation, named Point-Guided Copy-Paste, to reduce the impact of those unmatched points. Experiments demonstrate the effectiveness of our method on a few datasets and various data regimes. In particular, Point-Teaching outperforms the previous best method Group R-CNN by 3.1 AP with 5% fully labeled data and 2.3 AP with 30% fully labeled data on the MS COCO dataset. We believe that our proposed framework can largely lower the bar of learning accurate object detectors and pave the way for its broader applications. The code is available at https://github.com/YongtaoGe/Point-Teaching.", + "primary_area": "computer vision i", + "author": "Yongtao Ge; Qiang Zhou; Xinlong Wang; Chunhua Shen; Zhibin Wang; Hao Li", + "authorids": "", + "aff": "The University of Adelaide; Alibaba Group; Beijing Academy of Artificial Intelligence; Zhejiang University; Alibaba Group; Alibaba Group", + "bibtex": "@article{Ge_Zhou_Wang_Shen_Wang_Li_2023, title={Point-Teaching: Weakly Semi-supervised Object Detection with Point Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25143}, DOI={10.1609/aaai.v37i1.25143}, abstractNote={Point annotations are considerably more time-efficient than bounding box annotations. However, how to use cheap point annotations to boost the performance of semi-supervised object detection is still an open question. 
In this work, we present Point-Teaching, a weakly- and semi-supervised object detection framework to fully utilize the point annotations. Specifically, we propose a Hungarian-based point-matching method to generate pseudo labels for point-annotated images. We further propose multiple instance learning (MIL) approaches at the level of images and points to supervise the object detector with point annotations. Finally, we propose a simple data augmentation, named Point-Guided Copy-Paste, to reduce the impact of those unmatched points. Experiments demonstrate the effectiveness of our method on a few datasets and various data regimes. In particular, Point-Teaching outperforms the previous best method Group R-CNN by 3.1 AP with 5% fully labeled data and 2.3 AP with 30% fully labeled data on the MS COCO dataset. We believe that our proposed framework can largely lower the bar of learning accurate object detectors and pave the way for its broader applications. The code is available at https://github.com/YongtaoGe/Point-Teaching.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ge, Yongtao and Zhou, Qiang and Wang, Xinlong and Shen, Chunhua and Wang, Zhibin and Li, Hao}, year={2023}, month={Jun.}, pages={667-675} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25143/24915", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25143", + "pdf_size": 603375, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15635948905613137401&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "adelaide.edu.au;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;baai.ac.cn;zju.edu.cn", + "email": "adelaide.edu.au;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;baai.ac.cn;zju.edu.cn", + "github": "https://github.com/YongtaoGe/Point-Teaching", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;1;1", + "aff_unique_norm": "University of Adelaide;Alibaba 
Group;Beijing Academy of Artificial Intelligence;Zhejiang University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.adelaide.edu.au;https://www.alibaba.com;https://www.baai.ac.cn;https://www.zju.edu.cn", + "aff_unique_abbr": "Adelaide;Alibaba;BAAI;ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;1", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-26675", + "title": "Point-to-Region Co-learning for Poverty Mapping at High Resolution Using Satellite Imagery", + "track": "aaai special track", + "status": "Technical", + "abstract": "Despite improvements in safe water and sanitation services in low-income countries, a substantial proportion of the population in Africa still does not have access to these essential services. Up-to-date fine-scale maps of low-income settlements are urgently needed by authorities to improve service provision. We aim to develop a cost-effective solution to generate fine-scale maps of these vulnerable populations using multi-source public information. The problem is challenging as ground-truth maps are available at only a limited number of cities, and the patterns are heterogeneous across cities. Recent attempts tackling the spatial heterogeneity issue focus on scenarios where true labels partially exist for each input region, which are unavailable for the present problem. We propose a dynamic point-to-region co-learning framework to learn heterogeneity patterns that cannot be reflected by point-level information and generalize deep learners to new areas with no labels. We also propose an attention-based correction layer to remove spurious signatures, and a region-gate to capture both region-invariant and variant patterns. 
Experiment results on real-world fine-scale data in three cities of Kenya show that the proposed approach can largely improve model performance on various base network architectures.", + "primary_area": "ai for social impact", + "author": "Zhili Li; Yiqun Xie; Xiaowei Jia; Kara Stuart; Caroline Delaire; Sergii Skakun", + "authorids": "", + "aff": "University of Maryland; University of Maryland; University of Pittsburgh; The Aquaya Institute; The Aquaya Institute; University of Maryland", + "bibtex": "@article{Li_Xie_Jia_Stuart_Delaire_Skakun_2023, title={Point-to-Region Co-learning for Poverty Mapping at High Resolution Using Satellite Imagery}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26675}, DOI={10.1609/aaai.v37i12.26675}, abstractNote={Despite improvements in safe water and sanitation services in low-income countries, a substantial proportion of the population in Africa still does not have access to these essential services. Up-to-date fine-scale maps of low-income settlements are urgently needed by authorities to improve service provision. We aim to develop a cost-effective solution to generate fine-scale maps of these vulnerable populations using multi-source public information. The problem is challenging as ground-truth maps are available at only a limited number of cities, and the patterns are heterogeneous across cities. Recent attempts tackling the spatial heterogeneity issue focus on scenarios where true labels partially exist for each input region, which are unavailable for the present problem. We propose a dynamic point-to-region co-learning framework to learn heterogeneity patterns that cannot be reflected by point-level information and generalize deep learners to new areas with no labels. We also propose an attention-based correction layer to remove spurious signatures, and a region-gate to capture both region-invariant and variant patterns. 
Experiment results on real-world fine-scale data in three cities of Kenya show that the proposed approach can largely improve model performance on various base network architectures.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zhili and Xie, Yiqun and Jia, Xiaowei and Stuart, Kara and Delaire, Caroline and Skakun, Sergii}, year={2023}, month={Jun.}, pages={14321-14328} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26675/26447", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26675", + "pdf_size": 7345991, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12649484638024528650&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "umd.edu;umd.edu;pitt.edu;aquaya.org;aquaya.org;umd.edu", + "email": "umd.edu;umd.edu;pitt.edu;aquaya.org;aquaya.org;umd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;2;0", + "aff_unique_norm": "University of Maryland;University of Pittsburgh;Aquaya Institute", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.umd.edu;https://www.pitt.edu;https://www.aquayainstitute.org", + "aff_unique_abbr": "UMD;Pitt;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25166", + "title": "PointCA: Evaluating the Robustness of 3D Point Cloud Completion Models against Adversarial Examples", + "track": "main", + "status": "Technical", + "abstract": "Point cloud completion, as the upstream procedure of 3D recognition and segmentation, has become an essential part of many tasks such as navigation and scene understanding. While various point cloud completion models have demonstrated their powerful capabilities, their robustness against adversarial attacks, which have been proven to be fatally malicious towards deep neural networks, remains unknown. 
In addition, existing attack approaches towards point cloud classifiers cannot be applied to the completion models due to different output forms and attack purposes. In order to evaluate the robustness of the completion models, we propose PointCA, the first adversarial attack against 3D point cloud completion models. PointCA can generate adversarial point clouds that maintain high similarity with the original ones, while being completed as another object with totally different semantic information. Specifically, we minimize the representation discrepancy between the adversarial example and the target point set to jointly explore the adversarial point clouds in the geometry space and the feature space. Furthermore, to launch a stealthier attack, we innovatively employ the neighbourhood density information to tailor the perturbation constraint, leading to geometry-aware and distribution-adaptive modifications for each point.\nExtensive experiments against different premier point cloud completion networks show that PointCA can cause the performance degradation from 77.9% to 16.7%, with the structure chamfer distance kept below 0.01. 
We conclude that existing completion models are severely vulnerable to adversarial examples, and state-of-the-art defenses for point cloud classification will be partially invalid when applied to incomplete and uneven point cloud data.", + "primary_area": "computer vision i", + "author": "Shengshan Hu; Junwei Zhang; Wei Liu; Junhui Hou; Minghui Li; Leo Yu Zhang; Hai Jin; Lichao Sun", + "authorids": "", + "aff": "School of Cyber Science and Engineering, Huazhong University of Science and Technology+National Engineering Research Center for Big Data Technology and System+Hubei Engineering Research Center on Big Data Security+Hubei Key Laboratory of Distributed System Security+Services Computing Technology and System Lab; School of Computer Science and Technology, Huazhong University of Science and Technology+National Engineering Research Center for Big Data Technology and System+Hubei Engineering Research Center on Big Data Security+Hubei Key Laboratory of Distributed System Security+Services Computing Technology and System Lab; School of Cyber Science and Engineering, Huazhong University of Science and Technology+National Engineering Research Center for Big Data Technology and System+Hubei Engineering Research Center on Big Data Security+Hubei Key Laboratory of Distributed System Security+Services Computing Technology and System Lab; Department of Computer Science, City University of Hong Kong; School of Software Engineering, Huazhong University of Science and Technology; School of Information Technology, Deakin University; School of Computer Science and Technology, Huazhong University of Science and Technology+Cluster and Grid Computing Lab+Services Computing Technology and System Lab; Department of Computer Science and Engineering, Lehigh University", + "bibtex": "@article{Hu_Zhang_Liu_Hou_Li_Zhang_Jin_Sun_2023, title={PointCA: Evaluating the Robustness of 3D Point Cloud Completion Models against Adversarial Examples}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25166}, DOI={10.1609/aaai.v37i1.25166}, abstractNote={Point cloud completion, as the upstream procedure of 3D recognition and segmentation, has become an essential part of many tasks such as navigation and scene understanding. While various point cloud completion models have demonstrated their powerful capabilities, their robustness against adversarial attacks, which have been proven to be fatally malicious towards deep neural networks, remains unknown. In addition, existing attack approaches towards point cloud classifiers cannot be applied to the completion models due to different output forms and attack purposes. In order to evaluate the robustness of the completion models, we propose PointCA, the first adversarial attack against 3D point cloud completion models. PointCA can generate adversarial point clouds that maintain high similarity with the original ones, while being completed as another object with totally different semantic information. Specifically, we minimize the representation discrepancy between the adversarial example and the target point set to jointly explore the adversarial point clouds in the geometry space and the feature space. Furthermore, to launch a stealthier attack, we innovatively employ the neighbourhood density information to tailor the perturbation constraint, leading to geometry-aware and distribution-adaptive modifications for each point.\nExtensive experiments against different premier point cloud completion networks show that PointCA can cause the performance degradation from 77.9% to 16.7%, with the structure chamfer distance kept below 0.01. 
We conclude that existing completion models are severely vulnerable to adversarial examples, and state-of-the-art defenses for point cloud classification will be partially invalid when applied to incomplete and uneven point cloud data.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Shengshan and Zhang, Junwei and Liu, Wei and Hou, Junhui and Li, Minghui and Zhang, Leo Yu and Jin, Hai and Sun, Lichao}, year={2023}, month={Jun.}, pages={872-880} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25166/24938", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25166", + "pdf_size": 7384469, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10478713655978912302&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;cityu.edu.hk;hust.edu.cn;deakin.edu.au;hust.edu.cn;lehigh.edu", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;cityu.edu.hk;hust.edu.cn;deakin.edu.au;hust.edu.cn;lehigh.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1+2+3+4;0+1+2+3+4;0+1+2+3+4;5;0;6;0+7+4;8", + "aff_unique_norm": "Huazhong University of Science and Technology;National Engineering Research Center for Big Data Technology and System;Hubei Engineering Research Center on Big Data Security;Hubei Key Laboratory of Distributed System Security;Services Computing Technology and System Lab;City University of Hong Kong;Deakin University;Cluster and Grid Computing Lab;Lehigh University", + "aff_unique_dep": "School of Cyber Science and Engineering;;Engineering Research Center on Big Data Security;Distributed System Security;Services Computing Technology and System;Department of Computer Science;School of Information Technology;Computer Science;Department of Computer Science and Engineering", + "aff_unique_url": 
"http://www.hust.edu.cn;;;;;https://www.cityu.edu.hk;https://www.deakin.edu.au;;https://www.lehigh.edu", + "aff_unique_abbr": "HUST;;;;;CityU;Deakin;;Lehigh", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0+0;0+0+0+0;0+0+0+0;0;0;2;0;3", + "aff_country_unique": "China;;Australia;United States" + }, + { + "id": "article-25982", + "title": "Pointerformer: Deep Reinforced Multi-Pointer Transformer for the Traveling Salesman Problem", + "track": "main", + "status": "Technical", + "abstract": "Traveling Salesman Problem (TSP), as a classic routing optimization problem originally arising in the domain of transportation and logistics, has become a critical task in broader domains, such as manufacturing and biology. Recently, Deep Reinforcement Learning (DRL) has been increasingly employed to solve TSP due to its high inference efficiency. Nevertheless, most of existing end-to-end DRL algorithms only perform well on small TSP instances and can hardly generalize to large scale because of the drastically soaring memory consumption and computation time along with the enlarging problem scale. In this paper, we propose a novel end-to-end DRL approach, referred to as Pointerformer, based on multi-pointer Transformer. Particularly, Pointerformer adopts both reversible residual network in the encoder and multi-pointer network in the decoder to effectively contain memory consumption of the encoder-decoder architecture. To further improve the performance of TSP solutions, Pointerformer employs a feature augmentation method to explore the symmetries of TSP at both training and inference stages as well as an enhanced context embedding approach to include more comprehensive context information in the query. 
Extensive experiments on a randomly generated benchmark and a public benchmark have shown that, while achieving comparative results on most small-scale TSP instances as state-of-the-art DRL approaches do, Pointerformer can also well generalize to large-scale TSPs.", + "primary_area": "machine learning ii", + "author": "Yan Jin; Yuandong Ding; Xuanhao Pan; Kun He; Li Zhao; Tao Qin; Lei Song; Jiang Bian", + "authorids": "", + "aff": "School of Computer Science, Huazhong University of Science and Technology, China; School of Computer Science, Huazhong University of Science and Technology, China; School of Computer Science, Huazhong University of Science and Technology, China; School of Computer Science, Huazhong University of Science and Technology, China + HopcroftCenter on Computing Science, Huazhong University of Science and Technology, China; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia", + "bibtex": "@article{Jin_Ding_Pan_He_Zhao_Qin_Song_Bian_2023, title={Pointerformer: Deep Reinforced Multi-Pointer Transformer for the Traveling Salesman Problem}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25982}, DOI={10.1609/aaai.v37i7.25982}, abstractNote={Traveling Salesman Problem (TSP), as a classic routing optimization problem originally arising in the domain of transportation and logistics, has become a critical task in broader domains, such as manufacturing and biology. Recently, Deep Reinforcement Learning (DRL) has been increasingly employed to solve TSP due to its high inference efficiency. Nevertheless, most of existing end-to-end DRL algorithms only perform well on small TSP instances and can hardly generalize to large scale because of the drastically soaring memory consumption and computation time along with the enlarging problem scale. In this paper, we propose a novel end-to-end DRL approach, referred to as Pointerformer, based on multi-pointer Transformer. 
Particularly, Pointerformer adopts both reversible residual network in the encoder and multi-pointer network in the decoder to effectively contain memory consumption of the encoder-decoder architecture. To further improve the performance of TSP solutions, Pointerformer employs a feature augmentation method to explore the symmetries of TSP at both training and inference stages as well as an enhanced context embedding approach to include more comprehensive context information in the query. Extensive experiments on a randomly generated benchmark and a public benchmark have shown that, while achieving comparative results on most small-scale TSP instances as state-of-the-art DRL approaches do, Pointerformer can also well generalize to large-scale TSPs.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Yan and Ding, Yuandong and Pan, Xuanhao and He, Kun and Zhao, Li and Qin, Tao and Song, Lei and Bian, Jiang}, year={2023}, month={Jun.}, pages={8132-8140} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25982/25754", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25982", + "pdf_size": 556467, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10503691475163636868&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0+0;1;1;1;1", + "aff_unique_norm": "Huazhong University of Science and Technology;Microsoft Research", + "aff_unique_dep": "School of Computer Science;Research", + "aff_unique_url": "http://www.hust.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "HUST;MSR Asia", + "aff_campus_unique_index": 
";1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26083", + "title": "Poisoning with Cerberus: Stealthy and Colluded Backdoor Attack against Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Are Federated Learning (FL) systems free from backdoor poisoning with the arsenal of various defense strategies deployed? This is an intriguing problem with significant practical implications regarding the utility of FL services. Despite the recent flourish of poisoning-resilient FL methods, our study shows that carefully tuning the collusion between malicious participants can minimize the trigger-induced bias of the poisoned local model from the poison-free one, which plays the key role in delivering stealthy backdoor attacks and circumventing a wide spectrum of state-of-the-art defense methods in FL. In our work, we instantiate the attack strategy by proposing a distributed backdoor attack method, namely Cerberus Poisoning (CerP). It jointly tunes the backdoor trigger and controls the poisoned model changes on each malicious participant to achieve a stealthy yet successful backdoor attack against a wide spectrum of defensive mechanisms of federated learning techniques. 
Our extensive study on 3 large-scale benchmark datasets and 13 mainstream defensive mechanisms confirms that Cerberus Poisoning raises a significantly severe threat to the integrity and security of federated learning practices, regardless of the flourish of robust Federated Learning methods.", + "primary_area": "machine learning ii", + "author": "Xiaoting Lyu; Yufei Han; Wei Wang; Jingkai Liu; Bin Wang; Jiqiang Liu; Xiangliang Zhang", + "authorids": "", + "aff": "Beijing Key Laboratory of Security and Privacy in Intelligent Transportation, Beijing Jiaotong University, China; INRIA, France; Beijing Key Laboratory of Security and Privacy in Intelligent Transportation, Beijing Jiaotong University, China; Beijing Key Laboratory of Security and Privacy in Intelligent Transportation, Beijing Jiaotong University, China; Zhejiang Key Laboratory of Multi-dimensional Perception Technology, Application and Cybersecurity, China; Beijing Key Laboratory of Security and Privacy in Intelligent Transportation, Beijing Jiaotong University, China; University of Notre Dame, USA", + "bibtex": "@article{Lyu_Han_Wang_Liu_Wang_Liu_Zhang_2023, title={Poisoning with Cerberus: Stealthy and Colluded Backdoor Attack against Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26083}, DOI={10.1609/aaai.v37i7.26083}, abstractNote={Are Federated Learning (FL) systems free from backdoor poisoning with the arsenal of various defense strategies deployed? This is an intriguing problem with significant practical implications regarding the utility of FL services. Despite the recent flourish of poisoning-resilient FL methods, our study shows that carefully tuning the collusion between malicious participants can minimize the trigger-induced bias of the poisoned local model from the poison-free one, which plays the key role in delivering stealthy backdoor attacks and circumventing a wide spectrum of state-of-the-art defense methods in FL. 
In our work, we instantiate the attack strategy by proposing a distributed backdoor attack method, namely Cerberus Poisoning (CerP). It jointly tunes the backdoor trigger and controls the poisoned model changes on each malicious participant to achieve a stealthy yet successful backdoor attack against a wide spectrum of defensive mechanisms of federated learning techniques. Our extensive study on 3 large-scale benchmark datasets and 13 mainstream defensive mechanisms confirms that Cerberus Poisoning raises a significantly severe threat to the integrity and security of federated learning practices, regardless of the flourish of robust Federated Learning methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lyu, Xiaoting and Han, Yufei and Wang, Wei and Liu, Jingkai and Wang, Bin and Liu, Jiqiang and Zhang, Xiangliang}, year={2023}, month={Jun.}, pages={9020-9028} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26083/25855", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26083", + "pdf_size": 197072, + "gs_citation": 73, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11997044330955674846&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "bjtu.edu.cn;gmail.com;bjtu.edu.cn;bjtu.edu.cn;zju.edu.cn;bjtu.edu.cn;nd.edu", + "email": "bjtu.edu.cn;gmail.com;bjtu.edu.cn;bjtu.edu.cn;zju.edu.cn;bjtu.edu.cn;nd.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;2;0;3", + "aff_unique_norm": "Beijing Jiaotong University;INRIA;Zhejiang Key Laboratory of Multi-dimensional Perception Technology, Application and Cybersecurity;University of Notre Dame", + "aff_unique_dep": "Beijing Key Laboratory of Security and Privacy in Intelligent Transportation;;Multi-dimensional Perception Technology, Application and Cybersecurity;", + "aff_unique_url": "http://www.bjtu.edu.cn;https://www.inria.fr;;https://www.nd.edu", + "aff_unique_abbr": 
"BJTU;INRIA;;Notre Dame", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;1;0;0;0;0;2", + "aff_country_unique": "China;France;United States" + }, + { + "id": "article-26921", + "title": "Poisoning-Based Backdoor Attacks in Computer Vision", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Recent studies demonstrated that the training process of deep neural networks (DNNs) is vulnerable to backdoor attacks if third-party training resources (e.g., samples) are adopted. Specifically, the adversaries intend to embed hidden backdoors into DNNs, where the backdoor can be activated by pre-defined trigger patterns and leading malicious model predictions. My dissertation focuses on poisoning-based backdoor attacks in computer vision. Firstly, I study and propose more stealthy and effective attacks against image classification tasks in both physical and digital spaces. Secondly, I reveal the backdoor threats in visual object tracking, which is representative of critical video-related tasks. Thirdly, I explore how to exploit backdoor attacks as watermark techniques for positive purposes. I design a Python toolbox (i.e., BackdoorBox) that implements representative and advanced backdoor attacks and defenses under a unified and flexible framework, based on which to provide a comprehensive benchmark of existing methods at the end.", + "primary_area": "", + "author": "Yiming Li", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University, China", + "bibtex": "@article{Li_2024, title={Poisoning-Based Backdoor Attacks in Computer Vision}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26921}, DOI={10.1609/aaai.v37i13.26921}, abstractNote={Recent studies demonstrated that the training process of deep neural networks (DNNs) is vulnerable to backdoor attacks if third-party training resources (e.g., samples) are adopted. 
Specifically, the adversaries intend to embed hidden backdoors into DNNs, where the backdoor can be activated by pre-defined trigger patterns and leading malicious model predictions. My dissertation focuses on poisoning-based backdoor attacks in computer vision. Firstly, I study and propose more stealthy and effective attacks against image classification tasks in both physical and digital spaces. Secondly, I reveal the backdoor threats in visual object tracking, which is representative of critical video-related tasks. Thirdly, I explore how to exploit backdoor attacks as watermark techniques for positive purposes. I design a Python toolbox (i.e., BackdoorBox) that implements representative and advanced backdoor attacks and defenses under a unified and flexible framework, based on which to provide a comprehensive benchmark of existing methods at the end.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yiming}, year={2024}, month={Jul.}, pages={16121-16122} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26921/26693", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26921", + "pdf_size": 60505, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6405633053146231364&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "mails.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "International Graduate School", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0", + "aff_country_unique": "China" + }, + { + "id": "article-25185", + "title": "PolarFormer: Multi-Camera 3D Object Detection with Polar Transformer", + "track": "main", + "status": "Technical", + "abstract": "3D 
object detection in autonomous driving aims to reason \u201cwhat\u201d and \u201cwhere\u201d the objects of interest present in a 3D world. Following the conventional wisdom of previous 2D object detection, existing methods often adopt the canonical Cartesian coordinate system with perpendicular axis. However, we conjugate that this does not fit the nature of the ego car\u2019s perspective, as each onboard camera perceives the world in shape of wedge intrinsic to the imaging geometry with radical (non perpendicular) axis. Hence, in this paper we advocate the exploitation of the Polar coordinate system and propose a new Polar Transformer (PolarFormer) for more accurate 3D object detection in the bird\u2019s-eye-view (BEV) taking as input only multi-camera 2D images. Specifically, we design a cross-attention based Polar detection head without restriction to the shape of input structure to deal with irregular Polar grids. For tackling the unconstrained object scale variations along Polar\u2019s distance dimension, we further introduce a multi-scale Polar representation learning strategy. As a result, our model can make best use of the Polar representation rasterized via attending to the corresponding image observation in a sequence-to-sequence fashion subject to the geometric constraints. 
Thorough experiments on the nuScenes dataset demonstrate that our PolarFormer outperforms significantly state-of-the-art 3D object detection alternatives.", + "primary_area": "computer vision i", + "author": "Yanqin Jiang; Li Zhang; Zhenwei Miao; Xiatian Zhu; Jin Gao; Weiming Hu; Yu-Gang Jiang", + "authorids": "", + "aff": "NLPR, Institute of Automation, Chinese Academy of Sciences; School of Data Science, Fudan University; Alibaba DAMO Academy; Surrey Institute for People-Centred Artificial Intelligence, CVSSP, University of Surrey; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Information Science and Technology, ShanghaiTech University; School of Computer Science, Fudan University", + "bibtex": "@article{Jiang_Zhang_Miao_Zhu_Gao_Hu_Jiang_2023, title={PolarFormer: Multi-Camera 3D Object Detection with Polar Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25185}, DOI={10.1609/aaai.v37i1.25185}, abstractNote={3D object detection in autonomous driving aims to reason \u201cwhat\u201d and \u201cwhere\u201d the objects of interest present in a 3D world. Following the conventional wisdom of previous 2D object detection, existing methods often adopt the canonical Cartesian coordinate system with perpendicular axis. However, we conjugate that this does not fit the nature of the ego car\u2019s perspective, as each onboard camera perceives the world in shape of wedge intrinsic to the imaging geometry with radical (non perpendicular) axis. Hence, in this paper we advocate the exploitation of the Polar coordinate system and propose a new Polar Transformer (PolarFormer) for more accurate 3D object detection in the bird\u2019s-eye-view (BEV) taking as input only multi-camera 2D images. 
Specifically, we design a cross-attention based Polar detection head without restriction to the shape of input structure to deal with irregular Polar grids. For tackling the unconstrained object scale variations along Polar\u2019s distance dimension, we further introduce a multi-scale Polar representation learning strategy. As a result, our model can make best use of the Polar representation rasterized via attending to the corresponding image observation in a sequence-to-sequence fashion subject to the geometric constraints. Thorough experiments on the nuScenes dataset demonstrate that our PolarFormer outperforms significantly state-of-the-art 3D object detection alternatives.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Yanqin and Zhang, Li and Miao, Zhenwei and Zhu, Xiatian and Gao, Jin and Hu, Weiming and Jiang, Yu-Gang}, year={2023}, month={Jun.}, pages={1042-1050} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25185/24957", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25185", + "pdf_size": 14847321, + "gs_citation": 201, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=328847309631372068&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ia.ac.cn;fudan.edu.cn;alibaba-inc.com;surrey.ac.uk;nlpr.ia.ac.cn;nlpr.ia.ac.cn;fudan.edu.cn", + "email": "ia.ac.cn;fudan.edu.cn;alibaba-inc.com;surrey.ac.uk;nlpr.ia.ac.cn;nlpr.ia.ac.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;0+4;0+5;1", + "aff_unique_norm": "Chinese Academy of Sciences;Fudan University;Alibaba Group;University of Surrey;University of Chinese Academy of Sciences;ShanghaiTech University", + "aff_unique_dep": "Institute of Automation;School of Data Science;DAMO Academy;Surrey Institute for People-Centred Artificial Intelligence;School of Artificial Intelligence;School of Information Science and Technology", + "aff_unique_url": 
"http://www.ia.cas.cn;https://www.fudan.edu.cn;https://www.alibaba-group.com;https://www.surrey.ac.uk;http://www.ucas.ac.cn;https://www.shanghaitech.edu.cn", + "aff_unique_abbr": "CAS;Fudan;Alibaba DAMO;Surrey;UCAS;ShanghaiTech", + "aff_campus_unique_index": "1;;2", + "aff_campus_unique": ";Guildford;Shanghai", + "aff_country_unique_index": "0;0;0;1;0+0;0+0;0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-25486", + "title": "Polarization-Aware Low-Light Image Enhancement", + "track": "main", + "status": "Technical", + "abstract": "Polarization-based vision algorithms have found uses in various applications since polarization provides additional physical constraints. However, in low-light conditions, their performance would be severely degenerated since the captured polarized images could be noisy, leading to noticeable degradation in the degree of polarization (DoP) and the angle of polarization (AoP). Existing low-light image enhancement methods cannot handle the polarized images well since they operate in the intensity domain, without effectively exploiting the information provided by polarization. In this paper, we propose a Stokes-domain enhancement pipeline along with a dual-branch neural network to handle the problem in a polarization-aware manner. 
Two application scenarios (reflection removal and shape from polarization) are presented to show how our enhancement can improve their results.", + "primary_area": "computer vision iii", + "author": "Chu Zhou; Minggui Teng; Youwei Lyu; Si Li; Chao Xu; Boxin Shi", + "authorids": "", + "aff": "Key Laboratory of Machine Perception (MOE), School of Intelligence Science and Technology, Peking University; National Engineering Research Center of Visual Technology, School of Computer Science, Peking University; School of Artificial Intelligence, Beijing University of Posts and Telecommunications; School of Artificial Intelligence, Beijing University of Posts and Telecommunications; Key Laboratory of Machine Perception (MOE), School of Intelligence Science and Technology, Peking University; National Engineering Research Center of Visual Technology, School of Computer Science, Peking University", + "bibtex": "@article{Zhou_Teng_Lyu_Li_Xu_Shi_2023, title={Polarization-Aware Low-Light Image Enhancement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25486}, DOI={10.1609/aaai.v37i3.25486}, abstractNote={Polarization-based vision algorithms have found uses in various applications since polarization provides additional physical constraints. However, in low-light conditions, their performance would be severely degenerated since the captured polarized images could be noisy, leading to noticeable degradation in the degree of polarization (DoP) and the angle of polarization (AoP). Existing low-light image enhancement methods cannot handle the polarized images well since they operate in the intensity domain, without effectively exploiting the information provided by polarization. In this paper, we propose a Stokes-domain enhancement pipeline along with a dual-branch neural network to handle the problem in a polarization-aware manner. 
Two application scenarios (reflection removal and shape from polarization) are presented to show how our enhancement can improve their results.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Chu and Teng, Minggui and Lyu, Youwei and Li, Si and Xu, Chao and Shi, Boxin}, year={2023}, month={Jun.}, pages={3742-3750} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25486/25258", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25486", + "pdf_size": 5510112, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1998225830561097000&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "pku.edu.cn; ; ; ; ;pku.edu.cn", + "email": "pku.edu.cn; ; ; ; ;pku.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;0;0", + "aff_unique_norm": "Peking University;Beijing University of Posts and Telecommunications", + "aff_unique_dep": "School of Intelligence Science and Technology;School of Artificial Intelligence", + "aff_unique_url": "http://www.pku.edu.cn;http://www.bupt.edu.cn/", + "aff_unique_abbr": "Peking University;BUPT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26195", + "title": "Policy-Adaptive Estimator Selection for Off-Policy Evaluation", + "track": "main", + "status": "Technical", + "abstract": "Off-policy evaluation (OPE) aims to accurately evaluate the performance of counterfactual policies using only offline logged data. Although many estimators have been developed, there is no single estimator that dominates the others, because the estimators' accuracy can vary greatly depending on a given OPE task such as the evaluation policy, number of actions, and noise level. 
Thus, the data-driven estimator selection problem is becoming increasingly important and can have a significant impact on the accuracy of OPE. However, identifying the most accurate estimator using only the logged data is quite challenging because the ground-truth estimation accuracy of estimators is generally unavailable. This paper thus studies this challenging problem of estimator selection for OPE for the first time. In particular, we enable an estimator selection that is adaptive to a given OPE task, by appropriately subsampling available logged data and constructing pseudo policies useful for the underlying estimator selection task. Comprehensive experiments on both synthetic and real-world company data demonstrate that the proposed procedure substantially improves the estimator selection compared to a non-adaptive heuristic. Note that complete version with technical appendix is available on arXiv: http://arxiv.org/abs/2211.13904.", + "primary_area": "machine learning iii", + "author": "Takuma Udagawa; Haruka Kiyohara; Yusuke Narita; Yuta Saito; Kei Tateno", + "authorids": "", + "aff": "Sony Group Corporation; Tokyo Institute of Technology; Yale University; Cornell University; Sony Group Corporation", + "bibtex": "@article{Udagawa_Kiyohara_Narita_Saito_Tateno_2023, title={Policy-Adaptive Estimator Selection for Off-Policy Evaluation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26195}, DOI={10.1609/aaai.v37i8.26195}, abstractNote={Off-policy evaluation (OPE) aims to accurately evaluate the performance of counterfactual policies using only offline logged data. Although many estimators have been developed, there is no single estimator that dominates the others, because the estimators\u2019 accuracy can vary greatly depending on a given OPE task such as the evaluation policy, number of actions, and noise level. 
Thus, the data-driven estimator selection problem is becoming increasingly important and can have a significant impact on the accuracy of OPE. However, identifying the most accurate estimator using only the logged data is quite challenging because the ground-truth estimation accuracy of estimators is generally unavailable. This paper thus studies this challenging problem of estimator selection for OPE for the first time. In particular, we enable an estimator selection that is adaptive to a given OPE task, by appropriately subsampling available logged data and constructing pseudo policies useful for the underlying estimator selection task. Comprehensive experiments on both synthetic and real-world company data demonstrate that the proposed procedure substantially improves the estimator selection compared to a non-adaptive heuristic. Note that complete version with technical appendix is available on arXiv: http://arxiv.org/abs/2211.13904.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Udagawa, Takuma and Kiyohara, Haruka and Narita, Yusuke and Saito, Yuta and Tateno, Kei}, year={2023}, month={Jun.}, pages={10025-10033} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26195/25967", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26195", + "pdf_size": 536078, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13286806257664346343&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "sony.com;m.titech.ac.jp;yale.edu;cornell.edu;sony.com", + "email": "sony.com;m.titech.ac.jp;yale.edu;cornell.edu;sony.com", + "github": "", + "project": "http://arxiv.org/abs/2211.13904", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "Sony Group Corporation;Tokyo Institute of Technology;Yale University;Cornell University", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.sony.com;https://www.titech.ac.jp;https://www.yale.edu;https://www.cornell.edu", + "aff_unique_abbr": "Sony;Titech;Yale;Cornell", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1;0", + "aff_country_unique": "Japan;United States" + }, + { + "id": "article-26299", + "title": "Policy-Based Primal-Dual Methods for Convex Constrained Markov Decision Processes", + "track": "main", + "status": "Technical", + "abstract": "We study convex Constrained Markov Decision Processes (CMDPs) in which the objective is concave and the constraints are convex in the state-action occupancy measure. We propose a policy-based primal-dual algorithm that updates the primal variable via policy gradient ascent and updates the dual variable via projected sub-gradient descent. Despite the loss of additivity structure and the nonconvex nature, we establish the global convergence of the proposed algorithm by leveraging a hidden convexity in the problem, and prove the O(T^-1/3) convergence rate in terms of both optimality gap and constraint violation. When the objective is strongly concave in the occupancy measure, we prove an improved convergence rate of O(T^-1/2). By introducing a pessimistic term to the constraint, we further show that a zero constraint violation can be achieved while preserving the same convergence rate for the optimality gap. 
This work is the first one in the literature that establishes non-asymptotic convergence guarantees for policy-based primal-dual methods for solving infinite-horizon discounted convex CMDPs.", + "primary_area": "machine learning iv", + "author": "Donghao Ying; Mengzi Amy Guo; Yuhao Ding; Javad Lavaei; Zuo-Jun Shen", + "authorids": "", + "aff": "UC Berkeley, Department of Industrial Engineering and Operations Research; UC Berkeley, Department of Industrial Engineering and Operations Research; UC Berkeley, Department of Industrial Engineering and Operations Research; UC Berkeley, Department of Industrial Engineering and Operations Research; UC Berkeley, Department of Industrial Engineering and Operations Research", + "bibtex": "@article{Ying_Guo_Ding_Lavaei_Shen_2023, title={Policy-Based Primal-Dual Methods for Convex Constrained Markov Decision Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26299}, DOI={10.1609/aaai.v37i9.26299}, abstractNote={We study convex Constrained Markov Decision Processes (CMDPs) in which the objective is concave and the constraints are convex in the state-action occupancy measure. We propose a policy-based primal-dual algorithm that updates the primal variable via policy gradient ascent and updates the dual variable via projected sub-gradient descent. Despite the loss of additivity structure and the nonconvex nature, we establish the global convergence of the proposed algorithm by leveraging a hidden convexity in the problem, and prove the O(T^-1/3) convergence rate in terms of both optimality gap and constraint violation. When the objective is strongly concave in the occupancy measure, we prove an improved convergence rate of O(T^-1/2). By introducing a pessimistic term to the constraint, we further show that a zero constraint violation can be achieved while preserving the same convergence rate for the optimality gap. 
This work is the first one in the literature that establishes non-asymptotic convergence guarantees for policy-based primal-dual methods for solving infinite-horizon discounted convex CMDPs.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ying, Donghao and Guo, Mengzi Amy and Ding, Yuhao and Lavaei, Javad and Shen, Zuo-Jun}, year={2023}, month={Jun.}, pages={10963-10971} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26299/26071", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26299", + "pdf_size": 352874, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8603636351102704673&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu", + "email": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of California, Berkeley", + "aff_unique_dep": "Department of Industrial Engineering and Operations Research", + "aff_unique_url": "https://www.berkeley.edu", + "aff_unique_abbr": "UC Berkeley", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Berkeley", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26052", + "title": "Policy-Independent Behavioral Metric-Based Representation for Deep Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Behavioral metrics can calculate the distance between states or state-action pairs from the rewards and transitions difference. 
By virtue of their capability to filter out task-irrelevant information in theory, using them to shape a state embedding space becomes a new trend of representation learning for deep reinforcement learning (RL), especially when there are explicit distracting factors in observation backgrounds. However, due to the tight coupling between the metric and the RL policy, such metric-based methods may result in less informative embedding spaces which can weaken their aid to the baseline RL algorithm and even consume more samples to learn. We resolve this by proposing a new behavioral metric. It decouples the learning of RL policy and metric owing to its independence on RL policy. We theoretically justify its scalability to continuous state and action spaces and design a practical way to incorporate it into an RL procedure as a representation learning target. We evaluate our approach on DeepMind control tasks with default and distracting backgrounds. By statistically reliable evaluation protocols, our experiments demonstrate our approach is superior to previous metric-based methods in terms of sample efficiency and asymptotic performance in both backgrounds.", + "primary_area": "machine learning ii", + "author": "Weijian Liao; Zongzhang Zhang; Yang Yu", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210023, China; Institute of Artificial Intelligence and Blockchain, Guangzhou University, Guangzhou 510006, China; Peng Cheng Laboratory, Shenzhen, 518055, China", + "bibtex": "@article{Liao_Zhang_Yu_2023, title={Policy-Independent Behavioral Metric-Based Representation for Deep Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26052}, DOI={10.1609/aaai.v37i7.26052}, abstractNote={Behavioral metrics can calculate the distance between states or state-action pairs from the rewards and transitions difference. 
By virtue of their capability to filter out task-irrelevant information in theory, using them to shape a state embedding space becomes a new trend of representation learning for deep reinforcement learning (RL), especially when there are explicit distracting factors in observation backgrounds. However, due to the tight coupling between the metric and the RL policy, such metric-based methods may result in less informative embedding spaces which can weaken their aid to the baseline RL algorithm and even consume more samples to learn. We resolve this by proposing a new behavioral metric. It decouples the learning of RL policy and metric owing to its independence on RL policy. We theoretically justify its scalability to continuous state and action spaces and design a practical way to incorporate it into an RL procedure as a representation learning target. We evaluate our approach on DeepMind control tasks with default and distracting backgrounds. By statistically reliable evaluation protocols, our experiments demonstrate our approach is superior to previous metric-based methods in terms of sample efficiency and asymptotic performance in both backgrounds.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liao, Weijian and Zhang, Zongzhang and Yu, Yang}, year={2023}, month={Jun.}, pages={8746-8754} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26052/25824", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26052", + "pdf_size": 316297, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11234427496044250745&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "lamda.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Nanjing University;Guangzhou University;Peng Cheng Laboratory", + "aff_unique_dep": "National 
Key Laboratory for Novel Software Technology;Institute of Artificial Intelligence and Blockchain;", + "aff_unique_url": "http://www.nju.edu.cn;http://www.gzhu.edu.cn;", + "aff_unique_abbr": "Nanjing U;GU;", + "aff_campus_unique_index": "0;1;2", + "aff_campus_unique": "Nanjing;Guangzhou;Shenzhen", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25910", + "title": "Popularizing Fairness: Group Fairness and Individual Welfare", + "track": "main", + "status": "Technical", + "abstract": "Group-fair learning methods typically seek to ensure that some measure of prediction efficacy for (often historically) disadvantaged minority groups is comparable to that for the majority of the population. When a principal seeks to adopt a group-fair approach to replace another, the principal may face opposition from those who feel they may be harmed by the switch, and this, in turn, may deter adoption. We propose that a potential mitigation to this concern is to ensure that a group-fair model is also popular, in the sense that, for a majority of the target population, it yields a preferred distribution over outcomes compared with the conventional model. In this paper, we show that state of the art fair learning approaches are often unpopular in this sense. We propose several efficient algorithms for postprocessing an existing group-fair learning scheme to improve its popularity while retaining fairness. 
Through extensive experiments, we demonstrate that the proposed postprocessing approaches are highly effective in practice.", + "primary_area": "machine learning i", + "author": "Andrew Estornell; Sanmay Das; Brendan Juba; Yevgeniy Vorobeychik", + "authorids": "", + "aff": "Washington University in Saint Louis; George Mason University; Washington University in Saint Louis; Washington University in Saint Louis", + "bibtex": "@article{Estornell_Das_Juba_Vorobeychik_2023, title={Popularizing Fairness: Group Fairness and Individual Welfare}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25910}, DOI={10.1609/aaai.v37i6.25910}, abstractNote={Group-fair learning methods typically seek to ensure that some measure of prediction efficacy for (often historically) disadvantaged minority groups is comparable to that for the majority of the population. When a principal seeks to adopt a group-fair approach to replace another, the principal may face opposition from those who feel they may be harmed by the switch, and this, in turn, may deter adoption. We propose that a potential mitigation to this concern is to ensure that a group-fair model is also popular, in the sense that, for a majority of the target population, it yields a preferred distribution over outcomes compared with the conventional model. In this paper, we show that state of the art fair learning approaches are often unpopular in this sense. We propose several efficient algorithms for postprocessing an existing group-fair learning scheme to improve its popularity while retaining fairness. 
Through extensive experiments, we demonstrate that the proposed postprocessing approaches are highly effective in practice.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Estornell, Andrew and Das, Sanmay and Juba, Brendan and Vorobeychik, Yevgeniy}, year={2023}, month={Jun.}, pages={7485-7493} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25910/25682", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25910", + "pdf_size": 762940, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:boWs3NtJSX8J:scholar.google.com/&scioq=Popularizing+Fairness:+Group+Fairness+and+Individual+Welfare&hl=en&as_sdt=0,5", + "gs_version_total": 7, + "aff_domain": "wustl.edu;gmu.edu;wustl.edu;wustl.edu", + "email": "wustl.edu;gmu.edu;wustl.edu;wustl.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Washington University in St. Louis;George Mason University", + "aff_unique_dep": ";", + "aff_unique_url": "https://wustl.edu;https://www.gmu.edu", + "aff_unique_abbr": "WUSTL;GMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "St. Louis;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25195", + "title": "Pose-Guided 3D Human Generation in Indoor Scene", + "track": "main", + "status": "Technical", + "abstract": "In this work, we address the problem of scene-aware 3D human avatar generation based on human-scene interactions. In particular, we pay attention to the fact that physical contact between a 3D human and a scene (i.e., physical human-scene interactions) requires a geometrical alignment to generate natural 3D human avatar. Motivated by this fact, we present a new 3D human generation framework that considers geometric alignment on potential contact areas between 3D human avatars and their surroundings. 
In addition, we introduce a compact yet effective human pose classifier that classifies the human pose and provides potential contact areas of the 3D human avatar. It allows us to adaptively use geometric alignment loss according to the classified human pose. Compared to state-of-the-art method, our method can generate physically and semantically plausible 3D humans that interact naturally with 3D scenes without additional post-processing. In our evaluations, we achieve the improvements with more plausible interactions and more variety of poses than prior research in qualitative and quantitative analysis. Project page: https://bupyeonghealer.github.io/phin/.", + "primary_area": "computer vision i", + "author": "Minseok Kim; Changwoo Kang; Jeongin Park; Kyungdon Joo", + "authorids": "", + "aff": "Artificial Intelligence Graduate School, UNIST; Artificial Intelligence Graduate School, UNIST; Artificial Intelligence Graduate School, UNIST; Artificial Intelligence Graduate School, UNIST", + "bibtex": "@article{Kim_Kang_Park_Joo_2023, title={Pose-Guided 3D Human Generation in Indoor Scene}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25195}, DOI={10.1609/aaai.v37i1.25195}, abstractNote={In this work, we address the problem of scene-aware 3D human avatar generation based on human-scene interactions. In particular, we pay attention to the fact that physical contact between a 3D human and a scene (i.e., physical human-scene interactions) requires a geometrical alignment to generate natural 3D human avatar. Motivated by this fact, we present a new 3D human generation framework that considers geometric alignment on potential contact areas between 3D human avatars and their surroundings. In addition, we introduce a compact yet effective human pose classifier that classifies the human pose and provides potential contact areas of the 3D human avatar. It allows us to adaptively use geometric alignment loss according to the classified human pose. 
Compared to state-of-the-art method, our method can generate physically and semantically plausible 3D humans that interact naturally with 3D scenes without additional post-processing. In our evaluations, we achieve the improvements with more plausible interactions and more variety of poses than prior research in qualitative and quantitative analysis. Project page: https://bupyeonghealer.github.io/phin/.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Minseok and Kang, Changwoo and Park, Jeongin and Joo, Kyungdon}, year={2023}, month={Jun.}, pages={1133-1141} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25195/24967", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25195", + "pdf_size": 17148734, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4082059101279480168&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "unist.ac.kr;unist.ac.kr;unist.ac.kr;unist.ac.kr", + "email": "unist.ac.kr;unist.ac.kr;unist.ac.kr;unist.ac.kr", + "github": "", + "project": "https://bupyeonghealer.github.io/phin/", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Ulsan National Institute of Science and Technology", + "aff_unique_dep": "Artificial Intelligence Graduate School", + "aff_unique_url": "https://www.unist.ac.kr", + "aff_unique_abbr": "UNIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25213", + "title": "Pose-Oriented Transformer with Uncertainty-Guided Refinement for 2D-to-3D Human Pose Estimation", + "track": "main", + "status": "Technical", + "abstract": "There has been a recent surge of interest in introducing transformers to 3D human pose estimation (HPE) due to their powerful capabilities in modeling long-term dependencies. 
However, existing transformer-based methods treat body joints as equally important inputs and ignore the prior knowledge of human skeleton topology in the self-attention mechanism. To tackle this issue, in this paper, we propose a Pose-Oriented Transformer (POT) with uncertainty guided refinement for 3D HPE. Specifically, we first develop novel pose-oriented self-attention mechanism and distance-related position embedding for POT to explicitly exploit the human skeleton topology. The pose-oriented self-attention mechanism explicitly models the topological interactions between body joints, whereas the distance-related position embedding encodes the distance of joints to the root joint to distinguish groups of joints with different difficulties in regression. Furthermore, we present an Uncertainty-Guided Refinement Network (UGRN) to refine pose predictions from POT, especially for the difficult joints, by considering the estimated uncertainty of each joint with uncertainty-guided sampling strategy and self-attention mechanism. 
Extensive experiments demonstrate that our method significantly outperforms the state-of-the-art methods with reduced model parameters on 3D HPE benchmarks such as Human3.6M and MPI-INF-3DHP.", + "primary_area": "computer vision i", + "author": "Han Li; Bowen Shi; Wenrui Dai; Hongwei Zheng; Botao Wang; Yu Sun; Min Guo; Chenglin Li; Junni Zou; Hongkai Xiong", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Qualcomm AI Research; Qualcomm AI Research; Qualcomm AI Research; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Li_Shi_Dai_Zheng_Wang_Sun_Guo_Li_Zou_Xiong_2023, title={Pose-Oriented Transformer with Uncertainty-Guided Refinement for 2D-to-3D Human Pose Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25213}, DOI={10.1609/aaai.v37i1.25213}, abstractNote={There has been a recent surge of interest in introducing transformers to 3D human pose estimation (HPE) due to their powerful capabilities in modeling long-term dependencies. However, existing transformer-based methods treat body joints as equally important inputs and ignore the prior knowledge of human skeleton topology in the self-attention mechanism. To tackle this issue, in this paper, we propose a Pose-Oriented Transformer (POT) with uncertainty guided refinement for 3D HPE. Specifically, we first develop novel pose-oriented self-attention mechanism and distance-related position embedding for POT to explicitly exploit the human skeleton topology. The pose-oriented self-attention mechanism explicitly models the topological interactions between body joints, whereas the distance-related position embedding encodes the distance of joints to the root joint to distinguish groups of joints with different difficulties in regression. 
Furthermore, we present an Uncertainty-Guided Refinement Network (UGRN) to refine pose predictions from POT, especially for the difficult joints, by considering the estimated uncertainty of each joint with uncertainty-guided sampling strategy and self-attention mechanism. Extensive experiments demonstrate that our method significantly outperforms the state-of-the-art methods with reduced model parameters on 3D HPE benchmarks such as Human3.6M and MPI-INF-3DHP.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Han and Shi, Bowen and Dai, Wenrui and Zheng, Hongwei and Wang, Botao and Sun, Yu and Guo, Min and Li, Chenglin and Zou, Junni and Xiong, Hongkai}, year={2023}, month={Jun.}, pages={1296-1304} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25213/24985", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25213", + "pdf_size": 1481710, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7421613566752629108&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;qti.qualcomm.com;qti.qualcomm.com;qti.qualcomm.com;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;qti.qualcomm.com;qti.qualcomm.com;qti.qualcomm.com;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;1;1;1;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Qualcomm", + "aff_unique_dep": ";Qualcomm AI Research", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.qualcomm.com/research", + "aff_unique_abbr": "SJTU;Qualcomm AI Research", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;1;1;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25461", + "title": "Positional Label for Self-Supervised Vision Transformer", + "track": 
"main", + "status": "Technical", + "abstract": "Positional encoding is important for vision transformer (ViT) to capture the spatial structure of the input image. General effectiveness has been proven in ViT. In our work we propose to train ViT to recognize the positional label of patches of the input image, this apparently simple task actually yields a meaningful self-supervisory task. Based on previous work on ViT positional encoding, we propose two positional labels dedicated to 2D images including absolute position and relative position. Our positional labels can be easily plugged into various current ViT variants. It can work in two ways: (a) As an auxiliary training target for vanilla ViT for better performance. (b) Combine the self-supervised ViT to provide a more powerful self-supervised signal for semantic feature learning. Experiments demonstrate that with the proposed self-supervised methods, ViT-B and Swin-B gain improvements of 1.20% (top-1 Acc) and 0.74% (top-1 Acc) on ImageNet, respectively, and 6.15% and 1.14% improvement on Mini-ImageNet. 
The code is publicly available at: https://github.com/zhangzhemin/PositionalLabel.", + "primary_area": "computer vision iii", + "author": "Zhemin Zhang; Xun Gong", + "authorids": "", + "aff": "School of Computing and Artificial Intelligence, Southwest Jiaotong University, Chengdu, Sichuan, China + Engineering Research Center of Sustainable Urban Intelligent Transportation, Ministry of Education, China + Manufacturing Industry Chains Collaboration and Information Support Technology Key Laboratory of Sichuan Province, Chengdu, Sichuan, China; School of Computing and Artificial Intelligence, Southwest Jiaotong University, Chengdu, Sichuan, China + Engineering Research Center of Sustainable Urban Intelligent Transportation, Ministry of Education, China + Manufacturing Industry Chains Collaboration and Information Support Technology Key Laboratory of Sichuan Province, Chengdu, Sichuan, China", + "bibtex": "@article{Zhang_Gong_2023, title={Positional Label for Self-Supervised Vision Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25461}, DOI={10.1609/aaai.v37i3.25461}, abstractNote={Positional encoding is important for vision transformer (ViT) to capture the spatial structure of the input image. General effectiveness has been proven in ViT. In our work we propose to train ViT to recognize the positional label of patches of the input image, this apparently simple task actually yields a meaningful self-supervisory task. Based on previous work on ViT positional encoding, we propose two positional labels dedicated to 2D images including absolute position and relative position. Our positional labels can be easily plugged into various current ViT variants. It can work in two ways: (a) As an auxiliary training target for vanilla ViT for better performance. (b) Combine the self-supervised ViT to provide a more powerful self-supervised signal for semantic feature learning. 
Experiments demonstrate that with the proposed self-supervised methods, ViT-B and Swin-B gain improvements of 1.20% (top-1 Acc) and 0.74% (top-1 Acc) on ImageNet, respectively, and 6.15% and 1.14% improvement on Mini-ImageNet. The code is publicly available at: https://github.com/zhangzhemin/PositionalLabel.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zhemin and Gong, Xun}, year={2023}, month={Jun.}, pages={3516-3524} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25461/25233", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25461", + "pdf_size": 627349, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2275585794402171350&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "my.swjtu.edu.cn;swjtu.edu.cn", + "email": "my.swjtu.edu.cn;swjtu.edu.cn", + "github": "https://github.com/zhangzhemin/PositionalLabel", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1+2;0+1+2", + "aff_unique_norm": "Southwest Jiaotong University;Engineering Research Center of Sustainable Urban Intelligent Transportation;Sichuan Province Key Laboratory of Manufacturing Industry Chains Collaboration and Information Support Technology", + "aff_unique_dep": "School of Computing and Artificial Intelligence;Ministry of Education;", + "aff_unique_url": "https://www.southwestjiaotong.edu.cn;;", + "aff_unique_abbr": "SWJTU;;", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Chengdu;", + "aff_country_unique_index": "0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26051", + "title": "Positive Distribution Pollution: Rethinking Positive Unlabeled Learning from a Unified Perspective", + "track": "main", + "status": "Technical", + "abstract": "Positive Unlabeled (PU) learning, which has a wide range of applications, is becoming increasingly prevalent. 
However, it suffers from problems such as data imbalance, selection bias, and prior agnostic in real scenarios. Existing studies focus on addressing part of these problems, which fail to provide a unified perspective to understand these problems. In this paper, we first rethink these problems by analyzing a typical PU scenario and come up with an insightful point of view that all these problems are inherently connected to one problem, i.e., positive distribution pollution, which refers to the inaccuracy in estimating positive data distribution under very little labeled data. Then, inspired by this insight, we devise a variational model named CoVPU, which addresses all three problems in a unified perspective by targeting the positive distribution pollution problem. CoVPU not only accurately separates the positive data from the unlabeled data based on discrete normalizing flows, but also effectively approximates the positive distribution based on our derived unbiased rebalanced risk estimator and supervises the approximation based on a novel prior-free variational loss. Rigorous theoretical analysis proves the convergence of CoVPU to an optimal Bayesian classifier. 
Extensive experiments demonstrate the superiority of CoVPU over the state-of-the-art PU learning methods under these problems.", + "primary_area": "machine learning ii", + "author": "Qianqiao Liang; Mengying Zhu; Yan Wang; Xiuyuan Wang; Wanjia Zhao; Mengyuan Yang; Hua Wei; Bing Han; Xiaolin Zheng", + "authorids": "", + "aff": "College of Computer Science, Zhejiang University, China; College of Computer Science, Zhejiang University, China; School of Computing, Macquarie University, Australia; College of Computer Science, Zhejiang University, China; College of Computer Science, Zhejiang University, China; College of Computer Science, Zhejiang University, China; MYbank, Ant Group, China; MYbank, Ant Group, China; College of Computer Science, Zhejiang University, China", + "bibtex": "@article{Liang_Zhu_Wang_Wang_Zhao_Yang_Wei_Han_Zheng_2023, title={Positive Distribution Pollution: Rethinking Positive Unlabeled Learning from a Unified Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26051}, DOI={10.1609/aaai.v37i7.26051}, abstractNote={Positive Unlabeled (PU) learning, which has a wide range of applications, is becoming increasingly prevalent. However, it suffers from problems such as data imbalance, selection bias, and prior agnostic in real scenarios. Existing studies focus on addressing part of these problems, which fail to provide a unified perspective to understand these problems. In this paper, we first rethink these problems by analyzing a typical PU scenario and come up with an insightful point of view that all these problems are inherently connected to one problem, i.e., positive distribution pollution, which refers to the inaccuracy in estimating positive data distribution under very little labeled data. Then, inspired by this insight, we devise a variational model named CoVPU, which addresses all three problems in a unified perspective by targeting the positive distribution pollution problem. 
CoVPU not only accurately separates the positive data from the unlabeled data based on discrete normalizing flows, but also effectively approximates the positive distribution based on our derived unbiased rebalanced risk estimator and supervises the approximation based on a novel prior-free variational loss. Rigorous theoretical analysis proves the convergence of CoVPU to an optimal Bayesian classifier. Extensive experiments demonstrate the superiority of CoVPU over the state-of-the-art PU learning methods under these problems.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Qianqiao and Zhu, Mengying and Wang, Yan and Wang, Xiuyuan and Zhao, Wanjia and Yang, Mengyuan and Wei, Hua and Han, Bing and Zheng, Xiaolin}, year={2023}, month={Jun.}, pages={8737-8745} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26051/25823", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26051", + "pdf_size": 3659347, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11313798628981791824&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "zju.edu.cn;zju.edu.cn;mq.edu.au;zju.edu.cn;zju.edu.cn;zju.edu.cn;antgroup.com;antgroup.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;mq.edu.au;zju.edu.cn;zju.edu.cn;zju.edu.cn;antgroup.com;antgroup.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0;0;0;2;2;0", + "aff_unique_norm": "Zhejiang University;Macquarie University;MYbank", + "aff_unique_dep": "College of Computer Science;School of Computing;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.mq.edu.au;", + "aff_unique_abbr": "ZJU;MQ;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26167", + "title": "Post-hoc Uncertainty Learning Using a Dirichlet Meta-Model", + 
"track": "main", + "status": "Technical", + "abstract": "It is known that neural networks have the problem of being over-confident when directly using the output label distribution to generate uncertainty measures. Existing methods mainly resolve this issue by retraining the entire model to impose the uncertainty quantification capability so that the learned model can achieve desired performance in accuracy and uncertainty prediction simultaneously. However, training the model from scratch is computationally expensive, and a trade-off might exist between prediction accuracy and uncertainty quantification. To this end, we consider a more practical post-hoc uncertainty learning setting, where a well-trained base model is given, and we focus on the uncertainty quantification task at the second stage of training. We propose a novel Bayesian uncertainty learning approach using the Dirichlet meta-model, which is effective and computationally efficient. Our proposed method requires no additional training data and is flexible enough to quantify different uncertainties and easily adapt to different application settings, including out-of-domain data detection, misclassification detection, and trustworthy transfer learning. 
Finally, we demonstrate our proposed meta-model approach's flexibility and superior empirical performance on these applications over multiple representative image classification benchmarks.", + "primary_area": "machine learning iii", + "author": "Maohao Shen; Yuheng Bu; Prasanna Sattigeri; Soumya Ghosh; Subhro Das; Gregory Wornell", + "authorids": "", + "aff": "Massachusetts Institute of Technology; University of Florida; MIT-IBM Watson AI Lab, IBM Research; MIT-IBM Watson AI Lab, IBM Research; MIT-IBM Watson AI Lab, IBM Research; Massachusetts Institute of Technology", + "bibtex": "@article{Shen_Bu_Sattigeri_Ghosh_Das_Wornell_2023, title={Post-hoc Uncertainty Learning Using a Dirichlet Meta-Model}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26167}, DOI={10.1609/aaai.v37i8.26167}, abstractNote={It is known that neural networks have the problem of being over-confident when directly using the output label distribution to generate uncertainty measures. Existing methods mainly resolve this issue by retraining the entire model to impose the uncertainty quantification capability so that the learned model can achieve desired performance in accuracy and uncertainty prediction simultaneously. However, training the model from scratch is computationally expensive, and a trade-off might exist between prediction accuracy and uncertainty quantification. To this end, we consider a more practical post-hoc uncertainty learning setting, where a well-trained base model is given, and we focus on the uncertainty quantification task at the second stage of training. We propose a novel Bayesian uncertainty learning approach using the Dirichlet meta-model, which is effective and computationally efficient. 
Our proposed method requires no additional training data and is flexible enough to quantify different uncertainties and easily adapt to different application settings, including out-of-domain data detection, misclassification detection, and trustworthy transfer learning. Finally, we demonstrate our proposed meta-model approach\u2019s flexibility and superior empirical performance on these applications over multiple representative image classification benchmarks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shen, Maohao and Bu, Yuheng and Sattigeri, Prasanna and Ghosh, Soumya and Das, Subhro and Wornell, Gregory}, year={2023}, month={Jun.}, pages={9772-9781} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26167/25939", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26167", + "pdf_size": 296077, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2041465423617638458&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "mit.edu; ; ; ; ; ", + "email": "mit.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;2;2;0", + "aff_unique_norm": "Massachusetts Institute of Technology;University of Florida;MIT-IBM Watson AI Lab", + "aff_unique_dep": ";;AI Lab", + "aff_unique_url": "https://web.mit.edu;https://www.ufl.edu;https://www.ibmwatsonai.org/", + "aff_unique_abbr": "MIT;UF;MIT-IBM AI Lab", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25853", + "title": "Posterior Coreset Construction with Kernelized Stein Discrepancy for Model-Based Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Model-based approaches to reinforcement learning (MBRL) exhibit favorable performance in practice, but their theoretical guarantees in large spaces are mostly 
restricted to the setting when transition model is Gaussian or Lipschitz, and demands a posterior estimate whose representational complexity grows unbounded with time. In this work, we develop a novel MBRL method (i) which relaxes the assumptions on the target transition model to belong to a generic family of mixture models; (ii) is applicable to large-scale training by incorporating a compression step such that the posterior estimate consists of a Bayesian coreset of only statistically significant past state-action pairs; and (iii) exhibits a sublinear Bayesian regret.\nTo achieve these results, we adopt an approach based upon Stein's method, which, under a smoothness condition on the constructed posterior and target, allows distributional distance to be evaluated in closed form as the kernelized Stein discrepancy (KSD). The aforementioned compression step is then computed in terms of greedily retaining only those samples which are more than a certain KSD away from the previous model estimate.\nExperimentally, we observe that this approach is competitive with several state-of-the-art RL methodologies, and can achieve up-to 50 percent reduction in wall clock time in some continuous control environments.", + "primary_area": "machine learning i", + "author": "Souradip Chakraborty; Amrit Singh Bedi; Pratap Tokekar; Alec Koppel; Brian Sadler; Furong Huang; Dinesh Manocha", + "authorids": "", + "aff": "University of Maryland, College Park, USA; University of Maryland, College Park, USA; University of Maryland, College Park, USA; JP Morgan AI Research, NY, USA; DEVCOM Army Research Laboratory, Adelphi, USA; University of Maryland, College Park, USA; University of Maryland, College Park, USA", + "bibtex": "@article{Chakraborty_Bedi_Tokekar_Koppel_Sadler_Huang_Manocha_2023, title={Posterior Coreset Construction with Kernelized Stein Discrepancy for Model-Based Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25853}, 
DOI={10.1609/aaai.v37i6.25853}, abstractNote={Model-based approaches to reinforcement learning (MBRL) exhibit favorable performance in practice, but their theoretical guarantees in large spaces are mostly restricted to the setting when transition model is Gaussian or Lipschitz, and demands a posterior estimate whose representational complexity grows unbounded with time. In this work, we develop a novel MBRL method (i) which relaxes the assumptions on the target transition model to belong to a generic family of mixture models; (ii) is applicable to large-scale training by incorporating a compression step such that the posterior estimate consists of a Bayesian coreset of only statistically significant past state-action pairs; and (iii) exhibits a sublinear Bayesian regret.\nTo achieve these results, we adopt an approach based upon Stein\u2019s method, which, under a smoothness condition on the constructed posterior and target, allows distributional distance to be evaluated in closed form as the kernelized Stein discrepancy (KSD). 
The aforementioned compression step is then computed in terms of greedily retaining only those samples which are more than a certain KSD away from the previous model estimate.\nExperimentally, we observe that this approach is competitive with several state-of-the-art RL methodologies, and can achieve up-to 50 percent reduction in wall clock time in some continuous control environments.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chakraborty, Souradip and Bedi, Amrit Singh and Tokekar, Pratap and Koppel, Alec and Sadler, Brian and Huang, Furong and Manocha, Dinesh}, year={2023}, month={Jun.}, pages={6980-6988} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25853/25625", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25853", + "pdf_size": 220828, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12647017399080513183&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "umd.edu;umd.edu;umd.edu;jpmchase.com;army.mil;umd.edu;umd.edu", + "email": "umd.edu;umd.edu;umd.edu;jpmchase.com;army.mil;umd.edu;umd.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;2;0;0", + "aff_unique_norm": "University of Maryland;JP Morgan AI Research;DEVCOM Army Research Laboratory", + "aff_unique_dep": ";AI Research;", + "aff_unique_url": "https://www.umd.edu;https://www.jpmorgan.com/global/research;", + "aff_unique_abbr": "UMD;JPM AI;", + "aff_campus_unique_index": "0;0;0;1;0;0", + "aff_campus_unique": "College Park;New York;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26724", + "title": "PowRL: A Reinforcement Learning Framework for Robust Management of Power Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Power grids, across the world, play an important societal and economical role by providing uninterrupted, 
reliable and transient-free power to several industries, businesses and household consumers. With the advent of renewable power resources and EVs resulting into uncertain generation and highly dynamic load demands, it has become ever so important to ensure robust operation of power networks through suitable management of transient stability issues and localize the events of blackouts. In the light of ever increasing stress on the modern grid infrastructure and the grid operators, this paper presents a reinforcement learning (RL) framework, PowRL, to mitigate the effects of unexpected network events, as well as reliably maintain electricity everywhere on the network at all times. The PowRL leverages a novel heuristic for overload management, along with the RL-guided decision making on optimal topology selection to ensure that the grid is operated safely and reliably (with no overloads). PowRL is benchmarked on a variety of competition datasets hosted by the L2RPN (Learning to Run a Power Network). Even with its reduced action space, PowRL tops the leaderboard in the L2RPN NeurIPS 2020 challenge (Robustness track) at an aggregate level, while also being the top performing agent in the L2RPN WCCI 2020 challenge. 
Moreover, detailed analysis depicts state-of-the-art performances by the PowRL agent in some of the test scenarios.", + "primary_area": "safe and robust ai", + "author": "Anandsingh Chauhan; Mayank Baranwal; Ansuma Basumatary", + "authorids": "", + "aff": "Tata Consultancy Services Research, Mumbai; Indian Institute of Technology, Bombay + Tata Consultancy Services Research, Mumbai; SalesKen, Bengaluru", + "bibtex": "@article{Chauhan_Baranwal_Basumatary_2023, title={PowRL: A Reinforcement Learning Framework for Robust Management of Power Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26724}, DOI={10.1609/aaai.v37i12.26724}, abstractNote={Power grids, across the world, play an important societal and economical role by providing uninterrupted, reliable and transient-free power to several industries, businesses and household consumers. With the advent of renewable power resources and EVs resulting into uncertain generation and highly dynamic load demands, it has become ever so important to ensure robust operation of power networks through suitable management of transient stability issues and localize the events of blackouts. In the light of ever increasing stress on the modern grid infrastructure and the grid operators, this paper presents a reinforcement learning (RL) framework, PowRL, to mitigate the effects of unexpected network events, as well as reliably maintain electricity everywhere on the network at all times. The PowRL leverages a novel heuristic for overload management, along with the RL-guided decision making on optimal topology selection to ensure that the grid is operated safely and reliably (with no overloads). PowRL is benchmarked on a variety of competition datasets hosted by the L2RPN (Learning to Run a Power Network). 
Even with its reduced action space, PowRL tops the leaderboard in the L2RPN NeurIPS 2020 challenge (Robustness track) at an aggregate level, while also being the top performing agent in the L2RPN WCCI 2020 challenge. Moreover, detailed analysis depicts state-of-the-art performances by the PowRL agent in some of the test scenarios.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chauhan, Anandsingh and Baranwal, Mayank and Basumatary, Ansuma}, year={2023}, month={Jun.}, pages={14757-14764} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26724/26496", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26724", + "pdf_size": 5003693, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14594372099509088352&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 7, + "aff_domain": "tcs.com;tcs.com;gmail.com", + "email": "tcs.com;tcs.com;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+0;2", + "aff_unique_norm": "Tata Consultancy Services;Indian Institute of Technology Bombay;SalesKen", + "aff_unique_dep": "Research;;", + "aff_unique_url": "https://www.tcs.com;https://www.iitb.ac.in;", + "aff_unique_abbr": "TCS;IIT Bombay;", + "aff_campus_unique_index": "0;1+0", + "aff_campus_unique": "Mumbai;Bombay;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25612", + "title": "Practical Cross-System Shilling Attacks with Limited Access to Data", + "track": "main", + "status": "Technical", + "abstract": "In shilling attacks, an adversarial party injects a few fake user profiles into a Recommender System (RS) so that the target item can be promoted or demoted. Although much effort has been devoted to developing shilling attack methods, we find that existing approaches are still far from practical. 
In this paper, we analyze the properties a practical shilling attack method should have and propose a new concept of Cross-system Attack. With the idea of Cross-system Attack, we design a Practical Cross-system Shilling Attack (PC-Attack) framework that requires little information about the victim RS model and the target RS data for conducting attacks. PC-Attack is trained to capture graph topology knowledge from public RS data in a self-supervised manner. Then, it is fine-tuned on a small portion of target data that is easy to access to construct fake profiles. Extensive experiments have demonstrated the superiority of PC-Attack over state-of-the-art baselines. Our implementation of PC-Attack is available at https://github.com/KDEGroup/PC-Attack.", + "primary_area": "data mining and knowledge management", + "author": "Meifang Zeng; Ke Li; Bingchuan Jiang; Liujuan Cao; Hui Li", + "authorids": "", + "aff": "School of Informatics, Xiamen University; PLA Strategic Support Force Information Engineering University; PLA Strategic Support Force Information Engineering University; School of Informatics, Xiamen University; School of Informatics, Xiamen University", + "bibtex": "@article{Zeng_Li_Jiang_Cao_Li_2023, title={Practical Cross-System Shilling Attacks with Limited Access to Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25612}, DOI={10.1609/aaai.v37i4.25612}, abstractNote={In shilling attacks, an adversarial party injects a few fake user profiles into a Recommender System (RS) so that the target item can be promoted or demoted. Although much effort has been devoted to developing shilling attack methods, we find that existing approaches are still far from practical. In this paper, we analyze the properties a practical shilling attack method should have and propose a new concept of Cross-system Attack. 
With the idea of Cross-system Attack, we design a Practical Cross-system Shilling Attack (PC-Attack) framework that requires little information about the victim RS model and the target RS data for conducting attacks. PC-Attack is trained to capture graph topology knowledge from public RS data in a self-supervised manner. Then, it is fine-tuned on a small portion of target data that is easy to access to construct fake profiles. Extensive experiments have demonstrated the superiority of PC-Attack over state-of-the-art baselines. Our implementation of PC-Attack is available at https://github.com/KDEGroup/PC-Attack.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Meifang and Li, Ke and Jiang, Bingchuan and Cao, Liujuan and Li, Hui}, year={2023}, month={Jun.}, pages={4864-4874} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25612/25384", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25612", + "pdf_size": 562285, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9258631665061364949&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.xmu.edu.cn;163.com;163.com;xmu.edu.cn;xmu.edu.cn", + "email": "stu.xmu.edu.cn;163.com;163.com;xmu.edu.cn;xmu.edu.cn", + "github": "https://github.com/KDEGroup/PC-Attack", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;0", + "aff_unique_norm": "Xiamen University;PLA Strategic Support Force Information Engineering University", + "aff_unique_dep": "School of Informatics;", + "aff_unique_url": "https://www.xmu.edu.cn;", + "aff_unique_abbr": "XMU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26693", + "title": "Practical Disruption of Image Translation Deepfake Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "By harnessing the latest 
advances in deep learning, image-to-image translation architectures have recently achieved impressive capabilities. Unfortunately, the growing representational power of these architectures has prominent unethical uses. Among these, the threats of (1) face manipulation (\"DeepFakes\") used for misinformation or pornographic use (2) \"DeepNude\" manipulations of body images to remove clothes from individuals, etc. Several works tackle the task of disrupting such image translation networks by inserting imperceptible adversarial attacks into the input image. Nevertheless, these works have limitations that may result in disruptions that are not practical in the real world. Specifically, most works generate disruptions in a white-box scenario, assuming perfect knowledge about the image translation network. The few remaining works that assume a black-box scenario require a large number of queries to successfully disrupt the adversary's image translation network. In this work we propose Leaking Transferable Perturbations (LTP), an algorithm that significantly reduces the number of queries needed to disrupt an image translation network by dynamically re-purposing previous disruptions into new query efficient disruptions.", + "primary_area": "ai for social impact", + "author": "Nataniel Ruiz; Sarah Adel Bargal; Cihang Xie; Stan Sclaroff", + "authorids": "", + "aff": "Boston University; Georgetown University; University of California, Santa Cruz; Boston University", + "bibtex": "@article{Ruiz_Bargal_Xie_Sclaroff_2023, title={Practical Disruption of Image Translation Deepfake Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26693}, DOI={10.1609/aaai.v37i12.26693}, abstractNote={By harnessing the latest advances in deep learning, image-to-image translation architectures have recently achieved impressive capabilities. Unfortunately, the growing representational power of these architectures has prominent unethical uses. 
Among these, the threats of (1) face manipulation ("DeepFakes") used for misinformation or pornographic use (2) "DeepNude" manipulations of body images to remove clothes from individuals, etc. Several works tackle the task of disrupting such image translation networks by inserting imperceptible adversarial attacks into the input image. Nevertheless, these works have limitations that may result in disruptions that are not practical in the real world. Specifically, most works generate disruptions in a white-box scenario, assuming perfect knowledge about the image translation network. The few remaining works that assume a black-box scenario require a large number of queries to successfully disrupt the adversary\u2019s image translation network. In this work we propose Leaking Transferable Perturbations (LTP), an algorithm that significantly reduces the number of queries needed to disrupt an image translation network by dynamically re-purposing previous disruptions into new query efficient disruptions.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ruiz, Nataniel and Bargal, Sarah Adel and Xie, Cihang and Sclaroff, Stan}, year={2023}, month={Jun.}, pages={14478-14486} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26693/26465", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26693", + "pdf_size": 3848154, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12494954613015792093&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "bu.edu;georgetown.edu;ucsc.edu;bu.edu", + "email": "bu.edu;georgetown.edu;ucsc.edu;bu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Boston University;Georgetown University;University of California, Santa Cruz", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.bu.edu;https://www.georgetown.edu;https://www.ucsc.edu", + "aff_unique_abbr": 
"BU;GU;UCSC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Santa Cruz", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26236", + "title": "Practical Markov Boundary Learning without Strong Assumptions", + "track": "main", + "status": "Technical", + "abstract": "Theoretically, the Markov boundary (MB) is the optimal solution for feature selection. However, existing MB learning algorithms often fail to identify some critical features in real-world feature selection tasks, mainly because the strict assumptions of existing algorithms, on either data distribution, variable types, or correctness of criteria, cannot be satisfied in application scenarios. This paper takes further steps toward opening the door to real-world applications for MB. We contribute in particular to a practical MB learning strategy, which can maintain feasibility and effectiveness in real-world data where variables can be numerical or categorical with linear or nonlinear, pairwise or multivariate relationships. Specifically, the equivalence between MB and the minimal conditional covariance operator (CCO) is investigated, which inspires us to design the objective function based on the predictability evaluation of the mapping variables in a reproducing kernel Hilbert space. Based on this, a kernel MB learning algorithm is proposed, where nonlinear multivariate dependence could be considered without extra requirements on data distribution and variable types. 
Extensive experiments demonstrate the efficacy of these contributions.", + "primary_area": "machine learning iv", + "author": "Xingyu Wu; Bingbing Jiang; Tianhao Wu; Huanhuan Chen", + "authorids": "", + "aff": "School of Computer Science and Technology, University of Science and Technology of China; School of Information Science and Engineering, Hangzhou Normal University; School of Data Science, University of Science and Technology of China; School of Computer Science and Technology, University of Science and Technology of China", + "bibtex": "@article{Wu_Jiang_Wu_Chen_2023, title={Practical Markov Boundary Learning without Strong Assumptions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26236}, DOI={10.1609/aaai.v37i9.26236}, abstractNote={Theoretically, the Markov boundary (MB) is the optimal solution for feature selection. However, existing MB learning algorithms often fail to identify some critical features in real-world feature selection tasks, mainly because the strict assumptions of existing algorithms, on either data distribution, variable types, or correctness of criteria, cannot be satisfied in application scenarios. This paper takes further steps toward opening the door to real-world applications for MB. We contribute in particular to a practical MB learning strategy, which can maintain feasibility and effectiveness in real-world data where variables can be numerical or categorical with linear or nonlinear, pairwise or multivariate relationships. Specifically, the equivalence between MB and the minimal conditional covariance operator (CCO) is investigated, which inspires us to design the objective function based on the predictability evaluation of the mapping variables in a reproducing kernel Hilbert space. Based on this, a kernel MB learning algorithm is proposed, where nonlinear multivariate dependence could be considered without extra requirements on data distribution and variable types. 
Extensive experiments demonstrate the efficacy of these contributions.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Xingyu and Jiang, Bingbing and Wu, Tianhao and Chen, Huanhuan}, year={2023}, month={Jun.}, pages={10388-10398} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26236/26008", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26236", + "pdf_size": 368070, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12447273098086703532&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;hznu.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;hznu.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Science and Technology of China;Hangzhou Normal University", + "aff_unique_dep": "School of Computer Science and Technology;School of Information Science and Engineering", + "aff_unique_url": "http://www.ustc.edu.cn;http://www.hznu.edu.cn", + "aff_unique_abbr": "USTC;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25885", + "title": "Practical Parallel Algorithms for Submodular Maximization Subject to a Knapsack Constraint with Nearly Optimal Adaptivity", + "track": "main", + "status": "Technical", + "abstract": "Submodular maximization has wide applications in machine learning and data mining, where massive datasets have brought the great need for designing efficient and parallelizable algorithms. One measure of the parallelizability of a submodular maximization algorithm is its adaptivity complexity, which indicates the number of sequential rounds where a polynomial number of queries to the objective function can be executed in parallel. 
In this paper, we study the problem of non-monotone submodular maximization subject to a knapsack constraint, and propose the first combinatorial algorithm achieving an (8+epsilon)-approximation under O(log n) adaptive complexity, which is optimal up to a factor of O(loglog n). Moreover, under slightly larger adaptivity, we also propose approximation algorithms with nearly optimal query complexity of O(n), while achieving better approximation ratios. We show that our algorithms can also be applied to the special case of submodular maximization subject to a cardinality constraint, and achieve performance bounds comparable with those of state-of-the-art algorithms. Finally, the effectiveness of our approach is demonstrated by extensive experiments on real-world applications.", + "primary_area": "machine learning i", + "author": "Shuang Cui; Kai Han; Jing Tang; He Huang; Xueying Li; Aakas Zhiyuli", + "authorids": "", + "aff": "School of Computer Science and Technology / Suzhou Research Institute, University of Science and Technology of China; School of Computer Science and Technology, Soochow University; The Hong Kong University of Science and Technology (Guangzhou); School of Computer Science and Technology, Soochow University; Alibaba Group; Alibaba Group", + "bibtex": "@article{Cui_Han_Tang_Huang_Li_Zhiyuli_2023, title={Practical Parallel Algorithms for Submodular Maximization Subject to a Knapsack Constraint with Nearly Optimal Adaptivity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25885}, DOI={10.1609/aaai.v37i6.25885}, abstractNote={Submodular maximization has wide applications in machine learning and data mining, where massive datasets have brought the great need for designing efficient and parallelizable algorithms. 
One measure of the parallelizability of a submodular maximization algorithm is its adaptivity complexity, which indicates the number of sequential rounds where a polynomial number of queries to the objective function can be executed in parallel. In this paper, we study the problem of non-monotone submodular maximization subject to a knapsack constraint, and propose the first combinatorial algorithm achieving an (8+epsilon)-approximation under O(log n) adaptive complexity, which is optimal up to a factor of O(loglog n). Moreover, under slightly larger adaptivity, we also propose approximation algorithms with nearly optimal query complexity of O(n), while achieving better approximation ratios. We show that our algorithms can also be applied to the special case of submodular maximization subject to a cardinality constraint, and achieve performance bounds comparable with those of state-of-the-art algorithms. Finally, the effectiveness of our approach is demonstrated by extensive experiments on real-world applications.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cui, Shuang and Han, Kai and Tang, Jing and Huang, He and Li, Xueying and Zhiyuli, Aakas}, year={2023}, month={Jun.}, pages={7261-7269} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25885/25657", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25885", + "pdf_size": 236274, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15442476854442424040&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;suda.edu.cn;ust.hk;suda.edu.cn;alibaba-inc.com;alibaba-inc.com", + "email": "mail.ustc.edu.cn;suda.edu.cn;ust.hk;suda.edu.cn;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;1;3;3", + "aff_unique_norm": "University of Science and Technology of China;Soochow University;The Hong Kong University of Science and 
Technology;Alibaba Group", + "aff_unique_dep": "School of Computer Science and Technology;School of Computer Science and Technology;;", + "aff_unique_url": "http://www.ustc.edu.cn;https://eng.suda.edu.cn/;https://www.ust.hk;https://www.alibaba.com", + "aff_unique_abbr": "USTC;Soochow U;HKUST;Alibaba", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "Suzhou;;Guangzhou", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27004", + "title": "Pre-training with Scientific Text Improves Educational Question Generation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "With the boom of digital educational materials and scalable e-learning systems, the potential for realising AI-assisted personalised learning has skyrocketed. In this landscape, the automatic generation of educational questions will play a key role, enabling scalable self-assessment when a global population is manoeuvring their personalised learning journeys. We develop EduQG, a novel educational question generation model built by adapting a large language model. 
Our initial experiments demonstrate that EduQG can produce superior educational questions by pre-training on scientific text.", + "primary_area": "", + "author": "Hamze Muse; Sahan Bulathwela; Emine Yilmaz", + "authorids": "", + "aff": "Centre for Artificial Intelligence, University College London; Centre for Artificial Intelligence, University College London; Centre for Artificial Intelligence, University College London", + "bibtex": "@article{Muse_Bulathwela_Yilmaz_2024, title={Pre-training with Scientific Text Improves Educational Question Generation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27004}, DOI={10.1609/aaai.v37i13.27004}, abstractNote={With the boom of digital educational materials and scalable e-learning systems, the potential for realising AI-assisted personalised learning has skyrocketed. In this landscape, the automatic generation of educational questions will play a key role, enabling scalable self-assessment when a global population is manoeuvring their personalised learning journeys. We develop EduQG, a novel educational question generation model built by adapting a large language model. 
Our initial experiments demonstrate that EduQG can produce superior educational questions by pre-training on scientific text.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Muse, Hamze and Bulathwela, Sahan and Yilmaz, Emine}, year={2024}, month={Jul.}, pages={16288-16289} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27004/26776", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27004", + "pdf_size": 84826, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7049438790292947087&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff_domain": "ucl.ac.uk;ucl.ac.uk;ucl.ac.uk", + "email": "ucl.ac.uk;ucl.ac.uk;ucl.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University College London", + "aff_unique_dep": "Centre for Artificial Intelligence", + "aff_unique_url": "https://www.ucl.ac.uk", + "aff_unique_abbr": "UCL", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26429", + "title": "Predicate Invention for Bilevel Planning", + "track": "main", + "status": "Technical", + "abstract": "Efficient planning in continuous state and action spaces is fundamentally hard, even when the transition model is deterministic and known. One way to alleviate this challenge is to perform bilevel planning with abstractions, where a high-level search for abstract plans is used to guide planning in the original transition space. Previous work has shown that when state abstractions in the form of symbolic predicates are hand-designed, operators and samplers for bilevel planning can be learned from demonstrations. In this work, we propose an algorithm for learning predicates from demonstrations, eliminating the need for manually specified state abstractions. 
Our key idea is to learn predicates by optimizing a surrogate objective that is tractable but faithful to our real efficient-planning objective. We use this surrogate objective in a hill-climbing search over predicate sets drawn from a grammar. Experimentally, we show across four robotic planning environments that our learned abstractions are able to quickly solve held-out tasks, outperforming six baselines.", + "primary_area": "planning routing and scheduling", + "author": "Tom Silver; Rohan Chitnis; Nishanth Kumar; Willie McClinton; Tom\u00e1s Lozano-P\u00e9rez; Leslie Kaelbling; Joshua B. Tenenbaum", + "authorids": "", + "aff": "MIT Computer Science and Artificial Intelligence Laboratory; Meta AI; MIT Computer Science and Artificial Intelligence Laboratory; MIT Computer Science and Artificial Intelligence Laboratory; MIT Computer Science and Artificial Intelligence Laboratory; MIT Computer Science and Artificial Intelligence Laboratory; MIT Computer Science and Artificial Intelligence Laboratory", + "bibtex": "@article{Silver_Chitnis_Kumar_McClinton_Lozano-P\u00e9rez_Kaelbling_Tenenbaum_2023, title={Predicate Invention for Bilevel Planning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26429}, DOI={10.1609/aaai.v37i10.26429}, abstractNote={Efficient planning in continuous state and action spaces is fundamentally hard, even when the transition model is deterministic and known. One way to alleviate this challenge is to perform bilevel planning with abstractions, where a high-level search for abstract plans is used to guide planning in the original transition space. Previous work has shown that when state abstractions in the form of symbolic predicates are hand-designed, operators and samplers for bilevel planning can be learned from demonstrations. In this work, we propose an algorithm for learning predicates from demonstrations, eliminating the need for manually specified state abstractions. 
Our key idea is to learn predicates by optimizing a surrogate objective that is tractable but faithful to our real efficient-planning objective. We use this surrogate objective in a hill-climbing search over predicate sets drawn from a grammar. Experimentally, we show across four robotic planning environments that our learned abstractions are able to quickly solve held-out tasks, outperforming six baselines.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Silver, Tom and Chitnis, Rohan and Kumar, Nishanth and McClinton, Willie and Lozano-P\u00e9rez, Tom\u00e1s and Kaelbling, Leslie and Tenenbaum, Joshua B.}, year={2023}, month={Jun.}, pages={12120-12129} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26429/26201", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26429", + "pdf_size": 4038155, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12635039951024122650&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mit.edu;meta.com;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu", + "email": "mit.edu;meta.com;mit.edu;mit.edu;mit.edu;mit.edu;mit.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Meta Platforms, Inc.", + "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Meta AI", + "aff_unique_url": "https://www.csail.mit.edu;https://meta.com", + "aff_unique_abbr": "MIT CSAIL;Meta", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25513", + "title": "Predict+Optimize for Packing and Covering LPs with Unknown Parameters in Constraints", + "track": "main", + "status": "Technical", + "abstract": "Predict+Optimize is a recently proposed framework which combines machine learning and 
constrained optimization, tackling optimization problems that contain parameters that are unknown at solving time. The goal is to predict the unknown parameters and use the estimates to solve for an estimated optimal solution to the optimization problem. However, all prior works have focused on the case where unknown parameters appear only in the optimization objective and not the constraints, for the simple reason that if the constraints were not known exactly, the estimated optimal solution might not even be feasible under the true parameters. The contributions of this paper are two-fold. First, we propose a novel and practically relevant framework for the Predict+Optimize setting, but with unknown parameters in both the objective and the constraints. We introduce the notion of a correction function, and an additional penalty term in the loss function, modelling practical scenarios where an estimated optimal solution can be modified into a feasible solution after the true parameters are revealed, but at an additional cost. Second, we propose a corresponding algorithmic approach for our framework, which handles all packing and covering linear programs. Our approach is inspired by the prior work of Mandi and Guns, though with crucial modifications and re-derivations for our very different setting. Experimentation demonstrates the superior empirical performance of our method over classical approaches.", + "primary_area": "constraint satisfaction and optimization", + "author": "Xinyi Hu; Jasper C.H. Lee; Jimmy H.M. 
Lee", + "authorids": "", + "aff": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong; Department of Computer Sciences & Institute for Foundations of Data Science, University of Wisconsin\u2013Madison, WI, USA; Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong", + "bibtex": "@article{Hu_Lee_Lee_2023, title={Predict+Optimize for Packing and Covering LPs with Unknown Parameters in Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25513}, DOI={10.1609/aaai.v37i4.25513}, abstractNote={Predict+Optimize is a recently proposed framework which combines machine learning and constrained optimization, tackling optimization problems that contain parameters that are unknown at solving time. The goal is to predict the unknown parameters and use the estimates to solve for an estimated optimal solution to the optimization problem. However, all prior works have focused on the case where unknown parameters appear only in the optimization objective and not the constraints, for the simple reason that if the constraints were not known exactly, the estimated optimal solution might not even be feasible under the true parameters. The contributions of this paper are two-fold. First, we propose a novel and practically relevant framework for the Predict+Optimize setting, but with unknown parameters in both the objective and the constraints. We introduce the notion of a correction function, and an additional penalty term in the loss function, modelling practical scenarios where an estimated optimal solution can be modified into a feasible solution after the true parameters are revealed, but at an additional cost. Second, we propose a corresponding algorithmic approach for our framework, which handles all packing and covering linear programs. 
Our approach is inspired by the prior work of Mandi and Guns, though with crucial modifications and re-derivations for our very different setting. Experimentation demonstrates the superior empirical performance of our method over classical approaches.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Xinyi and Lee, Jasper C.H. and Lee, Jimmy H.M.}, year={2023}, month={Jun.}, pages={3987-3995} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25513/25285", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25513", + "pdf_size": 324302, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=716817163970822103&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cse.cuhk.edu.hk;wisc.edu;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;wisc.edu;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;University of Wisconsin\u2013Madison", + "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Sciences", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.wisc.edu", + "aff_unique_abbr": "CUHK;UW\u2013Madison", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Shatin;Madison", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26910", + "title": "Predicting Perceived Music Emotions with Respect to Instrument Combinations", + "track": "eaai symposium human aware ai in sound and music", + "status": "Technical", + "abstract": "Music Emotion Recognition has attracted a lot of academic research work in recent years because it has a wide range of applications, including song recommendation and music visualization. 
As music is a way for humans to express emotion, there is a need for a machine to automatically infer the perceived emotion of pieces of music. In this paper, we compare the accuracy difference between music emotion recognition models given music pieces as a whole versus music pieces separated by instruments. To compare the models' emotion predictions, which are distributions over valence and arousal values, we provide a metric that compares two distribution curves. Using this metric, we provide empirical evidence that training Random Forest and Convolution Recurrent Neural Network with mixed instrumental music data conveys a better understanding of emotion than training the same models with music that are separated into each instrumental source.", + "primary_area": "", + "author": "Viet Dung Nguyen; Quan H. Nguyen; Richard G. Freedman", + "authorids": "", + "aff": "Rochester Institute of Technology; Gettysburg College; SIFT", + "bibtex": "@article{Nguyen_Nguyen_Freedman_2024, title={Predicting Perceived Music Emotions with Respect to Instrument Combinations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26910}, DOI={10.1609/aaai.v37i13.26910}, abstractNote={Music Emotion Recognition has attracted a lot of academic research work in recent years because it has a wide range of applications, including song recommendation and music visualization. As music is a way for humans to express emotion, there is a need for a machine to automatically infer the perceived emotion of pieces of music. In this paper, we compare the accuracy difference between music emotion recognition models given music pieces as a whole versus music pieces separated by instruments. To compare the models\u2019 emotion predictions, which are distributions over valence and arousal values, we provide a metric that compares two distribution curves. 
Using this metric, we provide empirical evidence that training Random Forest and Convolution Recurrent Neural Network with mixed instrumental music data conveys a better understanding of emotion than training the same models with music that are separated into each instrumental source.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Viet Dung and Nguyen, Quan H. and Freedman, Richard G.}, year={2024}, month={Jul.}, pages={16078-16086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26910/26682", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26910", + "pdf_size": 196111, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=374036648227119502&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "rit.edu;gettysburg.edu;sift.net", + "email": "rit.edu;gettysburg.edu;sift.net", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Rochester Institute of Technology;Gettysburg College;SIFT", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.rit.edu;https://www.gettysburg.edu;", + "aff_unique_abbr": "RIT;Gettysburg College;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States;" + }, + { + "id": "article-25609", + "title": "Predicting Temporal Sets with Simplified Fully Connected Networks", + "track": "main", + "status": "Technical", + "abstract": "Given a sequence of sets, where each set contains an arbitrary number of elements, temporal sets prediction aims to predict which elements will appear in the subsequent set. 
Existing methods for temporal sets prediction are developed on sophisticated components (e.g., recurrent neural networks, attention or gating mechanisms, and graph neural networks), which inevitably increase the model complexity due to more trainable parameters and higher computational costs. Moreover, the involved nonlinear activation may contribute little or even degrade the performance. In this paper, we present a succinct architecture that is solely built on the Simplified Fully Connected Networks (SFCNs) for temporal sets prediction to bring both effectiveness and efficiency together. In particular, given a user's sequence of sets, we employ SFCNs to derive representations of the user by learning inter-set temporal dependencies, intra-set element relationships, and intra-embedding channel correlations. Two families of general functions are introduced to preserve the permutation-invariant property of each set and the permutation-equivariant property of elements in each set. Moreover, we design a user representations adaptive fusing module to aggregate user representations according to each element for improving the prediction performance. Experiments on four benchmarks show the superiority of our approach over the state-of-the-art under both transductive and inductive settings. We also theoretically and empirically demonstrate that our model has lower space and time complexity than baselines. 
Codes and datasets are available at https://github.com/yule-BUAA/SFCNTSP.", + "primary_area": "data mining and knowledge management", + "author": "Le Yu; Zihang Liu; Tongyu Zhu; Leilei Sun; Bowen Du; Weifeng Lv", + "authorids": "", + "aff": "State Key Laboratory of Software Development Environment, Beihang University, Beijing, 100191, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, 100191, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, 100191, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, 100191, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, 100191, China; State Key Laboratory of Software Development Environment, Beihang University, Beijing, 100191, China", + "bibtex": "@article{Yu_Liu_Zhu_Sun_Du_Lv_2023, title={Predicting Temporal Sets with Simplified Fully Connected Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25609}, DOI={10.1609/aaai.v37i4.25609}, abstractNote={Given a sequence of sets, where each set contains an arbitrary number of elements, temporal sets prediction aims to predict which elements will appear in the subsequent set. Existing methods for temporal sets prediction are developed on sophisticated components (e.g., recurrent neural networks, attention or gating mechanisms, and graph neural networks), which inevitably increase the model complexity due to more trainable parameters and higher computational costs. Moreover, the involved nonlinear activation may contribute little or even degrade the performance. In this paper, we present a succinct architecture that is solely built on the Simplified Fully Connected Networks (SFCNs) for temporal sets prediction to bring both effectiveness and efficiency together. 
In particular, given a user\u2019s sequence of sets, we employ SFCNs to derive representations of the user by learning inter-set temporal dependencies, intra-set element relationships, and intra-embedding channel correlations. Two families of general functions are introduced to preserve the permutation-invariant property of each set and the permutation-equivariant property of elements in each set. Moreover, we design a user representations adaptive fusing module to aggregate user representations according to each element for improving the prediction performance. Experiments on four benchmarks show the superiority of our approach over the state-of-the-art under both transductive and inductive settings. We also theoretically and empirically demonstrate that our model has lower space and time complexity than baselines. Codes and datasets are available at https://github.com/yule-BUAA/SFCNTSP.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Le and Liu, Zihang and Zhu, Tongyu and Sun, Leilei and Du, Bowen and Lv, Weifeng}, year={2023}, month={Jun.}, pages={4835-4844} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25609/25381", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25609", + "pdf_size": 2273286, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1055827197058219954&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "https://github.com/yule-BUAA/SFCNTSP", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "State Key Laboratory of Software Development Environment", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "BUAA", + "aff_campus_unique_index": 
"0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26042", + "title": "Predictive Exit: Prediction of Fine-Grained Early Exits for Computation- and Energy-Efficient Inference", + "track": "main", + "status": "Technical", + "abstract": "By adding exiting layers to the deep learning networks, early exit can terminate the inference earlier with accurate results. However, the passive decision-making of whether to exit or continue the next layer has to go through every pre-placed exiting layer until it exits. In addition, it is hard to adjust the configurations of the computing platforms alongside the inference proceeds. By incorporating a low-cost prediction engine, we propose a Predictive Exit framework for computation- and energy-efficient deep learning applications. Predictive Exit can forecast where the network will exit (i.e., establish the number of remaining layers to finish the inference), which effectively reduces the network computation cost by exiting on time without running every pre-placed exiting layer. Moreover, according to the number of remaining layers, proper computing configurations (i.e., frequency and voltage) are selected to execute the network to further save energy. 
Extensive experimental results demonstrate that Predictive Exit achieves up to 96.2% computation reduction and 72.9% energy-saving compared with classic deep learning networks; and 12.8% computation reduction and 37.6% energy-saving compared with the early exit under state-of-the-art exiting strategies, given the same inference accuracy and latency.", + "primary_area": "machine learning ii", + "author": "Xiangjie Li; Chenfei Lou; Yuchi Chen; Zhengping Zhu; Yingtao Shen; Yehan Ma; An Zou", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Li_Lou_Chen_Zhu_Shen_Ma_Zou_2023, title={Predictive Exit: Prediction of Fine-Grained Early Exits for Computation- and Energy-Efficient Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26042}, DOI={10.1609/aaai.v37i7.26042}, abstractNote={By adding exiting layers to the deep learning networks, early exit can terminate the inference earlier with accurate results. However, the passive decision-making of whether to exit or continue the next layer has to go through every pre-placed exiting layer until it exits. In addition, it is hard to adjust the configurations of the computing platforms alongside the inference proceeds. By incorporating a low-cost prediction engine, we propose a Predictive Exit framework for computation- and energy-efficient deep learning applications. Predictive Exit can forecast where the network will exit (i.e., establish the number of remaining layers to finish the inference), which effectively reduces the network computation cost by exiting on time without running every pre-placed exiting layer. 
Moreover, according to the number of remaining layers, proper computing configurations (i.e., frequency and voltage) are selected to execute the network to further save energy. Extensive experimental results demonstrate that Predictive Exit achieves up to 96.2% computation reduction and 72.9% energy-saving compared with classic deep learning networks; and 12.8% computation reduction and 37.6% energy-saving compared with the early exit under state-of-the-art exiting strategies, given the same inference accuracy and latency.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Xiangjie and Lou, Chenfei and Chen, Yuchi and Zhu, Zhengping and Shen, Yingtao and Ma, Yehan and Zou, An}, year={2023}, month={Jun.}, pages={8657-8665} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26042/25814", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26042", + "pdf_size": 296261, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15935892636252220577&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26227", + "title": "Predictive Multiplicity in Probabilistic Classification", + "track": "main", + "status": "Technical", + "abstract": "Machine learning models are often used to inform real world risk assessment tasks: predicting consumer default risk, predicting whether a 
person suffers from a serious illness, or predicting a person's risk to appear in court. Given multiple models that perform almost equally well for a prediction task, to what extent do predictions vary across these models? If predictions are relatively consistent for similar models, then the standard approach of choosing the model that optimizes a penalized loss suffices. But what if predictions vary significantly for similar models? In machine learning, this is referred to as predictive multiplicity i.e. the prevalence of conflicting predictions assigned by near-optimal competing models. In this paper, we present a framework for measuring predictive multiplicity in probabilistic classification (predicting the probability of a positive outcome). We introduce measures that capture the variation in risk estimates over the set of competing models, and develop optimization-based methods to compute these measures efficiently and reliably for convex empirical risk minimization problems. We demonstrate the incidence and prevalence of predictive multiplicity in real-world tasks. Further, we provide insight into how predictive multiplicity arises by analyzing the relationship between predictive multiplicity and data set characteristics (outliers, separability, and majority-minority structure). Our results emphasize the need to report predictive multiplicity more widely.", + "primary_area": "machine learning iv", + "author": "Jamelle Watson-Daniels; David C. Parkes; Berk Ustun", + "authorids": "", + "aff": "Harvard University; Harvard University + DeepMind; U.C. 
San Diego", + "bibtex": "@article{Watson-Daniels_Parkes_Ustun_2023, title={Predictive Multiplicity in Probabilistic Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26227}, DOI={10.1609/aaai.v37i9.26227}, abstractNote={Machine learning models are often used to inform real world risk assessment tasks: predicting consumer default risk, predicting whether a person suffers from a serious illness, or predicting a person\u2019s risk to appear in court. Given multiple models that perform almost equally well for a prediction task, to what extent do predictions vary across these models? If predictions are relatively consistent for similar models, then the standard approach of choosing the model that optimizes a penalized loss suffices. But what if predictions vary significantly for similar models? In machine learning, this is referred to as predictive multiplicity i.e. the prevalence of conflicting predictions assigned by near-optimal competing models. In this paper, we present a framework for measuring predictive multiplicity in probabilistic classification (predicting the probability of a positive outcome). We introduce measures that capture the variation in risk estimates over the set of competing models, and develop optimization-based methods to compute these measures efficiently and reliably for convex empirical risk minimization problems. We demonstrate the incidence and prevalence of predictive multiplicity in real-world tasks. Further, we provide insight into how predictive multiplicity arises by analyzing the relationship between predictive multiplicity and data set characteristics (outliers, separability, and majority-minority structure). Our results emphasize the need to report predictive multiplicity more widely.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Watson-Daniels, Jamelle and Parkes, David C. 
and Ustun, Berk}, year={2023}, month={Jun.}, pages={10306-10314} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26227/25999", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26227", + "pdf_size": 1817779, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10671895792955724368&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "g.harvard.edu;eecs.harvard.edu;ucsd.edu", + "email": "g.harvard.edu;eecs.harvard.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;2", + "aff_unique_norm": "Harvard University;DeepMind;University of California, San Diego", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.harvard.edu;https://deepmind.com;https://www.ucsd.edu", + "aff_unique_abbr": "Harvard;DeepMind;UCSD", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";San Diego", + "aff_country_unique_index": "0;0+1;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "article-26490", + "title": "Preference-Controlled Multi-Objective Reinforcement Learning for Conditional Text Generation", + "track": "main", + "status": "Technical", + "abstract": "Conditional text generation is to generate text sequences conditioning on linguistic or non-linguistic data. The main line of existing work proposed deterministic models to improve the fidelity of the generated text but often ignored the diversity. Another line relied on conditional variational auto-encoders (CVAEs), which increased the diversity over their deterministic backbones. However, CVAEs regard diversity as an implicit objective and may not be optimal. In this paper, we raise two questions: i) Can diversity be further improved with an explicit objective? ii) Since fidelity and diversity are two conflicting objectives, how can we obtain different multi-objective optimal solutions according to user preferences? 
To answer question i), we propose a multi-objective reinforcement learning (MORL) method which explicitly takes CIDEr and Self-CIDEr scores as the fidelity-oriented and diversity-oriented rewards respectively. To answer question ii), we propose a preference-controlled MORL method, which can obtain infinite multi-objective optimal solutions by tuning the preference variable. We conduct extensive experiments on paraphrasing and image captioning tasks, which show that in the fidelity-diversity trade-off space, our model outperforms both deterministic and CVAE-based baselines.", "primary_area": "speech natural language processing", "author": "Wenqing Chen; Jidong Tian; Caoyun Fan; Yitian Li; Hao He; Yaohui Jin", "authorids": "", "aff": "School of Software Engineering, Sun Yat-sen University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University + State Key Lab of Advanced Optical Communication System and Network, School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University", "bibtex": "@article{Chen_Tian_Fan_Li_He_Jin_2023, title={Preference-Controlled Multi-Objective Reinforcement Learning for Conditional Text Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26490}, DOI={10.1609/aaai.v37i11.26490}, abstractNote={Conditional text generation is to generate text sequences conditioning on linguistic or non-linguistic data. The main line of existing work proposed deterministic models to improve the fidelity of the generated text but often ignored the diversity. 
Another line relied on conditional variational auto-encoders (CVAEs), which increased the diversity over their deterministic backbones. However, CVAEs regard diversity as an implicit objective and may not be optimal. In this paper, we raise two questions: i) Can diversity be further improved with an explicit objective? ii) Since fidelity and diversity are two conflicting objectives, how can we obtain different multi-objective optimal solutions according to user preferences? To answer question i), we propose a multi-objective reinforcement learning (MORL) method which explicitly takes CIDEr and Self-CIDEr scores as the fidelity-oriented and diversity-oriented rewards respectively. To answer question ii), we propose a preference-controlled MORL method, which can obtain infinite multi-objective optimal solutions by tuning the preference variable. We conduct extensive experiments on paraphrasing and image captioning tasks, which show that in the fidelity-diversity trade-off space, our model outperforms both deterministic and CVAE-based baselines.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Wenqing and Tian, Jidong and Fan, Caoyun and Li, Yitian and He, Hao and Jin, Yaohui}, year={2023}, month={Jun.}, pages={12662-12672} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26490/26262", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26490", + "pdf_size": 1164246, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2877649408420376134&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "mail.sysu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "mail.sysu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+1;1;1;1;1", + "aff_unique_norm": "Sun Yat-sen University;Shanghai Jiao Tong University", + "aff_unique_dep": 
"School of Software Engineering;AI Institute", + "aff_unique_url": "http://www.sysu.edu.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "SYSU;SJTU", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26631", + "title": "Preserve Context Information for Extract-Generate Long-Input Summarization Framework", + "track": "main", + "status": "Technical", + "abstract": "The Extract-generate framework has been a classic approach for text summarization. As pretrained language models struggling with long-input summarization for their high memory cost, extract-generate framework regains researchers' interests. However, the cost of its effectiveness in dealing with long-input summarization is the loss of context information. In this paper, we present a context-aware extract-generate framework (CAEG) for long-input text summarization. It focuses on preserving both local and global context information in an extract-generate framework with little cost, and can be applied to most of existing extract-generate summarization models. CAEG generates a set of context-related text spans called context prompts for each text snippet and use them to transfer the context information from the extractor and generator. To find such context prompts, we propose to capture the context information based on the interpretation of the extractor, where the text spans having the highest contribution to the extraction decision are considered as containing the richest context information. We evaluate our approach on both long-document and long-dialogue summarization datasets: arXiv and QMSum. 
The experiment results show that CAEG achieves the-state-of-art result on QMSum and outperforms other extract-generate based models in arXiv.", + "primary_area": "speech natural language processing", + "author": "Ruifeng Yuan; Zili Wang; Ziqiang Cao; Wenjie Li", + "authorids": "", + "aff": "The Hong Kong Polytechnic University; Xiaohongshu Inc; Institute of Artificial Intelligence, Soochow University, China; The Hong Kong Polytechnic University", + "bibtex": "@article{Yuan_Wang_Cao_Li_2023, title={Preserve Context Information for Extract-Generate Long-Input Summarization Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26631}, DOI={10.1609/aaai.v37i11.26631}, abstractNote={The Extract-generate framework has been a classic approach for text summarization. As pretrained language models struggling with long-input summarization for their high memory cost, extract-generate framework regains researchers\u2019 interests. However, the cost of its effectiveness in dealing with long-input summarization is the loss of context information. In this paper, we present a context-aware extract-generate framework (CAEG) for long-input text summarization. It focuses on preserving both local and global context information in an extract-generate framework with little cost, and can be applied to most of existing extract-generate summarization models. CAEG generates a set of context-related text spans called context prompts for each text snippet and use them to transfer the context information from the extractor and generator. To find such context prompts, we propose to capture the context information based on the interpretation of the extractor, where the text spans having the highest contribution to the extraction decision are considered as containing the richest context information. We evaluate our approach on both long-document and long-dialogue summarization datasets: arXiv and QMSum. 
The experiment results show that CAEG achieves the-state-of-art result on QMSum and outperforms other extract-generate based models in arXiv.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Ruifeng and Wang, Zili and Cao, Ziqiang and Li, Wenjie}, year={2023}, month={Jun.}, pages={13932-13939} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26631/26403", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26631", + "pdf_size": 216629, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3997394991941945623&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "The Hong Kong Polytechnic University;Xiaohongshu Inc;Soochow University", + "aff_unique_dep": ";;Institute of Artificial Intelligence", + "aff_unique_url": "https://www.polyu.edu.hk;https://www.xiaohongshu.com;https://www.soochow.edu.cn", + "aff_unique_abbr": "PolyU;XHS;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25384", + "title": "Preserving Structural Consistency in Arbitrary Artist and Artwork Style Transfer", + "track": "main", + "status": "Technical", + "abstract": "Deep generative models are effective in style transfer. 
\nPrevious methods learn one or several specific artist-style from a collection of artworks.\nThese methods not only homogenize the artist-style of different artworks of the same artist but also lack generalization for the unseen artists.\nTo solve these challenges, we propose a double-style transferring module (DSTM).\nIt extracts different artist-style and artwork-style from different artworks (even untrained) and preserves the intrinsic diversity between different artworks of the same artist.\nDSTM swaps the two styles in the adversarial training and encourages realistic image generation given arbitrary style combinations.\nHowever, learning style from single artwork can often cause over-adaption to it, resulting in the introduction of structural features of style image.\nWe further propose an edge enhancing module (EEM) which derives edge information from multi-scale and multi-level features to enhance structural consistency.\nWe broadly evaluate our method across six large-scale benchmark datasets.\nEmpirical results show that our method achieves arbitrary artist-style and artwork-style extraction from a single artwork, and effectively avoids introducing the style image\u2019s structural features.\nOur method improves the state-of-the-art deception rate from 58.9% to 67.2% and the average FID from 48.74 to 42.83.", + "primary_area": "computer vision iii", + "author": "Jingyu Wu; Lefan Hou; Zejian Li; Jun Liao; Li Liu; Lingyun Sun", + "authorids": "", + "aff": "Alibaba-Zhejiang University Joint Institute of Frontier Technologies, Zhejiang University, Hangzhou 310027, China; Alibaba-Zhejiang University Joint Institute of Frontier Technologies, Zhejiang University, Hangzhou 310027, China; School of Software Technology, Zhejiang University, Ningbo 315048, China+Alibaba-Zhejiang University Joint Institute of Frontier Technologies, Zhejiang University, Hangzhou 310027, China; School of Big Data & Software Engineering, Chongqing University, Chongqing 400044, China; 
School of Big Data & Software Engineering, Chongqing University, Chongqing 400044, China; Alibaba-Zhejiang University Joint Institute of Frontier Technologies, Zhejiang University, Hangzhou 310027, China+Zhejiang-Singapore Innovation and AI Joint Research Lab, Hangzhou 310027, China", + "bibtex": "@article{Wu_Hou_Li_Liao_Liu_Sun_2023, title={Preserving Structural Consistency in Arbitrary Artist and Artwork Style Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25384}, DOI={10.1609/aaai.v37i3.25384}, abstractNote={Deep generative models are effective in style transfer. Previous methods learn one or several specific artist-style from a collection of artworks.\nThese methods not only homogenize the artist-style of different artworks of the same artist but also lack generalization for the unseen artists.\nTo solve these challenges, we propose a double-style transferring module (DSTM).\nIt extracts different artist-style and artwork-style from different artworks (even untrained) and preserves the intrinsic diversity between different artworks of the same artist.\nDSTM swaps the two styles in the adversarial training and encourages realistic image generation given arbitrary style combinations.\nHowever, learning style from single artwork can often cause over-adaption to it, resulting in the introduction of structural features of style image.\nWe further propose an edge enhancing module (EEM) which derives edge information from multi-scale and multi-level features to enhance structural consistency.\nWe broadly evaluate our method across six large-scale benchmark datasets.\nEmpirical results show that our method achieves arbitrary artist-style and artwork-style extraction from a single artwork, and effectively avoids introducing the style image\u2019s structural features.\nOur method improves the state-of-the-art deception rate from 58.9% to 67.2% and the average FID from 48.74 to 42.83.}, number={3}, journal={Proceedings of the AAAI Conference 
on Artificial Intelligence}, author={Wu, Jingyu and Hou, Lefan and Li, Zejian and Liao, Jun and Liu, Li and Sun, Lingyun}, year={2023}, month={Jun.}, pages={2830-2838} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25384/25156", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25384", + "pdf_size": 2008960, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13009664762315237168&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;cqu.edu.cn;cqu.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;cqu.edu.cn;cqu.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+0;1;1;0+2", + "aff_unique_norm": "Zhejiang University;Chongqing University;Zhejiang-Singapore Innovation and AI Joint Research Lab", + "aff_unique_dep": "Alibaba-Zhejiang University Joint Institute of Frontier Technologies;School of Big Data & Software Engineering;", + "aff_unique_url": "http://www.zju.edu.cn;;", + "aff_unique_abbr": "ZJU;;", + "aff_campus_unique_index": "0;0;1+0;2;2;0+0", + "aff_campus_unique": "Hangzhou;Ningbo;Chongqing", + "aff_country_unique_index": "0;0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25876", + "title": "PrimeNet: Pre-training for Irregular Multivariate Time Series", + "track": "main", + "status": "Technical", + "abstract": "Real-world applications often involve irregular time series, for which the time intervals between successive observations are non-uniform. Irregularity across multiple features in a multi-variate time series further results in a different subset of features at any given time (i.e., asynchronicity). Existing pre-training schemes for time-series, however, often assume regularity of time series and make no special treatment of irregularity. 
We argue that such irregularity offers insight about domain property of the data\u2014for example, frequency of hospital visits may signal patient health condition\u2014that can guide representation learning. In this work, we propose PrimeNet to learn a self-supervised representation for irregular multivariate time-series. Specifically, we design a time sensitive contrastive learning and data reconstruction task to pre-train a model. Irregular time-series exhibits considerable variations in sampling density over time. Hence, our triplet generation strategy follows the density of the original data points, preserving its native irregularity. Moreover, the sampling density variation over time makes data reconstruction difficult for different regions. Therefore, we design a data masking technique that always masks a constant time duration to accommodate reconstruction for regions of different sampling density. We learn with these tasks using unlabeled data to build a pre-trained model and fine-tune on a downstream task with limited labeled data, in contrast with existing fully supervised approach for irregular time-series, requiring large amounts of labeled data. Experiment results show that PrimeNet significantly outperforms state-of-the-art methods on naturally irregular and asynchronous data from Healthcare and IoT applications for several downstream tasks, including classification, interpolation, and regression.", + "primary_area": "machine learning i", + "author": "Ranak Roy Chowdhury; Jiacheng Li; Xiyuan Zhang; Dezhi Hong; Rajesh K. 
Gupta; Jingbo Shang", + "authorids": "", + "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego; Amazon; University of California, San Diego; University of California, San Diego", + "bibtex": "@article{Chowdhury_Li_Zhang_Hong_Gupta_Shang_2023, title={PrimeNet: Pre-training for Irregular Multivariate Time Series}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25876}, DOI={10.1609/aaai.v37i6.25876}, abstractNote={Real-world applications often involve irregular time series, for which the time intervals between successive observations are non-uniform. Irregularity across multiple features in a multi-variate time series further results in a different subset of features at any given time (i.e., asynchronicity). Existing pre-training schemes for time-series, however, often assume regularity of time series and make no special treatment of irregularity. We argue that such irregularity offers insight about domain property of the data\u2014for example, frequency of hospital visits may signal patient health condition\u2014that can guide representation learning. In this work, we propose PrimeNet to learn a self-supervised representation for irregular multivariate time-series. Specifically, we design a time sensitive contrastive learning and data reconstruction task to pre-train a model. Irregular time-series exhibits considerable variations in sampling density over time. Hence, our triplet generation strategy follows the density of the original data points, preserving its native irregularity. Moreover, the sampling density variation over time makes data reconstruction difficult for different regions. Therefore, we design a data masking technique that always masks a constant time duration to accommodate reconstruction for regions of different sampling density. 
We learn with these tasks using unlabeled data to build a pre-trained model and fine-tune on a downstream task with limited labeled data, in contrast with existing fully supervised approach for irregular time-series, requiring large amounts of labeled data. Experiment results show that PrimeNet significantly outperforms state-of-the-art methods on naturally irregular and asynchronous data from Healthcare and IoT applications for several downstream tasks, including classification, interpolation, and regression.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chowdhury, Ranak Roy and Li, Jiacheng and Zhang, Xiyuan and Hong, Dezhi and Gupta, Rajesh K. and Shang, Jingbo}, year={2023}, month={Jun.}, pages={7184-7192} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25876/25648", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25876", + "pdf_size": 471329, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6133487726887823818&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 12, + "aff_domain": "eng.ucsd.edu;eng.ucsd.edu;ucsd.edu;amazon.com;eng.ucsd.edu;eng.ucsd.edu", + "email": "eng.ucsd.edu;eng.ucsd.edu;ucsd.edu;amazon.com;eng.ucsd.edu;eng.ucsd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "University of California, San Diego;Amazon.com, Inc.", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucsd.edu;https://www.amazon.com", + "aff_unique_abbr": "UCSD;Amazon", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "San Diego;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25628", + "title": "Principled Data-Driven Decision Support for Cyber-Forensic Investigations", + "track": "main", + "status": "Technical", + "abstract": "In the wake of a cybersecurity incident, it is crucial to promptly discover how the 
threat actors breached security in order to assess the impact of the incident and to develop and deploy countermeasures that can protect against further attacks. To this end, defenders can launch a cyber-forensic investigation, which discovers the techniques that the threat actors used in the incident. A fundamental challenge in such an investigation is prioritizing the investigation of particular techniques since the investigation of each technique requires time and effort, but forensic analysts cannot know which ones were actually used before investigating them. To ensure prompt discovery, it is imperative to provide decision support that can help forensic analysts with this prioritization. A recent study demonstrated that data-driven decision support, based on a dataset of prior incidents, can provide state-of-the-art prioritization. However, this data-driven approach, called DISCLOSE, is based on a heuristic that utilizes only a subset of the available information and does not approximate optimal decisions. To improve upon this heuristic, we introduce a principled approach for data-driven decision support for cyber-forensic investigations. We formulate the decision-support problem using a Markov decision process, whose states represent the states of a forensic investigation. To solve the decision problem, we propose a Monte Carlo tree search based method, which relies on a k-NN regression over prior incidents to estimate state-transition probabilities. 
We evaluate our proposed approach on multiple versions of the MITRE ATT&CK dataset, which is a knowledge base of adversarial techniques and tactics based on real-world cyber incidents, and demonstrate that our approach outperforms DISCLOSE in terms of techniques discovered per effort spent.", + "primary_area": "domain s of application", + "author": "Soodeh Atefi; Sakshyam Panda; Emmanouil Panaousis; Aron Laszka", + "authorids": "", + "aff": "University of Houston; University of Greenwich; University of Greenwich; Pennsylvania State University", + "bibtex": "@article{Atefi_Panda_Panaousis_Laszka_2023, title={Principled Data-Driven Decision Support for Cyber-Forensic Investigations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25628}, DOI={10.1609/aaai.v37i4.25628}, abstractNote={In the wake of a cybersecurity incident, it is crucial to promptly discover how the threat actors breached security in order to assess the impact of the incident and to develop and deploy countermeasures that can protect against further attacks. To this end, defenders can launch a cyber-forensic investigation, which discovers the techniques that the threat actors used in the incident. A fundamental challenge in such an investigation is prioritizing the investigation of particular techniques since the investigation of each technique requires time and effort, but forensic analysts cannot know which ones were actually used before investigating them. To ensure prompt discovery, it is imperative to provide decision support that can help forensic analysts with this prioritization. A recent study demonstrated that data-driven decision support, based on a dataset of prior incidents, can provide state-of-the-art prioritization. However, this data-driven approach, called DISCLOSE, is based on a heuristic that utilizes only a subset of the available information and does not approximate optimal decisions. 
To improve upon this heuristic, we introduce a principled approach for data-driven decision support for cyber-forensic investigations. We formulate the decision-support problem using a Markov decision process, whose states represent the states of a forensic investigation. To solve the decision problem, we propose a Monte Carlo tree search based method, which relies on a k-NN regression over prior incidents to estimate state-transition probabilities. We evaluate our proposed approach on multiple versions of the MITRE ATT&CK dataset, which is a knowledge base of adversarial techniques and tactics based on real-world cyber incidents, and demonstrate that our approach outperforms DISCLOSE in terms of techniques discovered per effort spent.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Atefi, Soodeh and Panda, Sakshyam and Panaousis, Emmanouil and Laszka, Aron}, year={2023}, month={Jun.}, pages={5010-5017} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25628/25400", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25628", + "pdf_size": 151237, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17382713694226534219&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 17, + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;2", + "aff_unique_norm": "University of Houston;University of Greenwich;Pennsylvania State University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uh.edu;https://www.gre.ac.uk;https://www.psu.edu", + "aff_unique_abbr": "UH;Greenwich;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "article-26439", + "title": "Principled and Efficient Motif Finding for Structure Learning of Lifted Graphical Models", + "track": "main", + "status": 
"Technical", + "abstract": "Structure learning is a core problem in AI central to the fields of neuro-symbolic AI and statistical relational learning. It consists in automatically learning a logical theory from data. The basis for structure learning is mining repeating patterns in the data, known as structural motifs. Finding these patterns reduces the exponential search space and therefore guides the learning of formulas. Despite the importance of motif learning, it is still not well understood. We present the first principled approach for mining structural motifs in lifted graphical models, languages that blend first-order logic with probabilistic models, which uses a stochastic process to measure the similarity of entities in the data. \n\nOur first contribution is an algorithm, which depends on two intuitive hyperparameters: one controlling the uncertainty in the entity similarity measure, and one controlling the softness of the resulting rules. Our second contribution is a preprocessing step where we perform hierarchical clustering on the data to reduce the search space to the most relevant data. Our third contribution is to introduce an O(n ln(n)) (in the size of the entities in the data) algorithm for clustering structurally-related data. 
We evaluate our approach using standard benchmarks and show that we outperform state-of-the-art structure learning approaches by up to 6% in terms of accuracy and up to 80% in terms of runtime.", + "primary_area": "reasoning under uncertainty", + "author": "Jonathan Feldstein; Dominic Phillips; Efthymia Tsamoura", + "authorids": "", + "aff": "University of Edinburgh, Edinburgh, United Kingdom + BENNU.AI, Edinburgh, United Kingdom; University of Edinburgh, Edinburgh, United Kingdom; Samsung AI, Cambridge, United Kingdom", + "bibtex": "@article{Feldstein_Phillips_Tsamoura_2023, title={Principled and Efficient Motif Finding for Structure Learning of Lifted Graphical Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26439}, DOI={10.1609/aaai.v37i10.26439}, abstractNote={Structure learning is a core problem in AI central to the fields of neuro-symbolic AI and statistical relational learning. It consists in automatically learning a logical theory from data. The basis for structure learning is mining repeating patterns in the data, known as structural motifs. Finding these patterns reduces the exponential search space and therefore guides the learning of formulas. Despite the importance of motif learning, it is still not well understood. We present the first principled approach for mining structural motifs in lifted graphical models, languages that blend first-order logic with probabilistic models, which uses a stochastic process to measure the similarity of entities in the data. Our first contribution is an algorithm, which depends on two intuitive hyperparameters: one controlling the uncertainty in the entity similarity measure, and one controlling the softness of the resulting rules. Our second contribution is a preprocessing step where we perform hierarchical clustering on the data to reduce the search space to the most relevant data. 
Our third contribution is to introduce an O(n ln(n)) (in the size of the entities in the data) algorithm for clustering structurally-related data. We evaluate our approach using standard benchmarks and show that we outperform state-of-the-art structure learning approaches by up to 6% in terms of accuracy and up to 80% in terms of runtime.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Feldstein, Jonathan and Phillips, Dominic and Tsamoura, Efthymia}, year={2023}, month={Jun.}, pages={12205-12215} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26439/26211", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26439", + "pdf_size": 187510, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7547235961135457965&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 8, + "aff_domain": "bennu.ai;ed.ac.uk;samsung.com", + "email": "bennu.ai;ed.ac.uk;samsung.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;2", + "aff_unique_norm": "University of Edinburgh;BENNU.AI;Samsung AI", + "aff_unique_dep": ";;AI", + "aff_unique_url": "https://www.ed.ac.uk;;https://www.samsung.com/global/innovation/ai/", + "aff_unique_abbr": "Edinburgh;;Samsung AI", + "aff_campus_unique_index": "0+0;0;1", + "aff_campus_unique": "Edinburgh;Cambridge", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26300", + "title": "Priori Anchor Labels Supervised Scalable Multi-View Bipartite Graph Clustering", + "track": "main", + "status": "Technical", + "abstract": "Although multi-view clustering (MVC) has achieved remarkable performance by integrating the complementary information of views, it is inefficient when facing scalable data. Proverbially, anchor strategy can mitigate such a challenge a certain extent. However, the unsupervised dynamic strategy usually cannot obtain the optimal anchors for MVC. 
The main reasons are that it does not consider the fairness of different views and lacks the priori supervised guidance. To completely solve these problems, we first propose the priori anchor graph regularization (PAGG) for scalable multi-view bipartite graph clustering, dubbed as SMGC method. Specifically, SMGC learns a few representative consensus anchors to simulate the numerous view data well, and constructs a bipartite graph to bridge the affinities between the anchors and original data points. In order to largely improve the quality of anchors, PAGG predefines prior anchor labels to constrain the anchors with discriminative cluster structure and fair view allocation, such that a better bipartite graph can be obtained for fast clustering. Experimentally, abundant of experiments are accomplished on six scalable benchmark datasets, and the experimental results fully demonstrate the effectiveness and efficiency of our SMGC.", + "primary_area": "machine learning iv", + "author": "Jiali You; Zhenwen Ren; Xiaojian You; Haoran Li; Yuancheng Yao", + "authorids": "", + "aff": "School of National Defense Science and Technology, Southwest University of Science and Technology, Mianyang, China; Key Laboratory of System Control and Information Processing, Ministry of Education, Shanghai, China + SongShan Laboratory, Henan, China; School of National Defense Science and Technology, Southwest University of Science and Technology, Mianyang, China; School of National Defense Science and Technology, Southwest University of Science and Technology, Mianyang, China + Department of School of Electronics and Communication Engineering, Sun Yat-sen University, Shenzhen, China; School of National Defense Science and Technology, Southwest University of Science and Technology, Mianyang, China", + "bibtex": "@article{You_Ren_You_Li_Yao_2023, title={Priori Anchor Labels Supervised Scalable Multi-View Bipartite Graph Clustering}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/26300}, DOI={10.1609/aaai.v37i9.26300}, abstractNote={Although multi-view clustering (MVC) has achieved remarkable performance by integrating the complementary information of views, it is inefficient when facing scalable data. Proverbially, anchor strategy can mitigate such a challenge a certain extent. However, the unsupervised dynamic strategy usually cannot obtain the optimal anchors for MVC. The main reasons are that it does not consider the fairness of different views and lacks the priori supervised guidance. To completely solve these problems, we first propose the priori anchor graph regularization (PAGG) for scalable multi-view bipartite graph clustering, dubbed as SMGC method. Specifically, SMGC learns a few representative consensus anchors to simulate the numerous view data well, and constructs a bipartite graph to bridge the affinities between the anchors and original data points. In order to largely improve the quality of anchors, PAGG predefines prior anchor labels to constrain the anchors with discriminative cluster structure and fair view allocation, such that a better bipartite graph can be obtained for fast clustering. 
Experimentally, abundant of experiments are accomplished on six scalable benchmark datasets, and the experimental results fully demonstrate the effectiveness and efficiency of our SMGC.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={You, Jiali and Ren, Zhenwen and You, Xiaojian and Li, Haoran and Yao, Yuancheng}, year={2023}, month={Jun.}, pages={10972-10979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26300/26072", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26300", + "pdf_size": 554791, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7346508389880756998&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "163.com;njust.edu.cn;163.com;gmail.com;163.com", + "email": "163.com;njust.edu.cn;163.com;gmail.com;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+2;0;0+3;0", + "aff_unique_norm": "Southwest University of Science and Technology;Key Laboratory of System Control and Information Processing;SongShan Laboratory;Sun Yat-sen University", + "aff_unique_dep": "School of National Defense Science and Technology;Ministry of Education;;School of Electronics and Communication Engineering", + "aff_unique_url": ";;;http://www.sysu.edu.cn", + "aff_unique_abbr": ";;;SYSU", + "aff_campus_unique_index": "0;;0;0+2;0", + "aff_campus_unique": "Mianyang;;Shenzhen", + "aff_country_unique_index": "0;0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26412", + "title": "Privacy Attacks on Schedule-Driven Data", + "track": "main", + "status": "Technical", + "abstract": "Schedules define how resources process jobs in diverse domains, reaching from healthcare to transportation, and, therefore, denote a valuable starting point for analysis of the underlying system. However, publishing a schedule may disclose private information on the considered jobs. 
In this paper, we provide a first threat model for published schedules, thereby defining a completely new class of data privacy problems. We then propose distance-based measures to assess the privacy loss incurred by a published schedule, and show their theoretical properties for an uninformed adversary, which can be used as a benchmark for informed attacks. We show how an informed attack on a published schedule can be phrased as an inverse scheduling problem. We instantiate this idea by formulating the inverse of a well-studied single-machine scheduling problem, namely minimizing the total weighted completion times. An empirical evaluation for synthetic scheduling problems shows the effectiveness of informed privacy attacks and compares the results to theoretical bounds on uninformed attacks.", + "primary_area": "planning routing and scheduling", + "author": "Stephan A. Fahrenkrog-Petersen; Arik Senderovich; Alexandra Tichauer; Ali Kaan Tutak; J. Christopher Beck; Matthias Weidlich", + "authorids": "", + "aff": "Humboldt-Universit \u00a8at zu Berlin; York University; Humboldt-Universit \u00a8at zu Berlin; Humboldt-Universit \u00a8at zu Berlin; University of Toronto; Humboldt-Universit \u00a8at zu Berlin", + "bibtex": "@article{Fahrenkrog-Petersen_Senderovich_Tichauer_Tutak_Beck_Weidlich_2023, title={Privacy Attacks on Schedule-Driven Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26412}, DOI={10.1609/aaai.v37i10.26412}, abstractNote={Schedules define how resources process jobs in diverse domains, reaching from healthcare to transportation, and, therefore, denote a valuable starting point for analysis of the underlying system. However, publishing a schedule may disclose private information on the considered jobs. In this paper, we provide a first threat model for published schedules, thereby defining a completely new class of data privacy problems. 
We then propose distance-based measures to assess the privacy loss incurred by a published schedule, and show their theoretical properties for an uninformed adversary, which can be used as a benchmark for informed attacks. We show how an informed attack on a published schedule can be phrased as an inverse scheduling problem. We instantiate this idea by formulating the inverse of a well-studied single-machine scheduling problem, namely minimizing the total weighted completion times. An empirical evaluation for synthetic scheduling problems shows the effectiveness of informed privacy attacks and compares the results to theoretical bounds on uninformed attacks.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fahrenkrog-Petersen, Stephan A. and Senderovich, Arik and Tichauer, Alexandra and Tutak, Ali Kaan and Beck, J. Christopher and Weidlich, Matthias}, year={2023}, month={Jun.}, pages={11972-11979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26412/26184", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26412", + "pdf_size": 546470, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4289356712534113263&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hu-berlin.de;yorku.ca;hu-berlin.de;hu-berlin.de;mie.utoronto.ca;hu-berlin.de", + "email": "hu-berlin.de;yorku.ca;hu-berlin.de;hu-berlin.de;mie.utoronto.ca;hu-berlin.de", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;2;0", + "aff_unique_norm": "Humboldt University of Berlin;York University;University of Toronto", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.hu-berlin.de;https://www.yorku.ca;https://www.utoronto.ca", + "aff_unique_abbr": "HU Berlin;York U;U of T", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;1;0", + "aff_country_unique": "Germany;Canada" + }, + { + "id": "article-26703", 
+ "title": "Privacy-Preserved Evolutionary Graph Modeling via Gromov-Wasserstein Autoregression", + "track": "aaai special track", + "status": "Technical", + "abstract": "Real-world graphs like social networks are often evolutionary over time, whose observations at different timestamps lead to graph sequences. Modeling such evolutionary graphs is important for many applications, but solving this problem often requires the correspondence between the graphs at different timestamps, which may leak private node information, e.g., the temporal behavior patterns of the nodes. We proposed a Gromov-Wasserstein Autoregressive (GWAR) model to capture the generative mechanisms of evolutionary graphs, which does not require the correspondence information and thus preserves the privacy of the graphs' nodes. This model consists of two autoregressions, predicting the number of nodes and the probabilities of nodes and edges, respectively. The model takes observed graphs as its input and predicts future graphs via solving a joint graph alignment and merging task. This task leads to a fused Gromov-Wasserstein (FGW) barycenter problem, in which we approximate the alignment of the graphs based on a novel inductive fused Gromov-Wasserstein (IFGW) distance. The IFGW distance is parameterized by neural networks and can be learned under mild assumptions, thus, we can infer the FGW barycenters without iterative optimization and predict future graphs efficiently. 
Experiments show that our GWAR achieves encouraging performance in modeling evolutionary graphs in privacy-preserving scenarios.", + "primary_area": "ai for social impact", + "author": "Yue Xiang; Dixin Luo; Hongteng Xu", + "authorids": "", + "aff": "School of Statistics, Renmin University of China; School of Computer Science and Technology, Beijing Institute of Technology; Gaoling School of Artificial Intelligence, Renmin University of China + Beijing Key Laboratory of Big Data Management and Analysis Methods", + "bibtex": "@article{Xiang_Luo_Xu_2023, title={Privacy-Preserved Evolutionary Graph Modeling via Gromov-Wasserstein Autoregression}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26703}, DOI={10.1609/aaai.v37i12.26703}, abstractNote={Real-world graphs like social networks are often evolutionary over time, whose observations at different timestamps lead to graph sequences. Modeling such evolutionary graphs is important for many applications, but solving this problem often requires the correspondence between the graphs at different timestamps, which may leak private node information, e.g., the temporal behavior patterns of the nodes. We proposed a Gromov-Wasserstein Autoregressive (GWAR) model to capture the generative mechanisms of evolutionary graphs, which does not require the correspondence information and thus preserves the privacy of the graphs\u2019 nodes. This model consists of two autoregressions, predicting the number of nodes and the probabilities of nodes and edges, respectively. The model takes observed graphs as its input and predicts future graphs via solving a joint graph alignment and merging task. This task leads to a fused Gromov-Wasserstein (FGW) barycenter problem, in which we approximate the alignment of the graphs based on a novel inductive fused Gromov-Wasserstein (IFGW) distance. 
The IFGW distance is parameterized by neural networks and can be learned under mild assumptions, thus, we can infer the FGW barycenters without iterative optimization and predict future graphs efficiently. Experiments show that our GWAR achieves encouraging performance in modeling evolutionary graphs in privacy-preserving scenarios.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiang, Yue and Luo, Dixin and Xu, Hongteng}, year={2023}, month={Jun.}, pages={14566-14574} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26703/26475", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26703", + "pdf_size": 620997, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:bReh27E3NA4J:scholar.google.com/&scioq=Privacy-Preserved+Evolutionary+Graph+Modeling+via+Gromov-Wasserstein+Autoregression&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "ruc.edu.cn;bit.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;bit.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+2", + "aff_unique_norm": "Renmin University of China;Beijing Institute of Technology;Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_unique_dep": "School of Statistics;School of Computer Science and Technology;Big Data Management and Analysis", + "aff_unique_url": "http://www.ruc.edu.cn;http://www.bit.edu.cn/;", + "aff_unique_abbr": "RUC;BIT;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26932", + "title": "Privacy-Preserving Representation Learning for Text-Attributed Networks with Simplicial Complexes", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Although recent network representation learning (NRL) works in text-attributed networks demonstrated superior performance for 
various graph inference tasks, learning network representations could always raise privacy concerns when nodes represent people or human-related variables. Moreover, standard NRLs that leverage structural information from a graph proceed by first encoding pairwise relationships into learned representations and then analysing its properties. This approach is fundamentally misaligned with problems where the relationships involve multiple points, and topological structure must be encoded beyond pairwise interactions. Fortunately, the machinery of topological data analysis (TDA) and, in particular, simplicial neural networks (SNNs) offer a mathematically rigorous framework to evaluate not only higher-order interactions, but also global invariant features of the observed graph to systematically learn topological structures. It is critical to investigate if the representation outputs from SNNs are more vulnerable compared to regular representation outputs from graph neural networks (GNNs) via pairwise interactions. In my dissertation, I will first study learning the representations with text attributes for simplicial complexes (RT4SC) via SNNs. Then, I will conduct research on two potential attacks on the representation outputs from SNNs: (1) membership inference attack, which infers whether a certain node of a graph is inside the training data of the GNN model; and (2) graph reconstruction attacks, which infer the confidential edges of a text-attributed network. Finally, I will study a privacy-preserving deterministic differentially private alternating direction method of multiplier to learn secure representation outputs from SNNs that capture multi-scale relationships and facilitate the passage from local structure to global invariant features on text-attributed networks.", + "primary_area": "", + "author": "Huixin Zhan; Victor S. 
Sheng", + "authorids": "", + "aff": "Department of Computer Science, Texas Tech University; Department of Computer Science, Texas Tech University", + "bibtex": "@article{Zhan_Sheng_2024, title={Privacy-Preserving Representation Learning for Text-Attributed Networks with Simplicial Complexes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26932}, DOI={10.1609/aaai.v37i13.26932}, abstractNote={Although recent network representation learning (NRL) works in text-attributed networks demonstrated superior performance for various graph inference tasks, learning network representations could always raise privacy concerns when nodes represent people or human-related variables. Moreover, standard NRLs that leverage structural information from a graph proceed by first encoding pairwise relationships into learned representations and then analysing its properties. This approach is fundamentally misaligned with problems where the relationships involve multiple points, and topological structure must be encoded beyond pairwise interactions. Fortunately, the machinery of topological data analysis (TDA) and, in particular, simplicial neural networks (SNNs) offer a mathematically rigorous framework to evaluate not only higher-order interactions, but also global invariant features of the observed graph to systematically learn topological structures. It is critical to investigate if the representation outputs from SNNs are more vulnerable compared to regular representation outputs from graph neural networks (GNNs) via pairwise interactions. In my dissertation, I will first study learning the representations with text attributes for simplicial complexes (RT4SC) via SNNs. 
Then, I will conduct research on two potential attacks on the representation outputs from SNNs: (1) membership inference attack, which infers whether a certain node of a graph is inside the training data of the GNN model; and (2) graph reconstruction attacks, which infer the confidential edges of a text-attributed network. Finally, I will study a privacy-preserving deterministic differentially private alternating direction method of multiplier to learn secure representation outputs from SNNs that capture multi-scale relationships and facilitate the passage from local structure to global invariant features on text-attributed networks.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhan, Huixin and Sheng, Victor S.}, year={2024}, month={Jul.}, pages={16143-16144} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26932/26704", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26932", + "pdf_size": 70591, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6303896553956175882&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ttu.edu;ttu.edu", + "email": "ttu.edu;ttu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Texas Tech University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ttu.edu", + "aff_unique_abbr": "TTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26507", + "title": "ProKD: An Unsupervised Prototypical Knowledge Distillation Network for Zero-Resource Cross-Lingual Named Entity Recognition", + "track": "main", + "status": "Technical", + "abstract": "For named entity recognition (NER) in zero-resource languages, utilizing knowledge distillation methods to transfer language-independent knowledge from the 
rich-resource source languages to zero-resource languages is an effective means. Typically, these approaches adopt a teacher-student architecture, where the teacher network is trained in the source language, and the student network seeks to learn knowledge from the teacher network and is expected to perform well in the target language. Despite the impressive performance achieved by these methods, we argue that they have two limitations. Firstly, the teacher network fails to effectively learn language-independent knowledge shared across languages due to the differences in the feature distribution between the source and target languages. Secondly, the student network acquires all of its knowledge from the teacher network and ignores the learning of target language-specific knowledge.\nUndesirably, these limitations would hinder the model's performance in the target language. This paper proposes an unsupervised prototype knowledge distillation network (ProKD) to address these issues. Specifically, ProKD presents a contrastive learning-based prototype alignment method to achieve class feature alignment by adjusting the prototypes' distance from the source and target languages, boosting the teacher network's capacity to acquire language-independent knowledge. In addition, ProKD introduces a prototype self-training method to learn the intrinsic structure of the language by retraining the student network on the target data using samples' distance information from prototypes, thereby enhancing the student network's ability to acquire language-specific knowledge. 
Extensive experiments on three benchmark cross-lingual NER datasets demonstrate the effectiveness of our approach.", + "primary_area": "speech natural language processing", + "author": "Ling Ge; Chunming Hu; Guanghui Ma; Hong Zhang; Jihong Liu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China + College of Software, Beihang University, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China; National Computer Network Emergency Response Technical Team / Coordination Center of China, Beijing, China; School of Mechanical Engineering and Automation, Beihang University, Beijing, China", + "bibtex": "@article{Ge_Hu_Ma_Zhang_Liu_2023, title={ProKD: An Unsupervised Prototypical Knowledge Distillation Network for Zero-Resource Cross-Lingual Named Entity Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26507}, DOI={10.1609/aaai.v37i11.26507}, abstractNote={For named entity recognition (NER) in zero-resource languages, utilizing knowledge distillation methods to transfer language-independent knowledge from the rich-resource source languages to zero-resource languages is an effective means. Typically, these approaches adopt a teacher-student architecture, where the teacher network is trained in the source language, and the student network seeks to learn knowledge from the teacher network and is expected to perform well in the target language. Despite the impressive performance achieved by these methods, we argue that they have two limitations. Firstly, the teacher network fails to effectively learn language-independent knowledge shared across languages due to the differences in the feature distribution between the source and target languages. 
Secondly, the student network acquires all of its knowledge from the teacher network and ignores the learning of target language-specific knowledge.\nUndesirably, these limitations would hinder the model\u2019s performance in the target language. This paper proposes an unsupervised prototype knowledge distillation network (ProKD) to address these issues. Specifically, ProKD presents a contrastive learning-based prototype alignment method to achieve class feature alignment by adjusting the prototypes\u2019 distance from the source and target languages, boosting the teacher network\u2019s capacity to acquire language-independent knowledge. In addition, ProKD introduces a prototype self-training method to learn the intrinsic structure of the language by retraining the student network on the target data using samples\u2019 distance information from prototypes, thereby enhancing the student network\u2019s ability to acquire language-specific knowledge. Extensive experiments on three benchmark cross-lingual NER datasets demonstrate the effectiveness of our approach.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ge, Ling and Hu, Chunming and Ma, Guanghui and Zhang, Hong and Liu, Jihong}, year={2023}, month={Jun.}, pages={12818-12826} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26507/26279", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26507", + "pdf_size": 10905987, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6560092905579615425&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;isc.org.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;isc.org.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+0;0;1;0", + "aff_unique_norm": "Beihang University;National Computer Network Emergency Response Technical Team / Coordination Center of 
China", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.buaa.edu.cn;", + "aff_unique_abbr": "BUAA;", + "aff_campus_unique_index": "0;0+0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25525", + "title": "Probabilistic Generalization of Backdoor Trees with Application to SAT", + "track": "main", + "status": "Technical", + "abstract": "The concept of Strong Backdoor Sets (SBS) for Constraint Satisfaction Problems is well known as one of the attempts to exploit structural peculiarities in hard instances. However, in practice, finding an SBS for a particular instance is often harder than solving it. Recently, a probabilistic weakened variant of the SBS was introduced: in the SBS, all subproblems must be polynomially solvable, whereas in the probabilistic SBS only a large fraction \u03c1 of them should have this property. This new variant of backdoors called \u03c1-backdoors makes it possible to use the Monte Carlo method and metaheuristic optimization to find \u03c1-backdoors with \u03c1 very close to 1, and relatively fast. Despite the fact that in a \u03c1-backdoor-based decomposition a portion of hard subproblems remain, in practice the narrowing of the search space often allows solving the problem faster with such a backdoor than without it. In this paper, we significantly improve on the concept of \u03c1-backdoors by extending this concept to backdoor trees: we introduce \u03c1-backdoor trees, show the interconnections between SBS, \u03c1-backdoors, and the corresponding backdoor trees, and establish some new theoretical properties of backdoor trees. 
In the experimental part of the paper, we show that moving from the metaheuristic search for \u03c1-backdoors to that of \u03c1-backdoor trees allows drastically reducing the time required to construct the required decompositions without compromising their quality.", + "primary_area": "constraint satisfaction and optimization", + "author": "Alexander Semenov; Daniil Chivilikhin; Stepan Kochemazov; Ibragim Dzhiblavi", + "authorids": "", + "aff": "ITMO University, St. Petersburg, Russia; ITMO University, St. Petersburg, Russia; ITMO University, St. Petersburg, Russia; ITMO University, St. Petersburg, Russia", + "bibtex": "@article{Semenov_Chivilikhin_Kochemazov_Dzhiblavi_2023, title={Probabilistic Generalization of Backdoor Trees with Application to SAT}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25525}, DOI={10.1609/aaai.v37i4.25525}, abstractNote={The concept of Strong Backdoor Sets (SBS) for Constraint Satisfaction Problems is well known as one of the attempts to exploit structural peculiarities in hard instances. However, in practice, finding an SBS for a particular instance is often harder than solving it. Recently, a probabilistic weakened variant of the SBS was introduced: in the SBS, all subproblems must be polynomially solvable, whereas in the probabilistic SBS only a large fraction \u03c1 of them should have this property. This new variant of backdoors called \u03c1-backdoors makes it possible to use the Monte Carlo method and metaheuristic optimization to find \u03c1-backdoors with \u03c1 very close to 1, and relatively fast. Despite the fact that in a \u03c1-backdoor-based decomposition a portion of hard subproblems remain, in practice the narrowing of the search space often allows solving the problem faster with such a backdoor than without it. 
In this paper, we significantly improve on the concept of \u03c1-backdoors by extending this concept to backdoor trees: we introduce \u03c1-backdoor trees, show the interconnections between SBS, \u03c1-backdoors, and the corresponding backdoor trees, and establish some new theoretical properties of backdoor trees. In the experimental part of the paper, we show that moving from the metaheuristic search for \u03c1-backdoors to that of \u03c1-backdoor trees allows drastically reducing the time required to construct the required decompositions without compromising their quality.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Semenov, Alexander and Chivilikhin, Daniil and Kochemazov, Stepan and Dzhiblavi, Ibragim}, year={2023}, month={Jun.}, pages={4095-4103} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25525/25297", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25525", + "pdf_size": 240152, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3760584328533951309&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "itmo.ru;gmail.com;itmo.ru;gmail.com", + "email": "itmo.ru;gmail.com;itmo.ru;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "ITMO University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.itmo.ru", + "aff_unique_abbr": "ITMO", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "St. Petersburg", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Russia" + }, + { + "id": "article-26790", + "title": "Probabilistic Programs as an Action Description Language", + "track": "senior member presentation blue sky papers", + "status": "Technical", + "abstract": "Actions description languages (ADLs), such as STRIPS, PDDL, and RDDL specify the input format for planning algorithms. 
Unfortunately, their syntax is familiar to planning experts only, and not to potential users of planning technology. Moreover, this syntax limits the ability to describe complex and large domains. We argue that programming languages (PLs), and more specifically, probabilistic programming languages (PPLs), provide a more suitable alternative. PLs are familiar to all programmers, support complex data types and rich libraries for their manipulation, and have powerful constructs, such as loops, sub-routines, and local variables with which complex, realistic models and complex objectives can be simply and naturally specified. PPLs, specifically, make it easy to specify distributions, which is essential for stochastic models. The natural objection to this proposal is that PLs are opaque and too expressive, making reasoning about them difficult. However, PPLs also come with efficient inference algorithms, which, coupled with a growing body of work on sampling-based and gradient-based planning, imply that planning and execution monitoring can be carried out efficiently in practice. In this paper, we expand on this proposal, illustrating its potential with examples.", + "primary_area": "", + "author": "Ronen I. Brafman; David Tolpin; Or Wertheim", + "authorids": "", + "aff": "Department of Computer Science, Ben Gurion University of the Negev; Department of Computer Science, Ben Gurion University of the Negev; Department of Computer Science, Ben Gurion University of the Negev", + "bibtex": "@article{Brafman_Tolpin_Wertheim_2024, title={Probabilistic Programs as an Action Description Language}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26790}, DOI={10.1609/aaai.v37i13.26790}, abstractNote={Actions description languages (ADLs), such as STRIPS, PDDL, and RDDL specify the input format for planning algorithms. Unfortunately, their syntax is familiar to planning experts only, and not to potential users of planning technology. 
Moreover, this syntax limits the ability to describe complex and large domains. We argue that programming languages (PLs), and more specifically, probabilistic programming languages (PPLs), provide a more suitable alternative. PLs are familiar to all programmers, support complex data types and rich libraries for their manipulation, and have powerful constructs, such as loops, sub-routines, and local variables with which complex, realistic models and complex objectives can be simply and naturally specified. PPLs, specifically, make it easy to specify distributions, which is essential for stochastic models. The natural objection to this proposal is that PLs are opaque and too expressive, making reasoning about them difficult. However, PPLs also come with efficient inference algorithms, which, coupled with a growing body of work on sampling-based and gradient-based planning, imply that planning and execution monitoring can be carried out efficiently in practice. In this paper, we expand on this proposal, illustrating its potential with examples.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brafman, Ronen I. 
and Tolpin, David and Wertheim, Or}, year={2024}, month={Jul.}, pages={15351-15358} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26790/26562", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26790", + "pdf_size": 133883, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4008586586638285425&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "post.bgu.ac.il;post.bgu.ac.il;post.bgu.ac.il", + "email": "post.bgu.ac.il;post.bgu.ac.il;post.bgu.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Ben Gurion University of the Negev", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-26800", + "title": "Probabilistic Reasoning and Learning for Trustworthy AI", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "As automated decision-making systems are increasingly deployed in areas with personal and societal impacts, there is a growing demand for artificial intelligence and machine learning systems that are fair, robust, interpretable, and generally trustworthy. Ideally we would wish to answer questions regarding these properties and provide guarantees about any automated system to be deployed in the real world. This raises the need for a unified language and framework under which we can reason about and develop trustworthy AI systems. This talk will discuss how tractable probabilistic reasoning and learning provides such framework. \n\nIt is important to note that guarantees regarding fairness, robustness, etc., hold with respect to the distribution of the world in which the decision-making system operates. 
For example, to see whether automated loan decisions are biased against certain gender, one may compare the average decision for each gender; this requires knowledge of how the features used in the decision are distributed for each gender. Moreover, there are inherent uncertainties in modeling this distribution, in addition to the uncertainties when deploying a system in the real world, such as missing or noisy information. We can handle such uncertainties in a principled way through probabilistic reasoning. Taking fairness-aware learning as an example, we can deal with biased labels in the training data by explicitly modeling the observed labels as being generated from some probabilistic process that injects bias/noise to hidden, fair labels, particularly in a way that best explains the observed data.\n\nA key challenge that still needs to be addressed is that: we need models that can closely fit complex real-world distributions\u2014i.e. expressive\u2014while also being amenable to exact and efficient inference of probabilistic queries\u2014i.e. tractable. I will show that probabilistic circuits, a family of tractable probabilistic models, offer both such benefits. In order to ultimately develop a common framework to study various areas of trustworthy AI (e.g., privacy, fairness, explanations, etc.), we need models that can flexibly answer different questions, even the ones it did not foresee. 
This talk will thus survey the efforts to expand the horizon of complex reasoning capabilities of probabilistic circuits, especially highlighted by a modular approach that answers various queries via a pipeline of a handful of simple tractable operations.", + "primary_area": "", + "author": "YooJung Choi", + "authorids": "", + "aff": "School of Computing and Augmented Intelligence, Arizona State University", + "bibtex": "@article{Choi_2024, title={Probabilistic Reasoning and Learning for Trustworthy AI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26800}, DOI={10.1609/aaai.v37i13.26800}, abstractNote={As automated decision-making systems are increasingly deployed in areas with personal and societal impacts, there is a growing demand for artificial intelligence and machine learning systems that are fair, robust, interpretable, and generally trustworthy. Ideally we would wish to answer questions regarding these properties and provide guarantees about any automated system to be deployed in the real world. This raises the need for a unified language and framework under which we can reason about and develop trustworthy AI systems. This talk will discuss how tractable probabilistic reasoning and learning provides such framework. It is important to note that guarantees regarding fairness, robustness, etc., hold with respect to the distribution of the world in which the decision-making system operates. For example, to see whether automated loan decisions are biased against certain gender, one may compare the average decision for each gender; this requires knowledge of how the features used in the decision are distributed for each gender. Moreover, there are inherent uncertainties in modeling this distribution, in addition to the uncertainties when deploying a system in the real world, such as missing or noisy information. We can handle such uncertainties in a principled way through probabilistic reasoning. 
Taking fairness-aware learning as an example, we can deal with biased labels in the training data by explicitly modeling the observed labels as being generated from some probabilistic process that injects bias/noise to hidden, fair labels, particularly in a way that best explains the observed data. A key challenge that still needs to be addressed is that: we need models that can closely fit complex real-world distributions\u2014i.e. expressive\u2014while also being amenable to exact and efficient inference of probabilistic queries\u2014i.e. tractable. I will show that probabilistic circuits, a family of tractable probabilistic models, offer both such benefits. In order to ultimately develop a common framework to study various areas of trustworthy AI (e.g., privacy, fairness, explanations, etc.), we need models that can flexibly answer different questions, even the ones it did not foresee. This talk will thus survey the efforts to expand the horizon of complex reasoning capabilities of probabilistic circuits, especially highlighted by a modular approach that answers various queries via a pipeline of a handful of simple tractable operations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Choi, YooJung}, year={2024}, month={Jul.}, pages={15433-15433} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26800/26572", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26800", + "pdf_size": 38390, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:J8jtqTXPjXQJ:scholar.google.com/&scioq=Probabilistic+Reasoning+and+Learning+for+Trustworthy+AI&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "asu.edu", + "email": "asu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "School of Computing and Augmented Intelligence", + "aff_unique_url": "https://www.asu.edu", 
+ "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Tempe", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26914", + "title": "Probabilistic Shape Models of Anatomy Directly from Images", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "Statistical shape modeling (SSM) is an enabling tool in medical image analysis as it allows for population-based quantitative analysis. The traditional pipeline for landmark-based SSM from images requires painstaking and cost-prohibitive steps. My thesis aims to leverage probabilistic deep learning frameworks to streamline the adoption of SSM in biomedical research and practice. The expected outcomes of this work will be new frameworks for SSM that (1) provide reliable and calibrated uncertainty quantification, (2) are effective given limited or sparsely annotated/incomplete data, and (3) can make predictions from incomplete 4D spatiotemporal data. These efforts will reduce required costs and manual labor for anatomical SSM, helping SSM become a more viable clinical tool and advancing medical practice.", + "primary_area": "", + "author": "Jadie Adams", + "authorids": "", + "aff": "Scientific Computing and Imaging Institute, University of Utah, UT, USA+School of Computing, University of Utah, UT, USA", + "bibtex": "@article{Adams_2024, title={Probabilistic Shape Models of Anatomy Directly from Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26914}, DOI={10.1609/aaai.v37i13.26914}, abstractNote={Statistical shape modeling (SSM) is an enabling tool in medical image analysis as it allows for population-based quantitative analysis. The traditional pipeline for landmark-based SSM from images requires painstaking and cost-prohibitive steps. My thesis aims to leverage probabilistic deep learning frameworks to streamline the adoption of SSM in biomedical research and practice. 
The expected outcomes of this work will be new frameworks for SSM that (1) provide reliable and calibrated uncertainty quantification, (2) are effective given limited or sparsely annotated/incomplete data, and (3) can make predictions from incomplete 4D spatiotemporal data. These efforts will reduce required costs and manual labor for anatomical SSM, helping SSM become a more viable clinical tool and advancing medical practice.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Adams, Jadie}, year={2024}, month={Jul.}, pages={16107-16108} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26914/26686", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26914", + "pdf_size": 54828, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:w9Ks6s1peQcJ:scholar.google.com/&scioq=Probabilistic+Shape+Models+of+Anatomy+Directly+from+Images&hl=en&as_sdt=0,33", + "gs_version_total": 5, + "aff_domain": "sci.utah.edu", + "email": "sci.utah.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+0", + "aff_unique_norm": "University of Utah", + "aff_unique_dep": "Scientific Computing and Imaging Institute", + "aff_unique_url": "https://www.sci.utah.edu", + "aff_unique_abbr": "U of U", + "aff_campus_unique_index": "0+0", + "aff_campus_unique": "Salt Lake City", + "aff_country_unique_index": "0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26718", + "title": "Probabilities Are Not Enough: Formal Controller Synthesis for Stochastic Dynamical Models with Epistemic Uncertainty", + "track": "aaai special track", + "status": "Technical", + "abstract": "Capturing uncertainty in models of complex dynamical systems is crucial to designing safe controllers. Stochastic noise causes aleatoric uncertainty, whereas imprecise knowledge of model parameters leads to epistemic uncertainty. 
Several approaches use formal abstractions to synthesize policies that satisfy temporal specifications related to safety and reachability. However, the underlying models exclusively capture aleatoric but not epistemic uncertainty, and thus require that model parameters are known precisely. Our contribution to overcoming this restriction is a novel abstraction-based controller synthesis method for continuous-state models with stochastic noise and uncertain parameters. By sampling techniques and robust analysis, we capture both aleatoric and epistemic uncertainty, with a user-specified confidence level, in the transition probability intervals of a so-called interval Markov decision process (iMDP). We synthesize an optimal policy on this iMDP, which translates (with the specified confidence level) to a feedback controller for the continuous model with the same performance guarantees. Our experimental benchmarks confirm that accounting for epistemic uncertainty leads to controllers that are more robust against variations in parameter values.", + "primary_area": "safe and robust ai", + "author": "Thom Badings; Licio Romao; Alessandro Abate; Nils Jansen", + "authorids": "", + "aff": "Radboud University, Nijmegen, the Netherlands; University of Oxford, Oxford, United Kingdom; University of Oxford, Oxford, United Kingdom; Radboud University, Nijmegen, the Netherlands", + "bibtex": "@article{Badings_Romao_Abate_Jansen_2023, title={Probabilities Are Not Enough: Formal Controller Synthesis for Stochastic Dynamical Models with Epistemic Uncertainty}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26718}, DOI={10.1609/aaai.v37i12.26718}, abstractNote={Capturing uncertainty in models of complex dynamical systems is crucial to designing safe controllers. Stochastic noise causes aleatoric uncertainty, whereas imprecise knowledge of model parameters leads to epistemic uncertainty. 
Several approaches use formal abstractions to synthesize policies that satisfy temporal specifications related to safety and reachability. However, the underlying models exclusively capture aleatoric but not epistemic uncertainty, and thus require that model parameters are known precisely. Our contribution to overcoming this restriction is a novel abstraction-based controller synthesis method for continuous-state models with stochastic noise and uncertain parameters. By sampling techniques and robust analysis, we capture both aleatoric and epistemic uncertainty, with a user-specified confidence level, in the transition probability intervals of a so-called interval Markov decision process (iMDP). We synthesize an optimal policy on this iMDP, which translates (with the specified confidence level) to a feedback controller for the continuous model with the same performance guarantees. Our experimental benchmarks confirm that accounting for epistemic uncertainty leads to controllers that are more robust against variations in parameter values.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Badings, Thom and Romao, Licio and Abate, Alessandro and Jansen, Nils}, year={2023}, month={Jun.}, pages={14701-14710} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26718/26490", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26718", + "pdf_size": 1487071, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11105231621987296968&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "ru.nl;cs.ox.ac.uk;cs.ox.ac.uk;science.ru.nl", + "email": "ru.nl;cs.ox.ac.uk;cs.ox.ac.uk;science.ru.nl", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "Radboud University;University of Oxford", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ru.nl;https://www.ox.ac.uk", + "aff_unique_abbr": "RU;Oxford", + 
"aff_campus_unique_index": "0;1;1;0", + "aff_campus_unique": "Nijmegen;Oxford", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "the Netherlands;United Kingdom" + }, + { + "id": "article-26448", + "title": "Probabilities of Potential Outcome Types in Experimental Studies: Identification and Estimation Based on Proxy Covariate Information", + "track": "main", + "status": "Technical", + "abstract": "The concept of potential outcome types is one of the fundamental components of causal inference. However, even in randomized experiments, assumptions on the data generating process, such as monotonicity, are required to evaluate the probabilities of the potential outcome types. To solve the problem without such assumptions in experimental studies, a novel identification condition based on proxy covariate information is proposed in this paper. In addition, the estimation problem of the probabilities of the potential outcome types reduces to that of singular models when they are identifiable through the proposed condition. Thus, they cannot be evaluated by standard statistical estimation methods. To overcome this difficulty, new plug-in estimators of these probabilities are presented, and the asymptotic normality of the proposed estimators is shown.", + "primary_area": "reasoning under uncertainty", + "author": "Ryusei Shingaki; Manabu Kuroki", + "authorids": "", + "aff": "Graduate School of Engineering Science, Yokohama National University; University of Greenwich", + "bibtex": "@article{Shingaki_Kuroki_2023, title={Probabilities of Potential Outcome Types in Experimental Studies: Identification and Estimation Based on Proxy Covariate Information}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26448}, DOI={10.1609/aaai.v37i10.26448}, abstractNote={The concept of potential outcome types is one of the fundamental components of causal inference. 
However, even in randomized experiments, assumptions on the data generating process, such as monotonicity, are required to evaluate the probabilities of the potential outcome types. To solve the problem without such assumptions in experimental studies, a novel identification condition based on proxy covariate information is proposed in this paper. In addition, the estimation problem of the probabilities of the potential outcome types reduces to that of singular models when they are identifiable through the proposed condition. Thus, they cannot be evaluated by standard statistical estimation methods. To overcome this difficulty, new plug-in estimators of these probabilities are presented, and the asymptotic normality of the proposed estimators is shown.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shingaki, Ryusei and Kuroki, Manabu}, year={2023}, month={Jun.}, pages={12287-12294} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26448/26220", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26448", + "pdf_size": 216071, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4546804880197680492&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ynu.jp;ynu.ac.jp", + "email": "ynu.jp;ynu.ac.jp", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Yokohama National University;University of Greenwich", + "aff_unique_dep": "Graduate School of Engineering Science;", + "aff_unique_url": "https://www.yokohama-nu.ac.jp;https://www.gre.ac.uk", + "aff_unique_abbr": "YNU;Greenwich", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Yokohama;", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Japan;United Kingdom" + }, + { + "id": "article-25244", + "title": "Probability Guided Loss for Long-Tailed Multi-Label Image Classification", + "track": "main", + "status": "Technical", + 
"abstract": "Long-tailed learning has attracted increasing attention in very recent years. Long-tailed multi-label image classification is one subtask and remains challenging and poorly researched. In this paper, we provide a fresh perspective from probability to tackle this problem. More specifically, we find that existing cost-sensitive learning methods for long-tailed multi-label classification will affect the predicted probability of positive and negative labels in varying degrees during training, and different processes of probability will affect the final performance in turn. We thus propose a probability guided loss which contains two components to control this process. One is the probability re-balancing which can flexibly adjust the process of training probability. And the other is the adaptive probability-aware focal which can further reduce the probability gap between positive and negative labels. We conduct extensive experiments on two long-tailed multi-label image classification datasets: VOC-LT and COCO-LT. The results demonstrate the rationality and superiority of our strategy.", + "primary_area": "computer vision ii", + "author": "Dekun Lin", + "authorids": "", + "aff": "Chengdu Institute of Computer Applications, Chinese Academy of Sciences, Chengdu 610041, China+University of Chinese Academy of Sciences, Beijing 100049, China", + "bibtex": "@article{Lin_2023, title={Probability Guided Loss for Long-Tailed Multi-Label Image Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25244}, DOI={10.1609/aaai.v37i2.25244}, abstractNote={Long-tailed learning has attracted increasing attention in very recent years. Long-tailed multi-label image classification is one subtask and remains challenging and poorly researched. In this paper, we provide a fresh perspective from probability to tackle this problem. 
More specifically, we find that existing cost-sensitive learning methods for long-tailed multi-label classification will affect the predicted probability of positive and negative labels in varying degrees during training, and different processes of probability will affect the final performance in turn. We thus propose a probability guided loss which contains two components to control this process. One is the probability re-balancing which can flexibly adjust the process of training probability. And the other is the adaptive probability-aware focal which can further reduce the probability gap between positive and negative labels. We conduct extensive experiments on two long-tailed multi-label image classification datasets: VOC-LT and COCO-LT. The results demonstrate the rationality and superiority of our strategy.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Dekun}, year={2023}, month={Jun.}, pages={1577-1585} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25244/25016", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25244", + "pdf_size": 356822, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15833881417625969931&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "163.com", + "email": "163.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+1", + "aff_unique_norm": "Chengdu Institute of Computer Applications;University of Chinese Academy of Sciences", + "aff_unique_dep": "Computer Applications;", + "aff_unique_url": ";http://www.ucas.ac.cn", + "aff_unique_abbr": ";UCAS", + "aff_campus_unique_index": "0+1", + "aff_campus_unique": "Chengdu;Beijing", + "aff_country_unique_index": "0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25732", + "title": "Probably Approximate Shapley Fairness with Applications in Machine Learning", + "track": "main", + "status": "Technical", + "abstract": 
"The Shapley value (SV) is adopted in various scenarios in machine learning (ML), including data valuation, agent valuation, and feature attribution, as it satisfies their fairness requirements. However, as exact SVs are infeasible to compute in practice, SV estimates are approximated instead. This approximation step raises an important question: do the SV estimates preserve the fairness guarantees of exact SVs? We observe that the fairness guarantees of exact SVs are too restrictive for SV estimates. Thus, we generalise Shapley fairness to probably approximate Shapley fairness and propose fidelity score, a metric to measure the variation of SV estimates, that determines how probable the fairness guarantees hold. Our last theoretical contribution is a novel greedy active estimation (GAE) algorithm that will maximise the lowest fidelity score and achieve a better fairness guarantee than the de facto Monte-Carlo estimation. We empirically verify GAE outperforms several existing methods in guaranteeing fairness while remaining competitive in estimation accuracy in various ML scenarios using real-world datasets.", + "primary_area": "game theory and economic paradigms", + "author": "Zijian Zhou; Xinyi Xu; Rachael Hwee Ling Sim; Chuan Sheng Foo; Bryan Kian Hsiang Low", + "authorids": "", + "aff": "Department of Computer Science, National University of Singapore, Singapore; Department of Computer Science, National University of Singapore, Singapore + Institute for Infocomm Research, A*STAR, Singapore; Department of Computer Science, National University of Singapore, Singapore; Institute for Infocomm Research, A*STAR, Singapore + Centre for Frontier AI Research, A*STAR, Singapore; Department of Computer Science, National University of Singapore, Singapore", + "bibtex": "@article{Zhou_Xu_Sim_Foo_Low_2023, title={Probably Approximate Shapley Fairness with Applications in Machine Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25732}, 
DOI={10.1609/aaai.v37i5.25732}, abstractNote={The Shapley value (SV) is adopted in various scenarios in machine learning (ML), including data valuation, agent valuation, and feature attribution, as it satisfies their fairness requirements. However, as exact SVs are infeasible to compute in practice, SV estimates are approximated instead. This approximation step raises an important question: do the SV estimates preserve the fairness guarantees of exact SVs? We observe that the fairness guarantees of exact SVs are too restrictive for SV estimates. Thus, we generalise Shapley fairness to probably approximate Shapley fairness and propose fidelity score, a metric to measure the variation of SV estimates, that determines how probable the fairness guarantees hold. Our last theoretical contribution is a novel greedy active estimation (GAE) algorithm that will maximise the lowest fidelity score and achieve a better fairness guarantee than the de facto Monte-Carlo estimation. We empirically verify GAE outperforms several existing methods in guaranteeing fairness while remaining competitive in estimation accuracy in various ML scenarios using real-world datasets.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Zijian and Xu, Xinyi and Sim, Rachael Hwee Ling and Foo, Chuan Sheng and Low, Bryan Kian Hsiang}, year={2023}, month={Jun.}, pages={5910-5918} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25732/25504", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25732", + "pdf_size": 1737005, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15399924782101913396&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "u.nus.edu;u.nus.edu;u.nus.edu;i2r.a-star.edu.sg;comp.nus.edu.sg", + "email": "u.nus.edu;u.nus.edu;u.nus.edu;i2r.a-star.edu.sg;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;1+2;0", + 
"aff_unique_norm": "National University of Singapore;Institute for Infocomm Research;A*STAR", + "aff_unique_dep": "Department of Computer Science;;Centre for Frontier AI Research", + "aff_unique_url": "https://www.nus.edu.sg;https://www.i2r.a-star.edu.sg;https://www.a-star.edu.sg", + "aff_unique_abbr": "NUS;I2R;A*STAR", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0+0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25087", + "title": "Progress and Limitations of Deep Networks to Recognize Objects in Unusual Poses", + "track": "main", + "status": "Technical", + "abstract": "Deep networks should be robust to rare events if they are to be successfully deployed in high-stakes real-world applications. Here we study the capability of deep networks to recognize objects in unusual poses. We create a synthetic dataset of images of objects in unusual orientations, and evaluate the robustness of a collection of 38 recent and competitive deep networks for image classification. We show that classifying these images is still a challenge for all networks tested, with an average accuracy drop of 29.5% compared to when the objects are presented\nupright. This brittleness is largely unaffected by various design choices, such as training losses, architectures, dataset modalities, and data-augmentation schemes. However, networks trained on very large datasets substantially outperform others, with the best network tested\u2014Noisy Student trained on JFT-300M\u2014showing a relatively small accuracy drop of only 14.5% on unusual poses. Nevertheless, a visual inspection of the failures of Noisy Student reveals a remaining gap in robustness with humans. Furthermore, combining multiple object transformations\u20143D-rotations and scaling\u2014further degrades the performance of all networks. Our results provide another measurement of the robustness of deep networks to consider when using them in the real world. 
Code and datasets are available at https://github.com/amro-kamal/ObjectPose.", + "primary_area": "computer vision i", + "author": "Amro Abbas; St\u00e9phane Deny", + "authorids": "", + "aff": "The African Institute For Mathematical Sciences; Aalto University", + "bibtex": "@article{Abbas_Deny_2023, title={Progress and Limitations of Deep Networks to Recognize Objects in Unusual Poses}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25087}, DOI={10.1609/aaai.v37i1.25087}, abstractNote={Deep networks should be robust to rare events if they are to be successfully deployed in high-stakes real-world applications. Here we study the capability of deep networks to recognize objects in unusual poses. We create a synthetic dataset of images of objects in unusual orientations, and evaluate the robustness of a collection of 38 recent and competitive deep networks for image classification. We show that classifying these images is still a challenge for all networks tested, with an average accuracy drop of 29.5% compared to when the objects are presented\nupright. This brittleness is largely unaffected by various design choices, such as training losses, architectures, dataset modalities, and data-augmentation schemes. However, networks trained on very large datasets substantially outperform others, with the best network tested\u2014Noisy Student trained on JFT-300M\u2014showing a relatively small accuracy drop of only 14.5% on unusual poses. Nevertheless, a visual inspection of the failures of Noisy Student reveals a remaining gap in robustness with humans. Furthermore, combining multiple object transformations\u20143D-rotations and scaling\u2014further degrades the performance of all networks. Our results provide another measurement of the robustness of deep networks to consider when using them in the real world. 
Code and datasets are available at https://github.com/amro-kamal/ObjectPose.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Abbas, Amro and Deny, St\u00e9phane}, year={2023}, month={Jun.}, pages={160-168} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25087/24859", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25087", + "pdf_size": 11836329, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6215902837327386112&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "aimsammi.org;aalto.fi", + "email": "aimsammi.org;aalto.fi", + "github": "https://github.com/amro-kamal/ObjectPose", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "African Institute for Mathematical Sciences;Aalto University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.africamaths.org;https://www.aalto.fi", + "aff_unique_abbr": "AIMS;Aalto", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "South Africa;Finland" + }, + { + "id": "article-25487", + "title": "Progressive Bayesian Inference for Scribble-Supervised Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "The scribble-supervised semantic segmentation is an important yet challenging task in the field of computer vision. To deal with the pixel-wise sparse annotation problem, we propose a Progressive Bayesian Inference (PBI) framework to boost the performance of the scribble-supervised semantic segmentation, which can effectively infer the semantic distribution of these unlabeled pixels to guide the optimization of the segmentation network. The PBI dynamically improves the model learning from two aspects: the Bayesian inference module (i.e., semantic distribution learning) and the pixel-wise segmenter (i.e., model updating). 
Specifically, we effectively infer the semantic probability distribution of these unlabeled pixels with our designed Bayesian inference module, where its guidance is estimated through the Bayesian expectation maximization under the situation of partially observed data. The segmenter can be progressively improved under the joint guidance of the original scribble information and the learned semantic distribution. The segmenter optimization and semantic distribution promotion are encapsulated into a unified architecture where they could improve each other with mutual evolution in a progressive fashion. Comprehensive evaluations of several benchmark datasets demonstrate the effectiveness and superiority of our proposed PBI when compared with other state-of-the-art methods applied to the scribble-supervised semantic segmentation task.", + "primary_area": "computer vision iii", + "author": "Chuanwei Zhou; Chunyan Xu; Zhen Cui", + "authorids": "", + "aff": "PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Jiangsu Key Lab of Image and Video Understanding for Social Security, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", + "bibtex": "@article{Zhou_Xu_Cui_2023, title={Progressive Bayesian Inference for Scribble-Supervised Semantic Segmentation}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25487}, DOI={10.1609/aaai.v37i3.25487}, abstractNote={The scribble-supervised semantic segmentation is an important yet challenging task in the field of computer vision. To deal with the pixel-wise sparse annotation problem, we propose a Progressive Bayesian Inference (PBI) framework to boost the performance of the scribble-supervised semantic segmentation, which can effectively infer the semantic distribution of these unlabeled pixels to guide the optimization of the segmentation network. The PBI dynamically improves the model learning from two aspects: the Bayesian inference module (i.e., semantic distribution learning) and the pixel-wise segmenter (i.e., model updating). Specifically, we effectively infer the semantic probability distribution of these unlabeled pixels with our designed Bayesian inference module, where its guidance is estimated through the Bayesian expectation maximization under the situation of partially observed data. The segmenter can be progressively improved under the joint guidance of the original scribble information and the learned semantic distribution. The segmenter optimization and semantic distribution promotion are encapsulated into a unified architecture where they could improve each other with mutual evolution in a progressive fashion. 
Comprehensive evaluations of several benchmark datasets demonstrate the effectiveness and superiority of our proposed PBI when compared with other state-of-the-art methods applied to the scribble-supervised semantic segmentation task.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Chuanwei and Xu, Chunyan and Cui, Zhen}, year={2023}, month={Jun.}, pages={3751-3759} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25487/25259", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25487", + "pdf_size": 442181, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2860011239495980756&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University of Science and Technology", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.nust.edu.cn", + "aff_unique_abbr": "NJUST", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26254", + "title": "Progressive Deep Multi-View Comprehensive Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Multi-view Comprehensive Representation Learning (MCRL) aims to synthesize information from multiple views to learn comprehensive representations of data items. Prevalent deep MCRL methods typically concatenate synergistic view-specific representations or average aligned view-specific representations in the fusion stage. 
However, the performance of synergistic fusion methods inevitably degenerate or even fail when partial views are missing in real-world applications; the aligned based fusion methods usually cannot fully exploit the complementarity of multi-view data. To eliminate all these drawbacks, in this work we present a Progressive Deep Multi-view Fusion (PDMF) method. Considering the multi-view comprehensive representation should contain complete information and the view-specific data contain partial information, we deem that it is unstable to directly learn the mapping from partial information to complete information. Hence, PDMF employs a progressive learning strategy, which contains the pre-training and fine-tuning stages. In the pre-training stage, PDMF decodes the auxiliary comprehensive representation to the view-specific data. It also captures the consistency and complementarity by learning the relations between the dimensions of the auxiliary comprehensive representation and all views. In the fine-tuning stage, PDMF learns the mapping from the original data to the comprehensive representation with the help of the auxiliary comprehensive representation and relations. Experiments conducted on a synthetic toy dataset and 4 real-world datasets show that PDMF outperforms state-of-the-art baseline methods. 
The code is released at https://github.com/winterant/PDMF.", + "primary_area": "machine learning iv", + "author": "Cai Xu; Wei Zhao; Jinglong Zhao; Ziyu Guan; Yaming Yang; Long Chen; Xiangyu Song", + "authorids": "", + "aff": "School of Computer Science and Technology, Xidian University, China; School of Computer Science and Technology, Xidian University, China; School of Computer Science and Technology, Xidian University, China; School of Computer Science and Technology, Xidian University, China; School of Computer Science and Technology, Xidian University, China; Xi\u2019an University of Posts and Telecommunications, China; Swinburne University of Technology, Melbourne, Australia", + "bibtex": "@article{Xu_Zhao_Zhao_Guan_Yang_Chen_Song_2023, title={Progressive Deep Multi-View Comprehensive Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26254}, DOI={10.1609/aaai.v37i9.26254}, abstractNote={Multi-view Comprehensive Representation Learning (MCRL) aims to synthesize information from multiple views to learn comprehensive representations of data items. Prevalent deep MCRL methods typically concatenate synergistic view-specific representations or average aligned view-specific representations in the fusion stage. However, the performance of synergistic fusion methods inevitably degenerate or even fail when partial views are missing in real-world applications; the aligned based fusion methods usually cannot fully exploit the complementarity of multi-view data. To eliminate all these drawbacks, in this work we present a Progressive Deep Multi-view Fusion (PDMF) method. Considering the multi-view comprehensive representation should contain complete information and the view-specific data contain partial information, we deem that it is unstable to directly learn the mapping from partial information to complete information. Hence, PDMF employs a progressive learning strategy, which contains the pre-training and fine-tuning stages. 
In the pre-training stage, PDMF decodes the auxiliary comprehensive representation to the view-specific data. It also captures the consistency and complementarity by learning the relations between the dimensions of the auxiliary comprehensive representation and all views. In the fine-tuning stage, PDMF learns the mapping from the original data to the comprehensive representation with the help of the auxiliary comprehensive representation and relations. Experiments conducted on a synthetic toy dataset and 4 real-world datasets show that PDMF outperforms state-of-the-art baseline methods. The code is released at https://github.com/winterant/PDMF.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Cai and Zhao, Wei and Zhao, Jinglong and Guan, Ziyu and Yang, Yaming and Chen, Long and Song, Xiangyu}, year={2023}, month={Jun.}, pages={10557-10565} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26254/26026", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26254", + "pdf_size": 1603895, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1330535454287550567&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "xidian.edu.cn;mail.xidian.edu.cn;stu.xidian.edu.cn;xidian.edu.cn;xidian.edu.cn;xupt.edu.cn;swin.edu.au", + "email": "xidian.edu.cn;mail.xidian.edu.cn;stu.xidian.edu.cn;xidian.edu.cn;xidian.edu.cn;xupt.edu.cn;swin.edu.au", + "github": "https://github.com/winterant/PDMF", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;2", + "aff_unique_norm": "Xidian University;Xi'an University of Posts and Telecommunications;Swinburne University of Technology", + "aff_unique_dep": "School of Computer Science and Technology;;", + "aff_unique_url": "http://www.xidian.edu.cn/;http://www.xupt.edu.cn;https://www.swinburne.edu.au", + "aff_unique_abbr": "Xidian;XUPT;SUT", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": 
";Xi'an;Melbourne", + "aff_country_unique_index": "0;0;0;0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25283", + "title": "Progressive Few-Shot Adaptation of Generative Model with Align-Free Spatial Correlation", + "track": "main", + "status": "Technical", + "abstract": "In few-shot generative model adaptation, the model for target domain is prone to the mode-collapse. Recent studies attempted to mitigate the problem by matching the relationship among samples generated from the same latent codes in source and target domains. The objective is further extended to image patch-level to transfer the spatial correlation within an instance. However, the patch-level approach assumes the consistency of spatial structure between source and target domains. For example, the positions of eyes in two domains are almost identical. Thus, it can bring visual artifacts if source and target domain images are not nicely aligned. In this paper, we propose a few-shot generative model adaptation method free from such assumption, based on a motivation that generative models are progressively adapting from the source domain to the target domain. Such progressive changes allow us to identify semantically coherent image regions between instances generated by models at a neighboring training iteration to consider the spatial correlation. We also propose an importance-based patch selection strategy to reduce the complexity of patch-level correlation matching. 
Our method shows the state-of-the-art few-shot domain adaptation performance in the qualitative and quantitative evaluations.", + "primary_area": "computer vision ii", + "author": "Jongbo Moon; Hyunjun Kim; Jae-Pil Heo", + "authorids": "", + "aff": "Sungkyunkwan University; Sungkyunkwan University; Sungkyunkwan University", + "bibtex": "@article{Moon_Kim_Heo_2023, title={Progressive Few-Shot Adaptation of Generative Model with Align-Free Spatial Correlation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25283}, DOI={10.1609/aaai.v37i2.25283}, abstractNote={In few-shot generative model adaptation, the model for target domain is prone to the mode-collapse. Recent studies attempted to mitigate the problem by matching the relationship among samples generated from the same latent codes in source and target domains. The objective is further extended to image patch-level to transfer the spatial correlation within an instance. However, the patch-level approach assumes the consistency of spatial structure between source and target domains. For example, the positions of eyes in two domains are almost identical. Thus, it can bring visual artifacts if source and target domain images are not nicely aligned. In this paper, we propose a few-shot generative model adaptation method free from such assumption, based on a motivation that generative models are progressively adapting from the source domain to the target domain. Such progressive changes allow us to identify semantically coherent image regions between instances generated by models at a neighboring training iteration to consider the spatial correlation. We also propose an importance-based patch selection strategy to reduce the complexity of patch-level correlation matching. 
Our method shows the state-of-the-art few-shot domain adaptation performance in the qualitative and quantitative evaluations.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Moon, Jongbo and Kim, Hyunjun and Heo, Jae-Pil}, year={2023}, month={Jun.}, pages={1923-1930} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25283/25055", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25283", + "pdf_size": 1067636, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18357521152540453728&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "skku.edu;skku.edu;skku.edu", + "email": "skku.edu;skku.edu;skku.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Sungkyunkwan University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.skku.edu", + "aff_unique_abbr": "SKKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25144", + "title": "Progressive Multi-View Human Mesh Recovery with Self-Supervision", + "track": "main", + "status": "Technical", + "abstract": "To date, little attention has been given to multi-view 3D human mesh estimation, despite real-life applicability (e.g., motion capture, sport analysis) and robustness to single-view ambiguities. Existing solutions typically suffer from poor generalization performance to new settings, largely due to the limited diversity of image/3D-mesh pairs in multi-view training data. To address this shortcoming, people have explored the use of synthetic images. But besides the usual impact of visual gap between rendered and target data, synthetic-data-driven multi-view estimators also suffer from overfitting to the camera viewpoint distribution sampled during training which usually differs from real-world distributions. 
Tackling both challenges, we propose a novel simulation-based training pipeline for multi-view human mesh recovery, which (a) relies on intermediate 2D representations which are more robust to synthetic-to-real domain gap; (b) leverages learnable calibration and triangulation to adapt to more diversified camera setups; and (c) progressively aggregates multi-view information in a canonical 3D space to remove ambiguities in 2D representations. Through extensive benchmarking, we demonstrate the superiority of the proposed solution especially for unseen in-the-wild scenarios.", + "primary_area": "computer vision i", + "author": "Xuan Gong; Liangchen Song; Meng Zheng; Benjamin Planche; Terrence Chen; Junsong Yuan; David Doermann; Ziyan Wu", + "authorids": "", + "aff": "United Imaging Intelligence, Cambridge MA 02140 USA+University at Buffalo, Buffalo NY 14260 USA; United Imaging Intelligence, Cambridge MA 02140 USA+University at Buffalo, Buffalo NY 14260 USA; United Imaging Intelligence, Cambridge MA 02140 USA; United Imaging Intelligence, Cambridge MA 02140 USA; United Imaging Intelligence, Cambridge MA 02140 USA; University at Buffalo, Buffalo NY 14260 USA; University at Buffalo, Buffalo NY 14260 USA; United Imaging Intelligence, Cambridge MA 02140 USA", + "bibtex": "@article{Gong_Song_Zheng_Planche_Chen_Yuan_Doermann_Wu_2023, title={Progressive Multi-View Human Mesh Recovery with Self-Supervision}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25144}, DOI={10.1609/aaai.v37i1.25144}, abstractNote={To date, little attention has been given to multi-view 3D human mesh estimation, despite real-life applicability (e.g., motion capture, sport analysis) and robustness to single-view ambiguities. Existing solutions typically suffer from poor generalization performance to new settings, largely due to the limited diversity of image/3D-mesh pairs in multi-view training data. To address this shortcoming, people have explored the use of synthetic images. 
But besides the usual impact of visual gap between rendered and target data, synthetic-data-driven multi-view estimators also suffer from overfitting to the camera viewpoint distribution sampled during training which usually differs from real-world distributions. Tackling both challenges, we propose a novel simulation-based training pipeline for multi-view human mesh recovery, which (a) relies on intermediate 2D representations which are more robust to synthetic-to-real domain gap; (b) leverages learnable calibration and triangulation to adapt to more diversified camera setups; and (c) progressively aggregates multi-view information in a canonical 3D space to remove ambiguities in 2D representations. Through extensive benchmarking, we demonstrate the superiority of the proposed solution especially for unseen in-the-wild scenarios.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gong, Xuan and Song, Liangchen and Zheng, Meng and Planche, Benjamin and Chen, Terrence and Yuan, Junsong and Doermann, David and Wu, Ziyan}, year={2023}, month={Jun.}, pages={676-684} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25144/24916", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25144", + "pdf_size": 1573257, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4995411685926340403&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "buffalo.edu;buffalo.edu;uii-ai.com;uii-ai.com;uii-ai.com;buffalo.edu;buffalo.edu;uii-ai.com", + "email": "buffalo.edu;buffalo.edu;uii-ai.com;uii-ai.com;uii-ai.com;buffalo.edu;buffalo.edu;uii-ai.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0;0;0;1;1;0", + "aff_unique_norm": "United Imaging Intelligence;University at Buffalo", + "aff_unique_dep": ";", + "aff_unique_url": ";https://www.buffalo.edu", + "aff_unique_abbr": ";UB", + "aff_campus_unique_index": "0+1;0+1;0;0;0;1;1;0", + 
"aff_campus_unique": "Cambridge;Buffalo", + "aff_country_unique_index": "0+0;0+0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25262", + "title": "Progressive Neighborhood Aggregation for Semantic Segmentation Refinement", + "track": "main", + "status": "Technical", + "abstract": "Multi-scale features from backbone networks have been widely applied to recover object details in segmentation tasks. Generally, the multi-level features are fused in a certain manner for further pixel-level dense prediction. Whereas, the spatial structure information is not fully explored, that is similar nearby pixels can be used to complement each other. In this paper, we investigate a progressive neighborhood aggregation (PNA) framework to refine the semantic segmentation prediction, resulting in an end-to-end solution that can perform the coarse prediction and refinement in a unified network. Specifically, we first present a neighborhood aggregation module, the neighborhood similarity matrices for each pixel are estimated on multi-scale features, which are further used to progressively aggregate the high-level feature for recovering the spatial structure. In addition, to further integrate the high-resolution details into the aggregated feature, we apply a self-aggregation module on the low-level features to emphasize important semantic information for complementing losing spatial details. Extensive experiments on five segmentation datasets, including Pascal VOC 2012, CityScapes, COCO-Stuff 10k, DeepGlobe, and Trans10k, demonstrate that the proposed framework can be cascaded into existing segmentation models providing consistent improvements. In particular, our method achieves new state-of-the-art performances on two challenging datasets, DeepGlobe and Trans10k. 
The code is available at https://github.com/liutinglt/PNA.", + "primary_area": "computer vision ii", + "author": "Ting Liu; Yunchao Wei; Yanning Zhang", + "authorids": "", + "aff": "Northwestern Polytechnical University, China; Beijing Jiaotong University, China; Northwestern Polytechnical University, China", + "bibtex": "@article{Liu_Wei_Zhang_2023, title={Progressive Neighborhood Aggregation for Semantic Segmentation Refinement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25262}, DOI={10.1609/aaai.v37i2.25262}, abstractNote={Multi-scale features from backbone networks have been widely applied to recover object details in segmentation tasks. Generally, the multi-level features are fused in a certain manner for further pixel-level dense prediction. Whereas, the spatial structure information is not fully explored, that is similar nearby pixels can be used to complement each other. In this paper, we investigate a progressive neighborhood aggregation (PNA) framework to refine the semantic segmentation prediction, resulting in an end-to-end solution that can perform the coarse prediction and refinement in a unified network. Specifically, we first present a neighborhood aggregation module, the neighborhood similarity matrices for each pixel are estimated on multi-scale features, which are further used to progressively aggregate the high-level feature for recovering the spatial structure. In addition, to further integrate the high-resolution details into the aggregated feature, we apply a self-aggregation module on the low-level features to emphasize important semantic information for complementing losing spatial details. Extensive experiments on five segmentation datasets, including Pascal VOC 2012, CityScapes, COCO-Stuff 10k, DeepGlobe, and Trans10k, demonstrate that the proposed framework can be cascaded into existing segmentation models providing consistent improvements. 
In particular, our method achieves new state-of-the-art performances on two challenging datasets, DeepGlobe and Trans10k. The code is available at https://github.com/liutinglt/PNA.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Ting and Wei, Yunchao and Zhang, Yanning}, year={2023}, month={Jun.}, pages={1737-1745} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25262/25034", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25262", + "pdf_size": 1997505, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4317839272726232095&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "nwpu.edu.cn;gmail.com;nwpu.edu.cn", + "email": "nwpu.edu.cn;gmail.com;nwpu.edu.cn", + "github": "https://github.com/liutinglt/PNA", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Northwestern Polytechnical University;Beijing Jiaotong University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.nwpu.edu.cn;http://www.bjtu.edu.cn", + "aff_unique_abbr": "NWPU;BJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26495", + "title": "Prompt-Augmented Linear Probing: Scaling beyond the Limit of Few-Shot In-Context Learners", + "track": "main", + "status": "Technical", + "abstract": "Through in-context learning (ICL), large-scale language models are effective few-shot learners without additional model fine-tuning. However, the ICL performance does not scale well with the number of available training sample as it is limited by the inherent input length constraint of the underlying language model. 
Meanwhile, many studies have revealed that language models are also powerful feature extractors, allowing them to be utilized in a black-box manner and enabling the linear probing paradigm, where lightweight discriminators are trained on top of the pre-extracted input representations. This paper proposes prompt-augmented linear probing (PALP), a hybrid of linear probing and ICL, which leverages the best of both worlds. PALP inherits the scalability of linear probing and the capability of enforcing language models to derive more meaningful representations via tailoring input into a more conceivable form. Throughout in-depth investigations on various datasets, we verified that PALP significantly closes the gap between ICL in the data-hungry scenario and fine-tuning in the data-abundant scenario with little training overhead, potentially making PALP a strong alternative in a black-box scenario.", + "primary_area": "speech natural language processing", + "author": "Hyunsoo Cho; Hyuhng Joon Kim; Junyeob Kim; Sang-Woo Lee; Sang-goo Lee; Kang Min Yoo; Taeuk Kim", + "authorids": "", + "aff": "Seoul National University; Seoul National University; Seoul National University; NAVER Cloud+KAIST; Seoul National University; Seoul National University+NAVER Cloud; Hanyang University", + "bibtex": "@article{Cho_Kim_Kim_Lee_Lee_Yoo_Kim_2023, title={Prompt-Augmented Linear Probing: Scaling beyond the Limit of Few-Shot In-Context Learners}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26495}, DOI={10.1609/aaai.v37i11.26495}, abstractNote={Through in-context learning (ICL), large-scale language models are effective few-shot learners without additional model fine-tuning. However, the ICL performance does not scale well with the number of available training sample as it is limited by the inherent input length constraint of the underlying language model. 
Meanwhile, many studies have revealed that language models are also powerful feature extractors, allowing them to be utilized in a black-box manner and enabling the linear probing paradigm, where lightweight discriminators are trained on top of the pre-extracted input representations. This paper proposes prompt-augmented linear probing (PALP), a hybrid of linear probing and ICL, which leverages the best of both worlds. PALP inherits the scalability of linear probing and the capability of enforcing language models to derive more meaningful representations via tailoring input into a more conceivable form. Throughout in-depth investigations on various datasets, we verified that PALP significantly closes the gap between ICL in the data-hungry scenario and fine-tuning in the data-abundant scenario with little training overhead, potentially making PALP a strong alternative in a black-box scenario.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cho, Hyunsoo and Kim, Hyuhng Joon and Kim, Junyeob and Lee, Sang-Woo and Lee, Sang-goo and Yoo, Kang Min and Kim, Taeuk}, year={2023}, month={Jun.}, pages={12709-12718} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26495/26267", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26495", + "pdf_size": 3949970, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16002462555393912854&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "europa.snu.ac.kr;europa.snu.ac.kr;europa.snu.ac.kr;europa.snu.ac.kr;navercorp.com;navercorp.com;hanyang.ac.kr", + "email": "europa.snu.ac.kr;europa.snu.ac.kr;europa.snu.ac.kr;europa.snu.ac.kr;navercorp.com;navercorp.com;hanyang.ac.kr", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1+2;0;0+1;3", + "aff_unique_norm": "Seoul National University;NAVER Cloud;Korea Advanced Institute of Science and Technology;Hanyang University", + 
"aff_unique_dep": ";;;", + "aff_unique_url": "https://www.snu.ac.kr;https://www.naver.com;https://www.kaist.ac.kr;https://www.hanyang.ac.kr", + "aff_unique_abbr": "SNU;NAVER;KAIST;HYU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0;0+0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26585", + "title": "Prompting Neural Machine Translation with Translation Memories", + "track": "main", + "status": "Technical", + "abstract": "Improving machine translation (MT) systems with translation memories (TMs) is of great interest to practitioners in the MT community. However, previous approaches require either a significant update of the model architecture and/or additional training efforts to make the models well-behaved when TMs are taken as additional input. In this paper, we present a simple but effective method to introduce TMs into neural machine translation (NMT) systems. Specifically, we treat TMs as prompts to the NMT model at test time, but leave the training process unchanged. The result is a slight update of an existing NMT system, which can be implemented in a few hours by anyone who is familiar with NMT. 
Experimental results on several datasets demonstrate that our system significantly outperforms strong baselines.", + "primary_area": "speech natural language processing", + "author": "Abudurexiti Reheman; Tao Zhou; Yingfeng Luo; Di Yang; Tong Xiao; Jingbo Zhu", + "authorids": "", + "aff": "School of Computer Science and Engineering, Northeastern University, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China; NiuTrans Research, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, China; School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, China", + "bibtex": "@article{Reheman_Zhou_Luo_Yang_Xiao_Zhu_2023, title={Prompting Neural Machine Translation with Translation Memories}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26585}, DOI={10.1609/aaai.v37i11.26585}, abstractNote={Improving machine translation (MT) systems with translation memories (TMs) is of great interest to practitioners in the MT community. However, previous approaches require either a significant update of the model architecture and/or additional training efforts to make the models well-behaved when TMs are taken as additional input. In this paper, we present a simple but effective method to introduce TMs into neural machine translation (NMT) systems. Specifically, we treat TMs as prompts to the NMT model at test time, but leave the training process unchanged. The result is a slight update of an existing NMT system, which can be implemented in a few hours by anyone who is familiar with NMT. 
Experimental results on several datasets demonstrate that our system significantly outperforms strong baselines.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Reheman, Abudurexiti and Zhou, Tao and Luo, Yingfeng and Yang, Di and Xiao, Tong and Zhu, Jingbo}, year={2023}, month={Jun.}, pages={13519-13527} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26585/26357", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26585", + "pdf_size": 235555, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4692203239317267736&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "outlook.com;outlook.com;163.com;niutrans.com;mail.neu.edu.cn;mail.neu.edu.cn", + "email": "outlook.com;outlook.com;163.com;niutrans.com;mail.neu.edu.cn;mail.neu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0+1;0+1", + "aff_unique_norm": "Northeastern University;NiuTrans Research", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.neu.edu.cn/;", + "aff_unique_abbr": "NEU;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenyang;", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25684", + "title": "Properties of Position Matrices and Their Elections", + "track": "main", + "status": "Technical", + "abstract": "We study the properties of elections that have a given position matrix (in such elections each candidate is ranked on each position by a number of voters specified in the matrix). We show that counting elections that generate a given position matrix is #P-complete. Consequently, sampling such elections uniformly at random seems challenging and we propose a simpler algorithm, without hard guarantees. 
Next, we consider the problem of testing if a given matrix can be implemented by an election with a certain structure (such as single-peakedness or group-separability). Finally, we consider the problem of checking if a given position matrix can be implemented by an election with a Condorcet winner. We complement our theoretical findings with experiments.", + "primary_area": "game theory and economic paradigms", + "author": "Niclas Boehmer; Jin-Yi Cai; Piotr Faliszewski; Austen Z. Fan; \u0141ukasz Janeczko; Andrzej Kaczmarczyk; Tomasz W\u0105s", + "authorids": "", + "aff": "Algorithmics and Computational Complexity, Technische Universit\u00e4t Berlin; University of Wisconsin-Madison; AGH University; University of Wisconsin-Madison; AGH University; AGH University; AGH University+ Pennsylvania State University", + "bibtex": "@article{Boehmer_Cai_Faliszewski_Fan_Janeczko_Kaczmarczyk_W\u0105s_2023, title={Properties of Position Matrices and Their Elections}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25684}, DOI={10.1609/aaai.v37i5.25684}, abstractNote={We study the properties of elections that have a given position matrix (in such elections each candidate is ranked on each position by a number of voters specified in the matrix). We show that counting elections that generate a given position matrix is #P-complete. Consequently, sampling such elections uniformly at random seems challenging and we propose a simpler algorithm, without hard guarantees. Next, we consider the problem of testing if a given matrix can be implemented by an election with a certain structure (such as single-peakedness or group-separability). Finally, we consider the problem of checking if a given position matrix can be implemented by an election with a Condorcet winner. 
We complement our theoretical findings with experiments.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Boehmer, Niclas and Cai, Jin-Yi and Faliszewski, Piotr and Fan, Austen Z. and Janeczko, \u0141ukasz and Kaczmarczyk, Andrzej and W\u0105s, Tomasz}, year={2023}, month={Jun.}, pages={5507-5514} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25684/25456", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25684", + "pdf_size": 3867486, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13207227908037565439&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "tu-berlin.de;cs.wisc.edu;agh.edu.pl;cs.wisc.edu;agh.edu.pl;agh.edu.pl;psu.edu", + "email": "tu-berlin.de;cs.wisc.edu;agh.edu.pl;cs.wisc.edu;agh.edu.pl;agh.edu.pl;psu.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;1;2;2;2+3", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;University of Wisconsin-Madison;AGH University of Science and Technology;Pennsylvania State University", + "aff_unique_dep": "Algorithmics and Computational Complexity;;;", + "aff_unique_url": "https://www.tu-berlin.de;https://www.wisc.edu;https://www.agh.edu.pl;https://www.psu.edu", + "aff_unique_abbr": "TU Berlin;UW-Madison;AGH;PSU", + "aff_campus_unique_index": "0;1;1;", + "aff_campus_unique": "Berlin;Madison;", + "aff_country_unique_index": "0;1;2;1;2;2;2+1", + "aff_country_unique": "Germany;United States;Poland" + }, + { + "id": "article-25710", + "title": "Proportional Decisions in Perpetual Voting", + "track": "main", + "status": "Technical", + "abstract": "Perpetual voting is a framework for long-term collective decision making. In this framework, we consider a sequence of subsequent approval-based elections and try to achieve a fair overall outcome. 
To achieve fairness over time, perpetual voting rules take the history of previous decisions into account and identify voters that were dissatisfied with previous decisions. In this paper, we look at perpetual voting rules from an axiomatic perspective. First, we define two classes of perpetual voting rules that are particularly easy to explain to voters and explore the bounds imposed by this simplicity. Second, we study proportionality in the perpetual setting and identify two rules with strong proportionality guarantees. However, both rules yield different guarantees and we prove them to be incompatible with each other.", + "primary_area": "game theory and economic paradigms", + "author": "Martin Lackner; Jan Maly", + "authorids": "", + "aff": "TU Wien, Vienna, Austria; ILLC, University of Amsterdam, Netherlands", + "bibtex": "@article{Lackner_Maly_2023, title={Proportional Decisions in Perpetual Voting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25710}, DOI={10.1609/aaai.v37i5.25710}, abstractNote={Perpetual voting is a framework for long-term collective decision making. In this framework, we consider a sequence of subsequent approval-based elections and try to achieve a fair overall outcome. To achieve fairness over time, perpetual voting rules take the history of previous decisions into account and identify voters that were dissatisfied with previous decisions. In this paper, we look at perpetual voting rules from an axiomatic perspective. First, we define two classes of perpetual voting rules that are particularly easy to explain to voters and explore the bounds imposed by this simplicity. Second, we study proportionality in the perpetual setting and identify two rules with strong proportionality guarantees. 
However, both rules yield different guarantees and we prove them to be incompatible with each other.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lackner, Martin and Maly, Jan}, year={2023}, month={Jun.}, pages={5722-5729} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25710/25482", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25710", + "pdf_size": 164129, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14928719502143299325&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "dbai.tuwien.ac.at;uva.nl", + "email": "dbai.tuwien.ac.at;uva.nl", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Technical University of Vienna;University of Amsterdam", + "aff_unique_dep": ";ILLC", + "aff_unique_url": "https://www.tuwien.ac.at;https://www.uva.nl", + "aff_unique_abbr": "TU Wien;UvA", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Vienna;Amsterdam", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Austria;Netherlands" + }, + { + "id": "article-25686", + "title": "Proportionality in Approval-Based Participatory Budgeting", + "track": "main", + "status": "Technical", + "abstract": "The ability to measure the satisfaction of (groups of) voters is a crucial prerequisite for formulating proportionality axioms in approval-based participatory budgeting elections. Two common -- but very different -- ways to measure the satisfaction of a voter consider (i) the number of approved projects and (ii) the total cost of approved projects, respectively. In general, it is difficult to decide which measure of satisfaction best reflects the voters' true utilities. In this paper, we study proportionality axioms with respect to large classes of approval-based satisfaction functions. 
We establish logical implications among our axioms and related notions from the literature, and we ask whether outcomes can be achieved that are proportional with respect to more than one satisfaction function. We show that this is impossible for the two commonly used satisfaction functions when considering proportionality notions based on extended justified representation, but achievable for a notion based on proportional justified representation. For the latter result, we introduce a strengthening of priceability and show that it is satisfied by several polynomial-time computable rules, including the Method of Equal Shares and Phragm\u00e9n's sequential rule.", + "primary_area": "game theory and economic paradigms", + "author": "Markus Brill; Stefan Forster; Martin Lackner; Jan Maly; Jannik Peters", + "authorids": "", + "aff": "TU Berlin, Berlin, Germany + University of Warwick, Coventry, UK; TU Wien, Vienna, Austria; TU Wien, Vienna, Austria; ILLC, University of Amsterdam, Amsterdam, Netherlands; TU Berlin, Berlin, Germany", + "bibtex": "@article{Brill_Forster_Lackner_Maly_Peters_2023, title={Proportionality in Approval-Based Participatory Budgeting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25686}, DOI={10.1609/aaai.v37i5.25686}, abstractNote={The ability to measure the satisfaction of (groups of) voters is a crucial prerequisite for formulating proportionality axioms in approval-based participatory budgeting elections. Two common -- but very different -- ways to measure the satisfaction of a voter consider (i) the number of approved projects and (ii) the total cost of approved projects, respectively. In general, it is difficult to decide which measure of satisfaction best reflects the voters\u2019 true utilities. In this paper, we study proportionality axioms with respect to large classes of approval-based satisfaction functions. 
We establish logical implications among our axioms and related notions from the literature, and we ask whether outcomes can be achieved that are proportional with respect to more than one satisfaction function. We show that this is impossible for the two commonly used satisfaction functions when considering proportionality notions based on extended justified representation, but achievable for a notion based on proportional justified representation. For the latter result, we introduce a strengthening of priceability and show that it is satisfied by several polynomial-time computable rules, including the Method of Equal Shares and Phragm\u00e9n\u2019s sequential rule.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brill, Markus and Forster, Stefan and Lackner, Martin and Maly, Jan and Peters, Jannik}, year={2023}, month={Jun.}, pages={5524-5531} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25686/25458", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25686", + "pdf_size": 152628, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8144151661821717040&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff_domain": "warwick.ac.uk;tuwien.ac.at;dbai.tuwien.ac.at;uva.nl;tu-berlin.de", + "email": "warwick.ac.uk;tuwien.ac.at;dbai.tuwien.ac.at;uva.nl;tu-berlin.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;2;3;0", + "aff_unique_norm": "Technical University of Berlin;University of Warwick;Technical University of Vienna;University of Amsterdam", + "aff_unique_dep": ";;;ILLC", + "aff_unique_url": "https://www.tu-berlin.de;https://www.warwick.ac.uk;https://www.tuwien.ac.at;https://www.uva.nl", + "aff_unique_abbr": "TU Berlin;Warwick;TU Wien;UvA", + "aff_campus_unique_index": "0+1;2;2;3;0", + "aff_campus_unique": "Berlin;Coventry;Vienna;Amsterdam", + "aff_country_unique_index": "0+1;2;2;3;0", + "aff_country_unique": 
"Germany;United Kingdom;Austria;Netherlands" + }, + { + "id": "article-26524", + "title": "Prototypical Fine-Tuning: Towards Robust Performance under Varying Data Sizes", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we move towards combining large parametric models with non-parametric prototypical networks. We propose prototypical fine-tuning, a novel prototypical framework for fine-tuning pretrained language models (LM), which automatically learns a bias to improve predictive performance for varying data sizes, especially low-resource settings. Our prototypical fine-tuning approach can automatically adjust the model capacity according to the number of data points and the model's inherent attributes. Moreover, we propose four principles for effective prototype fine-tuning towards the optimal solution. Experimental results across various datasets show that our work achieves significant performance improvements under various low-resource settings, as well as comparable and usually better performances in high-resource scenarios.", + "primary_area": "speech natural language processing", + "author": "Yiqiao Jin; Xiting Wang; Yaru Hao; Yizhou Sun; Xing Xie", + "authorids": "", + "aff": "Georgia Institute of Technology; Microsoft Research Asia; Microsoft Research Asia; University of California, Los Angeles; Microsoft Research Asia", + "bibtex": "@article{Jin_Wang_Hao_Sun_Xie_2023, title={Prototypical Fine-Tuning: Towards Robust Performance under Varying Data Sizes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26524}, DOI={10.1609/aaai.v37i11.26524}, abstractNote={In this paper, we move towards combining large parametric models with non-parametric prototypical networks. We propose prototypical fine-tuning, a novel prototypical framework for fine-tuning pretrained language models (LM), which automatically learns a bias to improve predictive performance for varying data sizes, especially low-resource settings. 
Our prototypical fine-tuning approach can automatically adjust the model capacity according to the number of data points and the model\u2019s inherent attributes. Moreover, we propose four principles for effective prototype fine-tuning towards the optimal solution. Experimental results across various datasets show that our work achieves significant performance improvements under various low-resource settings, as well as comparable and usually better performances in high-resource scenarios.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Yiqiao and Wang, Xiting and Hao, Yaru and Sun, Yizhou and Xie, Xing}, year={2023}, month={Jun.}, pages={12968-12976} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26524/26296", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26524", + "pdf_size": 306722, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13054663492164664660&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gatech.edu;microsoft.com;microsoft.com;cs.ucla.edu;microsoft.com", + "email": "gatech.edu;microsoft.com;microsoft.com;cs.ucla.edu;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;1", + "aff_unique_norm": "Georgia Institute of Technology;Microsoft Research;University of California, Los Angeles", + "aff_unique_dep": ";Research;", + "aff_unique_url": "https://www.gatech.edu;https://www.microsoft.com/en-us/research/group/asia;https://www.ucla.edu", + "aff_unique_abbr": "Georgia Tech;MSR Asia;UCLA", + "aff_campus_unique_index": "1;1;2;1", + "aff_campus_unique": ";Asia;Los Angeles", + "aff_country_unique_index": "0;1;1;0;1", + "aff_country_unique": "United States;China" + }, + { + "id": "article-26287", + "title": "Prototypical Partial Optimal Transport for Universal Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Universal domain adaptation (UniDA) 
aims to transfer knowledge from a labeled source domain to an unlabeled target domain without requiring the same label sets of both domains. The existence of domain and category shift makes the task challenging and requires us to distinguish \u201cknown\u201d samples (i.e., samples whose labels exist in both domains) and \u201cunknown\u201d samples (i.e., samples whose labels exist in only one domain) in both domains before reducing the domain gap. In this paper, we consider the problem from the point of view of distribution matching which we only need to align two distributions partially. A novel approach, dubbed mini-batch Prototypical Partial Optimal Transport (m-PPOT), is proposed to conduct partial distribution alignment for UniDA. In training phase, besides minimizing m-PPOT, we also leverage the transport plan of m-PPOT to reweight source prototypes and target samples, and design reweighted entropy loss and reweighted cross-entropy loss to distinguish \u201cknown\u201d and \u201cunknown\u201d samples. Experiments on four benchmarks show that our method outperforms the previous state-of-the-art UniDA methods.", + "primary_area": "machine learning iv", + "author": "Yucheng Yang; Xiang Gu; Jian Sun", + "authorids": "", + "aff": "School of Mathematics and Statistics, Xi\u2019an Jiaotong University, Xi\u2019an, China; School of Mathematics and Statistics, Xi\u2019an Jiaotong University, Xi\u2019an, China; School of Mathematics and Statistics, Xi\u2019an Jiaotong University, Xi\u2019an, China", + "bibtex": "@article{Yang_Gu_Sun_2023, title={Prototypical Partial Optimal Transport for Universal Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26287}, DOI={10.1609/aaai.v37i9.26287}, abstractNote={Universal domain adaptation (UniDA) aims to transfer knowledge from a labeled source domain to an unlabeled target domain without requiring the same label sets of both domains. 
The existence of domain and category shift makes the task challenging and requires us to distinguish \u201cknown\u201d samples (i.e., samples whose labels exist in both domains) and \u201cunknown\u201d samples (i.e., samples whose labels exist in only one domain) in both domains before reducing the domain gap. In this paper, we consider the problem from the point of view of distribution matching which we only need to align two distributions partially. A novel approach, dubbed mini-batch Prototypical Partial Optimal Transport (m-PPOT), is proposed to conduct partial distribution alignment for UniDA. In training phase, besides minimizing m-PPOT, we also leverage the transport plan of m-PPOT to reweight source prototypes and target samples, and design reweighted entropy loss and reweighted cross-entropy loss to distinguish \u201cknown\u201d and \u201cunknown\u201d samples. Experiments on four benchmarks show that our method outperforms the previous state-of-the-art UniDA methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Yucheng and Gu, Xiang and Sun, Jian}, year={2023}, month={Jun.}, pages={10852-10860} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26287/26059", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26287", + "pdf_size": 4678757, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=111940832254005130&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.xjtu.edu.cn;stu.xjtu.edu.cn;xjtu.edu.cn", + "email": "stu.xjtu.edu.cn;stu.xjtu.edu.cn;xjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Xi'an Jiaotong University", + "aff_unique_dep": "School of Mathematics and Statistics", + "aff_unique_url": "http://en.xjtu.edu.cn/", + "aff_unique_abbr": "XJTU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Xi'an", + "aff_country_unique_index": 
"0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27083", + "title": "Prototyping Logic-Based AI Services with LogicUS", + "track": "demonstrations", + "status": "Technical", + "abstract": "Currently, there is renewed interest in logic-related solutions for AI and Computer Science. The availability of software tools to support the realization of such studies (both as powerful and versatile prototyping tools and as teaching tools) has become a necessity. Intending to contribute to this field, we present a tool that allows the unification of different logic tasks, focused on Computer Logic but adaptable to the treatment in several subfields, contexts, and abstraction levels (LogicUS-LIB, LogicUS-NB, LogicUS-GUI). \n\nThe tool provides a sound framework for two activity fields. On the one hand, in the topic of logic-based systems research, prototyping is facilitated in a relatively fast, simple, and highly adaptable way. On the other hand, in Education, by allowing the student to abstract from low-level execution of algorithms whilst preserving the conceptual structures and procedural methodologies underlying the logical foundations.", + "primary_area": "", + "author": "V\u00edctor Ramos-Gonz\u00e1lez; Joaqu\u00edn Borrego-D\u00edaz; Fernando Sancho-Caparrini", + "authorids": "", + "aff": "Department of Computer Science and Artificial Intelligence \u2013 University of Seville, Seville, Spain; Department of Computer Science and Artificial Intelligence \u2013 University of Seville, Seville, Spain; Department of Computer Science and Artificial Intelligence \u2013 University of Seville, Seville, Spain", + "bibtex": "@article{Ramos-Gonz\u00e1lez_Borrego-D\u00edaz_Sancho-Caparrini_2024, title={Prototyping Logic-Based AI Services with LogicUS}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27083}, DOI={10.1609/aaai.v37i13.27083}, abstractNote={Currently, there is renewed interest in logic-related solutions for AI and Computer Science. 
The availability of software tools to support the realization of such studies (both as powerful and versatile prototyping tools and as teaching tools) has become a necessity. Intending to contribute to this field, we present a tool that allows the unification of different logic tasks, focused on Computer Logic but adaptable to the treatment in several subfields, contexts, and abstraction levels (LogicUS-LIB, LogicUS-NB, LogicUS-GUI). The tool provides a sound framework for two activity fields. On the one hand, in the topic of logic-based systems research, prototyping is facilitated in a relatively fast, simple, and highly adaptable way. On the other hand, in Education, by allowing the student to abstract from low-level execution of algorithms whilst preserving the conceptual structures and procedural methodologies underlying the logical foundations.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ramos-Gonz\u00e1lez, V\u00edctor and Borrego-D\u00edaz, Joaqu\u00edn and Sancho-Caparrini, Fernando}, year={2024}, month={Jul.}, pages={16473-16475} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27083/26855", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27083", + "pdf_size": 4877719, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:EZiY_OTLVbMJ:scholar.google.com/&scioq=Prototyping+Logic-Based+AI+Services+with+LogicUS&hl=en&as_sdt=0,11", + "gs_version_total": 2, + "aff_domain": "us.es;us.es;us.es", + "email": "us.es;us.es;us.es", + "github": "https://logicus-es.github.io", + "project": "https://www.issac-conference.org/2022/software.php", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Seville", + "aff_unique_dep": "Department of Computer Science and Artificial Intelligence", + "aff_unique_url": "https://www.us.seville.es", + "aff_unique_abbr": "US", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": 
"Seville", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "article-26144", + "title": "Provable Detection of Propagating Sampling Bias in Prediction Models", + "track": "main", + "status": "Technical", + "abstract": "With an increased focus on incorporating fairness in machine learning models, it becomes imperative not only to assess and mitigate bias at each stage of the machine learning pipeline but also to understand the downstream impacts of bias across stages. Here we consider a general, but realistic, scenario in which a predictive model is learned from (potentially biased) training data, and model predictions are assessed post-hoc for fairness by some auditing method. We provide a theoretical analysis of how a specific form of data bias, differential sampling bias, propagates from the data stage to the prediction stage. Unlike prior work, we evaluate the downstream impacts of data biases quantitatively rather than qualitatively and prove theoretical guarantees for detection. Under reasonable assumptions, we quantify how the amount of bias in the model predictions varies as a function of the amount of differential sampling bias in the data, and at what point this bias becomes provably detectable by the auditor. Through experiments on two criminal justice datasets-- the well-known COMPAS dataset and historical data from NYPD's stop and frisk policy-- we demonstrate that the theoretical results hold in practice even when our assumptions are relaxed.", + "primary_area": "machine learning iii", + "author": "Pavan Ravishankar; Qingyu Mo; Edward McFowland III; Daniel B. 
Neill", + "authorids": "", + "aff": "Machine Learning for Good Laboratory, New York University; Machine Learning for Good Laboratory, New York University; Harvard Business School; Machine Learning for Good Laboratory, New York University", + "bibtex": "@article{Ravishankar_Mo_McFowland III_Neill_2023, title={Provable Detection of Propagating Sampling Bias in Prediction Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26144}, DOI={10.1609/aaai.v37i8.26144}, abstractNote={With an increased focus on incorporating fairness in machine learning models, it becomes imperative not only to assess and mitigate bias at each stage of the machine learning pipeline but also to understand the downstream impacts of bias across stages. Here we consider a general, but realistic, scenario in which a predictive model is learned from (potentially biased) training data, and model predictions are assessed post-hoc for fairness by some auditing method. We provide a theoretical analysis of how a specific form of data bias, differential sampling bias, propagates from the data stage to the prediction stage. Unlike prior work, we evaluate the downstream impacts of data biases quantitatively rather than qualitatively and prove theoretical guarantees for detection. Under reasonable assumptions, we quantify how the amount of bias in the model predictions varies as a function of the amount of differential sampling bias in the data, and at what point this bias becomes provably detectable by the auditor. 
Through experiments on two criminal justice datasets-- the well-known COMPAS dataset and historical data from NYPD\u2019s stop and frisk policy-- we demonstrate that the theoretical results hold in practice even when our assumptions are relaxed.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ravishankar, Pavan and Mo, Qingyu and McFowland III, Edward and Neill, Daniel B.}, year={2023}, month={Jun.}, pages={9562-9569} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26144/25916", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26144", + "pdf_size": 320813, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17969115167013783975&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "nyu.edu;nyu.edu;hbs.edu;nyu.edu", + "email": "nyu.edu;nyu.edu;hbs.edu;nyu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "New York University;Harvard University", + "aff_unique_dep": "Machine Learning for Good Laboratory;Harvard Business School", + "aff_unique_url": "https://www.nyu.edu;https://www.hbs.edu", + "aff_unique_abbr": "NYU;HBS", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "New York;Cambridge", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26047", + "title": "Provable Pathways: Learning Multiple Tasks over Multiple Paths", + "track": "main", + "status": "Technical", + "abstract": "Constructing useful representations across a large number of tasks is a key requirement for sample-efficient intelligent systems. A traditional idea in multitask learning (MTL) is building a shared representation across tasks which can then be adapted to new tasks by tuning last layers. A desirable refinement of using a shared one-fits-all representation is to construct task-specific representations. 
To this end, recent PathNet/muNet architectures represent individual tasks as pathways within a larger supernet. The subnetworks induced by pathways can be viewed as task-specific representations that are composition of modules within supernet's computation graph. This work explores the pathways proposal from the lens of statistical learning: We first develop novel generalization bounds for empirical risk minimization problems learning multiple tasks over multiple paths (Multipath MTL). In conjunction, we formalize the benefits of resulting multipath representation when adapting to new downstream tasks. Our bounds are expressed in terms of Gaussian complexity, lead to tangible guarantees for the class of linear representations, and provide novel insights into the quality and benefits of a multipath representation. When computation graph is a tree, Multipath MTL hierarchically clusters the tasks and builds cluster-specific representations. We provide further discussion and experiments for hierarchical MTL and rigorously identify the conditions under which Multipath MTL is provably superior to traditional MTL approaches with shallow supernets.", + "primary_area": "machine learning ii", + "author": "Yingcong Li; Samet Oymak", + "authorids": "", + "aff": "University of California, Riverside; University of California, Riverside + University of Michigan, Ann Arbor", + "bibtex": "@article{Li_Oymak_2023, title={Provable Pathways: Learning Multiple Tasks over Multiple Paths}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26047}, DOI={10.1609/aaai.v37i7.26047}, abstractNote={Constructing useful representations across a large number of tasks is a key requirement for sample-efficient intelligent systems. A traditional idea in multitask learning (MTL) is building a shared representation across tasks which can then be adapted to new tasks by tuning last layers. 
A desirable refinement of using a shared one-fits-all representation is to construct task-specific representations. To this end, recent PathNet/muNet architectures represent individual tasks as pathways within a larger supernet. The subnetworks induced by pathways can be viewed as task-specific representations that are composition of modules within supernet\u2019s computation graph. This work explores the pathways proposal from the lens of statistical learning: We first develop novel generalization bounds for empirical risk minimization problems learning multiple tasks over multiple paths (Multipath MTL). In conjunction, we formalize the benefits of resulting multipath representation when adapting to new downstream tasks. Our bounds are expressed in terms of Gaussian complexity, lead to tangible guarantees for the class of linear representations, and provide novel insights into the quality and benefits of a multipath representation. When computation graph is a tree, Multipath MTL hierarchically clusters the tasks and builds cluster-specific representations. 
We provide further discussion and experiments for hierarchical MTL and rigorously identify the conditions under which Multipath MTL is provably superior to traditional MTL approaches with shallow supernets.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yingcong and Oymak, Samet}, year={2023}, month={Jun.}, pages={8701-8710} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26047/25819", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26047", + "pdf_size": 238559, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5591124686435479695&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ucr.edu;ece.ucr.edu", + "email": "ucr.edu;ece.ucr.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "University of California, Riverside;University of Michigan", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucr.edu;https://www.umich.edu", + "aff_unique_abbr": "UCR;UM", + "aff_campus_unique_index": "0;0+1", + "aff_campus_unique": "Riverside;Ann Arbor", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26109", + "title": "Provably Efficient Causal Model-Based Reinforcement Learning for Systematic Generalization", + "track": "main", + "status": "Technical", + "abstract": "In the sequential decision making setting, an agent aims to achieve systematic generalization over a large, possibly infinite, set of environments. Such environments are modeled as discrete Markov decision processes with both states and actions represented through a feature vector. The underlying structure of the environments allows the transition dynamics to be factored into two components: one that is environment-specific and another that is shared. Consider a set of environments that share the laws of motion as an example. 
In this setting, the agent can take a finite amount of reward-free interactions from a subset of these environments. The agent then must be able to approximately solve any planning task defined over any environment in the original set, relying on the above interactions only. Can we design a provably efficient algorithm that achieves this ambitious goal of systematic generalization? In this paper, we give a partially positive answer to this question. First, we provide a tractable formulation of systematic generalization by employing a causal viewpoint. Then, under specific structural assumptions, we provide a simple learning algorithm that guarantees any desired planning error up to an unavoidable sub-optimality term, while showcasing a polynomial sample complexity.", + "primary_area": "machine learning iii", + "author": "Mirco Mutti; Riccardo De Santi; Emanuele Rossi; Juan Felipe Calderon; Michael Bronstein; Marcello Restelli", + "authorids": "", + "aff": "Politecnico di Milano+Universit `a di Bologna; ETH Zurich; Imperial College London+Twitter; Politecnico di Milano; Twitter+University of Oxford; Politecnico di Milano", + "bibtex": "@article{Mutti_De Santi_Rossi_Calderon_Bronstein_Restelli_2023, title={Provably Efficient Causal Model-Based Reinforcement Learning for Systematic Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26109}, DOI={10.1609/aaai.v37i8.26109}, abstractNote={In the sequential decision making setting, an agent aims to achieve systematic generalization over a large, possibly infinite, set of environments. Such environments are modeled as discrete Markov decision processes with both states and actions represented through a feature vector. The underlying structure of the environments allows the transition dynamics to be factored into two components: one that is environment-specific and another that is shared. Consider a set of environments that share the laws of motion as an example. 
In this setting, the agent can take a finite amount of reward-free interactions from a subset of these environments. The agent then must be able to approximately solve any planning task defined over any environment in the original set, relying on the above interactions only. Can we design a provably efficient algorithm that achieves this ambitious goal of systematic generalization? In this paper, we give a partially positive answer to this question. First, we provide a tractable formulation of systematic generalization by employing a causal viewpoint. Then, under specific structural assumptions, we provide a simple learning algorithm that guarantees any desired planning error up to an unavoidable sub-optimality term, while showcasing a polynomial sample complexity.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mutti, Mirco and De Santi, Riccardo and Rossi, Emanuele and Calderon, Juan Felipe and Bronstein, Michael and Restelli, Marcello}, year={2023}, month={Jun.}, pages={9251-9259} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26109/25881", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26109", + "pdf_size": 622685, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10524036539578131823&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "polimi.it;ethz.ch; ; ; ;", + "email": "polimi.it;ethz.ch; ; ; ;", + "github": "", + "project": "https://arxiv.org/abs/2202.06545", + "author_num": 6, + "aff_unique_index": "0+1;2;3+4;0;4+5;0", + "aff_unique_norm": "Politecnico di Milano;University of Bologna;ETH Zurich;Imperial College London;Twitter, Inc.;University of Oxford", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.polimi.it;https://www.unibo.it;https://www.ethz.ch;https://www.imperial.ac.uk;https://twitter.com;https://www.ox.ac.uk", + "aff_unique_abbr": "Polimi;Unibo;ETHZ;ICL;Twitter;Oxford", + "aff_campus_unique_index": 
";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;2+3;0;3+2;0", + "aff_country_unique": "Italy;Switzerland;United Kingdom;United States" + }, + { + "id": "article-25900", + "title": "Provably Efficient Primal-Dual Reinforcement Learning for CMDPs with Non-stationary Objectives and Constraints", + "track": "main", + "status": "Technical", + "abstract": "We consider primal-dual-based reinforcement learning (RL) in episodic constrained Markov decision processes (CMDPs) with non-stationary objectives and constraints, which plays a central role in ensuring the safety of RL in time-varying environments. In this problem, the reward/utility functions and the state transition functions are both allowed to vary arbitrarily over time as long as their cumulative variations do not exceed certain known variation budgets. Designing safe RL algorithms in time-varying environments is particularly challenging because of the need to integrate the constraint violation reduction, safe exploration, and adaptation to the non-stationarity. To this end, we identify two alternative conditions on the time-varying constraints under which we can guarantee the safety in the long run. We also propose the Periodically Restarted Optimistic Primal-Dual Proximal Policy Optimization (PROPD-PPO) algorithm that can coordinate with both two conditions. Furthermore, a dynamic regret bound and a constraint violation bound are established for the proposed algorithm in both the linear kernel CMDP function approximation setting and the tabular CMDP setting under two alternative conditions. 
This paper provides the first provably efficient algorithm for non-stationary CMDPs with safe exploration.", + "primary_area": "machine learning i", + "author": "Yuhao Ding; Javad Lavaei", + "authorids": "", + "aff": "UC Berkeley, Department of Industrial Engineering and Operations Research; UC Berkeley, Department of Industrial Engineering and Operations Research", + "bibtex": "@article{Ding_Lavaei_2023, title={Provably Efficient Primal-Dual Reinforcement Learning for CMDPs with Non-stationary Objectives and Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25900}, DOI={10.1609/aaai.v37i6.25900}, abstractNote={We consider primal-dual-based reinforcement learning (RL) in episodic constrained Markov decision processes (CMDPs) with non-stationary objectives and constraints, which plays a central role in ensuring the safety of RL in time-varying environments. In this problem, the reward/utility functions and the state transition functions are both allowed to vary arbitrarily over time as long as their cumulative variations do not exceed certain known variation budgets. Designing safe RL algorithms in time-varying environments is particularly challenging because of the need to integrate the constraint violation reduction, safe exploration, and adaptation to the non-stationarity. To this end, we identify two alternative conditions on the time-varying constraints under which we can guarantee the safety in the long run. We also propose the Periodically Restarted Optimistic Primal-Dual Proximal Policy Optimization (PROPD-PPO) algorithm that can coordinate with both two conditions. Furthermore, a dynamic regret bound and a constraint violation bound are established for the proposed algorithm in both the linear kernel CMDP function approximation setting and the tabular CMDP setting under two alternative conditions. 
This paper provides the first provably efficient algorithm for non-stationary CMDPs with safe exploration.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Yuhao and Lavaei, Javad}, year={2023}, month={Jun.}, pages={7396-7404} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25900/25672", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25900", + "pdf_size": 172682, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17915968989654764654&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "berkeley.edu;berkeley.edu", + "email": "berkeley.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, Berkeley", + "aff_unique_dep": "Department of Industrial Engineering and Operations Research", + "aff_unique_url": "https://www.berkeley.edu", + "aff_unique_abbr": "UC Berkeley", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Berkeley", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26087", + "title": "Proximal Stochastic Recursive Momentum Methods for Nonconvex Composite Decentralized Optimization", + "track": "main", + "status": "Technical", + "abstract": "Consider a network of N decentralized computing agents collaboratively solving a nonconvex stochastic composite problem. In this work, we propose a single-loop algorithm, called DEEPSTORM, that achieves optimal sample complexity for this setting. Unlike double-loop algorithms that require a large batch size to compute the (stochastic) gradient once in a while, DEEPSTORM uses a small batch size, creating advantages in occasions such as streaming data and online learning. This is the first method achieving optimal sample complexity for decentralized nonconvex stochastic composite problems, requiring O(1) batch size. 
We conduct convergence analysis for DEEPSTORM with both constant and diminishing step sizes. Additionally, under proper initialization and a small enough desired solution error, we show that DEEPSTORM with a constant step size achieves a network-independent sample complexity, with an additional linear speed-up with respect to N over centralized methods. All codes are made available at https://github.com/gmancino/DEEPSTORM.", + "primary_area": "machine learning ii", + "author": "Gabriel Mancino-Ball; Shengnan Miao; Yangyang Xu; Jie Chen", + "authorids": "", + "aff": "Department of Mathematical Sciences, Rensselaer Polytechnic Institute, Troy, NY 12180; Department of Mathematical Sciences, Rensselaer Polytechnic Institute, Troy, NY 12180; Department of Mathematical Sciences, Rensselaer Polytechnic Institute, Troy, NY 12180; MIT IBM-Watson AI Lab, IBM Research, Cambridge, MA 02142", + "bibtex": "@article{Mancino-Ball_Miao_Xu_Chen_2023, title={Proximal Stochastic Recursive Momentum Methods for Nonconvex Composite Decentralized Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26087}, DOI={10.1609/aaai.v37i7.26087}, abstractNote={Consider a network of N decentralized computing agents collaboratively solving a nonconvex stochastic composite problem. In this work, we propose a single-loop algorithm, called DEEPSTORM, that achieves optimal sample complexity for this setting. Unlike double-loop algorithms that require a large batch size to compute the (stochastic) gradient once in a while, DEEPSTORM uses a small batch size, creating advantages in occasions such as streaming data and online learning. This is the first method achieving optimal sample complexity for decentralized nonconvex stochastic composite problems, requiring O(1) batch size. We conduct convergence analysis for DEEPSTORM with both constant and diminishing step sizes. 
Additionally, under proper initialization and a small enough desired solution error, we show that DEEPSTORM with a constant step size achieves a network-independent sample complexity, with an additional linear speed-up with respect to N over centralized methods. All codes are made available at https://github.com/gmancino/DEEPSTORM.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mancino-Ball, Gabriel and Miao, Shengnan and Xu, Yangyang and Chen, Jie}, year={2023}, month={Jun.}, pages={9055-9063} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26087/25859", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26087", + "pdf_size": 905981, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3242799088103530497&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "rpi.edu;gmail.com;rpi.edu;us.ibm.com", + "email": "rpi.edu;gmail.com;rpi.edu;us.ibm.com", + "github": "https://github.com/gmancino/DEEPSTORM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Rensselaer Polytechnic Institute;Massachusetts Institute of Technology", + "aff_unique_dep": "Department of Mathematical Sciences;IBM-Watson AI Lab", + "aff_unique_url": "https://www.rpi.edu;https://www.mit.edu", + "aff_unique_abbr": "RPI;MIT", + "aff_campus_unique_index": "0;0;0;1", + "aff_campus_unique": "Troy;Cambridge", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26169", + "title": "ProxyBO: Accelerating Neural Architecture Search via Bayesian Optimization with Zero-Cost Proxies", + "track": "main", + "status": "Technical", + "abstract": "Designing neural architectures requires immense manual efforts. This has promoted the development of neural architecture search (NAS) to automate the design. 
While previous NAS methods achieve promising results but run slowly, zero-cost proxies run extremely fast but are less promising. Therefore, it\u2019s of great potential to accelerate NAS via those zero-cost proxies. The existing method has two limitations, which are unforeseeable reliability and one-shot usage. To address the limitations, we present ProxyBO, an efficient Bayesian optimization (BO) framework that utilizes the zero-cost proxies to accelerate neural architecture search. We apply the generalization ability measurement to estimate the fitness of proxies on the task during each iteration and design a novel acquisition function to combine BO with zero-cost proxies based on their dynamic influence. Extensive empirical studies show that ProxyBO consistently outperforms competitive baselines on five tasks from three public benchmarks. Concretely, ProxyBO achieves up to 5.41\u00d7 and 3.86\u00d7 speedups over the state-of-the-art approaches REA and BRP-NAS.", + "primary_area": "machine learning iii", + "author": "Yu Shen; Yang Li; Jian Zheng; Wentao Zhang; Peng Yao; Jixiang Li; Sen Yang; Ji Liu; Bin Cui", + "authorids": "", + "aff": "Key Lab of High Confidence Software Technologies, Peking University, China+Kuaishou Technology, China; Data Platform, TEG, Tencent Inc., China; School of Computer Science and Engineering, Beihang University, China; Mila - Qu\u00e9bec AI Institute+HEC, Montr \u0301eal, Canada; Kuaishou Technology, China; Kuaishou Technology, China; Kuaishou Technology, China; Kuaishou Technology, China; Key Lab of High Confidence Software Technologies, Peking University, China+Institute of Computational Social Science, Peking University (Qingdao), China", + "bibtex": "@article{Shen_Li_Zheng_Zhang_Yao_Li_Yang_Liu_Cui_2023, title={ProxyBO: Accelerating Neural Architecture Search via Bayesian Optimization with Zero-Cost Proxies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26169}, DOI={10.1609/aaai.v37i8.26169}, 
abstractNote={Designing neural architectures requires immense manual efforts. This has promoted the development of neural architecture search (NAS) to automate the design. While previous NAS methods achieve promising results but run slowly, zero-cost proxies run extremely fast but are less promising. Therefore, it\u2019s of great potential to accelerate NAS via those zero-cost proxies. The existing method has two limitations, which are unforeseeable reliability and one-shot usage. To address the limitations, we present ProxyBO, an efficient Bayesian optimization (BO) framework that utilizes the zero-cost proxies to accelerate neural architecture search. We apply the generalization ability measurement to estimate the fitness of proxies on the task during each iteration and design a novel acquisition function to combine BO with zero-cost proxies based on their dynamic influence. Extensive empirical studies show that ProxyBO consistently outperforms competitive baselines on five tasks from three public benchmarks. 
Concretely, ProxyBO achieves up to 5.41\u00d7 and 3.86\u00d7 speedups over the state-of-the-art approaches REA and BRP-NAS.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shen, Yu and Li, Yang and Zheng, Jian and Zhang, Wentao and Yao, Peng and Li, Jixiang and Yang, Sen and Liu, Ji and Cui, Bin}, year={2023}, month={Jun.}, pages={9792-9801} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26169/25941", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26169", + "pdf_size": 415724, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7173973033435477663&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "pku.edu.cn;tencent.com;buaa.edu.cn;mila.quebec;kuaishou.com;kuaishou.com;kuaishou.com;kwai.com;pku.edu.cn", + "email": "pku.edu.cn;tencent.com;buaa.edu.cn;mila.quebec;kuaishou.com;kuaishou.com;kuaishou.com;kwai.com;pku.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;2;3;4+5;1;1;1;1;0+0", + "aff_unique_norm": "Peking University;Kuaishou Technology;Tencent Inc.;Beihang University;Qu\u00e9bec AI Institute;HEC Montr\u00e9al", + "aff_unique_dep": "Key Lab of High Confidence Software Technologies;;Data Platform, TEG;School of Computer Science and Engineering;AI Institute;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.kuaishou.com;https://www.tencent.com;http://www.buaa.edu.cn;https://mila.quebec;https://www.hec.ca", + "aff_unique_abbr": "PKU;Kuaishou;Tencent;Beihang;Mila;HEC", + "aff_campus_unique_index": ";1;2", + "aff_campus_unique": ";Montr\u00e9al;Qingdao", + "aff_country_unique_index": "0+0;0;0;1+1;0;0;0;0;0+0", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-25442", + "title": "Pseudo Label-Guided Model Inversion Attack via Conditional Generative Adversarial Network", + "track": "main", + "status": "Technical", + "abstract": "Model inversion (MI) attacks have raised 
increasing concerns about privacy, which can reconstruct training data from public models. Indeed, MI attacks can be formalized as an optimization problem that seeks private data in a certain space. Recent MI attacks leverage a generative adversarial network (GAN) as an image prior to narrow the search space, and can successfully reconstruct even the high-dimensional data (e.g., face images). However, these generative MI attacks do not fully exploit the potential capabilities of the target model, still leading to a vague and coupled search space, i.e., different classes of images are coupled in the search space. Besides, the widely used cross-entropy loss in these attacks suffers from gradient vanishing. To address these problems, we propose Pseudo Label-Guided MI (PLG-MI) attack via conditional GAN (cGAN). At first, a top-n selection strategy is proposed to provide pseudo-labels for public data, and use pseudo-labels to guide the training of the cGAN. In this way, the search space is decoupled for different classes of images. Then a max-margin loss is introduced to improve the search process on the subspace of a target class. Extensive experiments demonstrate that our PLG-MI attack significantly improves the attack success rate and visual quality for various datasets and models, notably, 2 \u223c 3\u00d7 better than state-of-the-art attacks under large distributional shifts. 
Our code is available at: https://github.com/LetheSec/PLG-MI-Attack.", + "primary_area": "computer vision iii", + "author": "Xiaojian Yuan; Kejiang Chen; Jie Zhang; Weiming Zhang; Nenghai Yu; Yang Zhang", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China+University of Waterloo; University of Science and Technology of China; University of Science and Technology of China; CISPA Helmholtz Center for Information Security", + "bibtex": "@article{Yuan_Chen_Zhang_Zhang_Yu_Zhang_2023, title={Pseudo Label-Guided Model Inversion Attack via Conditional Generative Adversarial Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25442}, DOI={10.1609/aaai.v37i3.25442}, abstractNote={Model inversion (MI) attacks have raised increasing concerns about privacy, which can reconstruct training data from public models. Indeed, MI attacks can be formalized as an optimization problem that seeks private data in a certain space. Recent MI attacks leverage a generative adversarial network (GAN) as an image prior to narrow the search space, and can successfully reconstruct even the high-dimensional data (e.g., face images). However, these generative MI attacks do not fully exploit the potential capabilities of the target model, still leading to a vague and coupled search space, i.e., different classes of images are coupled in the search space. Besides, the widely used cross-entropy loss in these attacks suffers from gradient vanishing. To address these problems, we propose Pseudo Label-Guided MI (PLG-MI) attack via conditional GAN (cGAN). At first, a top-n selection strategy is proposed to provide pseudo-labels for public data, and use pseudo-labels to guide the training of the cGAN. In this way, the search space is decoupled for different classes of images. 
Then a max-margin loss is introduced to improve the search process on the subspace of a target class. Extensive experiments demonstrate that our PLG-MI attack significantly improves the attack success rate and visual quality for various datasets and models, notably, 2 \u223c 3\u00d7 better than state-of-the-art attacks under large distributional shifts. Our code is available at: https://github.com/LetheSec/PLG-MI-Attack.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Xiaojian and Chen, Kejiang and Zhang, Jie and Zhang, Weiming and Yu, Nenghai and Zhang, Yang}, year={2023}, month={Jun.}, pages={3349-3357} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25442/25214", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25442", + "pdf_size": 527099, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8603883117099146134&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 11, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;gmail.com;ustc.edu.cn;ustc.edu.cn;cispa.de", + "email": "mail.ustc.edu.cn;ustc.edu.cn;gmail.com;ustc.edu.cn;ustc.edu.cn;cispa.de", + "github": "https://github.com/LetheSec/PLG-MI-Attack", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1;0;0;2", + "aff_unique_norm": "University of Science and Technology of China;University of Waterloo;CISPA Helmholtz Center for Information Security", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://uwaterloo.ca;https://www.cispa.de/", + "aff_unique_abbr": "USTC;UW;CISPA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1;0;0;2", + "aff_country_unique": "China;Canada;Germany" + }, + { + "id": "article-26289", + "title": "Purifier: Defending Data Inference Attacks via Transforming Confidence Scores", + "track": "main", + "status": "Technical", + "abstract": "Neural networks are susceptible to data inference attacks 
such as the membership inference attack, the adversarial model inversion attack and the attribute inference attack, where the attacker could infer useful information such as the membership, the reconstruction or the sensitive attributes of a data sample from the confidence scores predicted by the target classifier. In this paper, we propose a method, namely PURIFIER, to defend against membership inference attacks. It transforms the confidence score vectors predicted by the target classifier and makes purified confidence scores indistinguishable in individual shape, statistical distribution and prediction label between members and non-members. The experimental results show that PURIFIER helps defend membership inference attacks with high effectiveness and efficiency, outperforming previous defense methods, and also incurs negligible utility loss. Besides, our further experiments show that PURIFIER is also effective in defending adversarial model inversion attacks and attribute inference attacks. 
For example, the inversion error is raised about 4+ times on the Facescrub530 classifier, and the attribute inference accuracy drops significantly when PURIFIER is deployed in our experiment.", + "primary_area": "machine learning iv", + "author": "Ziqi Yang; Lijin Wang; Da Yang; Jie Wan; Ziming Zhao; Ee-Chien Chang; Fan Zhang; Kui Ren", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University; National University of Singapore; Zhejiang University+ZJU-Hangzhou Global Scientific and Technological Innovation Center+Key Laboratory of Blockchain and Cyberspace Governance of Zhejiang Province+Jiaxing Research Institute, Zhejiang University+Zhengzhou Xinda Institute of Advanced Technology; Zhejiang University+ZJU-Hangzhou Global Scientific and Technological Innovation Center+Key Laboratory of Blockchain and Cyberspace Governance of Zhejiang Province+Jiaxing Research Institute, Zhejiang University", + "bibtex": "@article{Yang_Wang_Yang_Wan_Zhao_Chang_Zhang_Ren_2023, title={Purifier: Defending Data Inference Attacks via Transforming Confidence Scores}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26289}, DOI={10.1609/aaai.v37i9.26289}, abstractNote={Neural networks are susceptible to data inference attacks such as the membership inference attack, the adversarial model inversion attack and the attribute inference attack, where the attacker could infer useful information such as the membership, the reconstruction or the sensitive attributes of a data sample from the confidence scores predicted by the target classifier. In this paper, we propose a method, namely PURIFIER, to defend against membership inference attacks. It transforms the confidence score vectors predicted by the target classifier and makes purified confidence scores indistinguishable in individual shape, statistical distribution and prediction label between members and non-members. 
The experimental results show that PURIFIER helps defend membership inference attacks with high effectiveness and efficiency, outperforming previous defense methods, and also incurs negligible utility loss. Besides, our further experiments show that PURIFIER is also effective in defending adversarial model inversion attacks and attribute inference attacks. For example, the inversion error is raised about 4+ times on the Facescrub530 classifier, and the attribute inference accuracy drops significantly when PURIFIER is deployed in our experiment.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Ziqi and Wang, Lijin and Yang, Da and Wan, Jie and Zhao, Ziming and Chang, Ee-Chien and Zhang, Fan and Ren, Kui}, year={2023}, month={Jun.}, pages={10871-10879} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26289/26061", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26289", + "pdf_size": 621687, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5719033326452693335&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;comp.nus.edu.sg;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;comp.nus.edu.sg;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;1;0+0+2+0+3;0+0+2+0", + "aff_unique_norm": "Zhejiang University;National University of Singapore;Zhejiang Province Key Laboratory of Blockchain and Cyberspace Governance;Zhengzhou Xinda Institute of Advanced Technology", + "aff_unique_dep": ";;Blockchain and Cyberspace Governance;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.nus.edu.sg;;", + "aff_unique_abbr": "ZJU;NUS;;", + "aff_campus_unique_index": "1+2;1+2", + "aff_campus_unique": ";Hangzhou;Jiaxing", + "aff_country_unique_index": "0;0;0;0;0;1;0+0+0+0+0;0+0+0+0", + 
"aff_country_unique": "China;Singapore" + }, + { + "id": "article-26073", + "title": "Q-functionals for Value-Based Continuous Control", + "track": "main", + "status": "Technical", + "abstract": "We present Q-functionals, an alternative architecture for continuous control deep reinforcement learning. Instead of returning a single value for a state-action pair, our network transforms a state into a function that can be rapidly evaluated in parallel for many actions, allowing us to efficiently choose high-value actions through sampling. This contrasts with the typical architecture of off-policy continuous control, where a policy network is trained for the sole purpose of selecting actions from the Q-function. We represent our action-dependent Q-function as a weighted sum of basis functions (Fourier, Polynomial, etc) over the action space, where the weights are state-dependent and output by the Q-functional network. Fast sampling makes practical a variety of techniques that require Monte-Carlo integration over Q-functions, and enables action-selection strategies besides simple value-maximization. We characterize our framework, describe various implementations of Q-functionals, and demonstrate strong performance on a suite of continuous control tasks.", + "primary_area": "machine learning ii", + "author": "Samuel Lobel; Sreehari Rammohan; Bowen He; Shangqun Yu; George Konidaris", + "authorids": "", + "aff": "Brown University; Brown University; Brown University; University of Massachusetts, Amherst; Brown University", + "bibtex": "@article{Lobel_Rammohan_He_Yu_Konidaris_2023, title={Q-functionals for Value-Based Continuous Control}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26073}, DOI={10.1609/aaai.v37i7.26073}, abstractNote={We present Q-functionals, an alternative architecture for continuous control deep reinforcement learning. 
Instead of returning a single value for a state-action pair, our network transforms a state into a function that can be rapidly evaluated in parallel for many actions, allowing us to efficiently choose high-value actions through sampling. This contrasts with the typical architecture of off-policy continuous control, where a policy network is trained for the sole purpose of selecting actions from the Q-function. We represent our action-dependent Q-function as a weighted sum of basis functions (Fourier, Polynomial, etc) over the action space, where the weights are state-dependent and output by the Q-functional network. Fast sampling makes practical a variety of techniques that require Monte-Carlo integration over Q-functions, and enables action-selection strategies besides simple value-maximization. We characterize our framework, describe various implementations of Q-functionals, and demonstrate strong performance on a suite of continuous control tasks.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lobel, Samuel and Rammohan, Sreehari and He, Bowen and Yu, Shangqun and Konidaris, George}, year={2023}, month={Jun.}, pages={8932-8939} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26073/25845", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26073", + "pdf_size": 966444, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6278559206911057443&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 11, + "aff_domain": "brown.edu;brown.edu;brown.edu;umass.edu;cs.brown.edu", + "email": "brown.edu;brown.edu;brown.edu;umass.edu;cs.brown.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Brown University;University of Massachusetts Amherst", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.brown.edu;https://www.umass.edu", + "aff_unique_abbr": "Brown;UMass Amherst", + "aff_campus_unique_index": 
"1", + "aff_campus_unique": ";Amherst", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26794", + "title": "QA Is the New KR: Question-Answer Pairs as Knowledge Bases", + "track": "senior member presentation bridge papers", + "status": "Technical", + "abstract": "We propose a new knowledge representation (KR) based on knowledge bases (KBs) derived from text, based on question generation and entity linking. We argue that the proposed type of KB has many of the key advantages of a traditional symbolic KB: in particular, it consists of small modular components, which can be combined compositionally to answer complex queries, including relational queries and queries involving ``multi-hop'' inferences. However, unlike a traditional KB, this information store is well-aligned with common user information needs. We present one such KB, called a QEDB, and give qualitative evidence that the atomic components are high-quality and meaningful, and that atomic components can be combined in ways similar to the triples in a symbolic KB. We also show experimentally that questions reflective of typical user questions are more easily answered with a QEDB than a symbolic KB.", + "primary_area": "", + "author": "William W. Cohen; Wenhu Chen; Michiel De Jong; Nitish Gupta; Alessandro Presta; Pat Verga; John Wieting", + "authorids": "", + "aff": "Google AI; Google AI; Google AI; Google AI; Google AI; Google AI; Google AI", + "bibtex": "@article{Cohen_Chen_De Jong_Gupta_Presta_Verga_Wieting_2024, title={QA Is the New KR: Question-Answer Pairs as Knowledge Bases}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26794}, DOI={10.1609/aaai.v37i13.26794}, abstractNote={We propose a new knowledge representation (KR) based on knowledge bases (KBs) derived from text, based on question generation and entity linking. 
We argue that the proposed type of KB has many of the key advantages of a traditional symbolic KB: in particular, it consists of small modular components, which can be combined compositionally to answer complex queries, including relational queries and queries involving ``multi-hop\u2019\u2019 inferences. However, unlike a traditional KB, this information store is well-aligned with common user information needs. We present one such KB, called a QEDB, and give qualitative evidence that the atomic components are high-quality and meaningful, and that atomic components can be combined in ways similar to the triples in a symbolic KB. We also show experimentally that questions reflective of typical user questions are more easily answered with a QEDB than a symbolic KB.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cohen, William W. and Chen, Wenhu and De Jong, Michiel and Gupta, Nitish and Presta, Alessandro and Verga, Pat and Wieting, John}, year={2024}, month={Jul.}, pages={15385-15392} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26794/26566", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26794", + "pdf_size": 432570, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2900424607051271933&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google AI", + "aff_unique_url": "https://ai.google", + "aff_unique_abbr": "Google AI", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25811", 
+ "title": "Quality-Aware Self-Training on Differentiable Synthesis of Rare Relational Data", + "track": "main", + "status": "Technical", + "abstract": "Data scarcity is a very common real-world problem that poses a major challenge to data-driven analytics. Although a lot of data-balancing approaches have been proposed to mitigate this problem, they may drop some useful information or fall into the overfitting problem. Generative Adversarial Network (GAN) based data synthesis methods can alleviate such a problem but lack of quality control over the generated samples. Moreover, the latent associations between the attribute set and the class labels in a relational data cannot be easily captured by a vanilla GAN. In light of this, we introduce an end-to-end self-training scheme (namely, Quality-Aware Self-Training) for rare relational data synthesis, which generates labeled synthetic data via pseudo labeling on GAN-based synthesis. We design a semantic pseudo labeling module to first control the quality of the generated features/samples, then calibrate their semantic labels via a classifier committee consisting of multiple pre-trained shallow classifiers. The high-confident generated samples with calibrated pseudo labels are then fed into a semantic classification network as augmented samples for self-training. We conduct extensive experiments on 20 benchmark datasets of different domains, including 14 industrial datasets. The results show that our method significantly outperforms state-of-the-art methods, including two recent GAN-based data synthesis schemes. 
Codes are available at https://github.com/yaxinhou/QAST.", + "primary_area": "knowledge representation and reasoning", + "author": "Chongsheng Zhang; Yaxin Hou; Ke Chen; Shuang Cao; Gaojuan Fan; Ji Liu", + "authorids": "", + "aff": "Henan University; Henan University; South China University of Technology+Peng Cheng Laboratory; Henan University; Henan University; Baidu Research", + "bibtex": "@article{Zhang_Hou_Chen_Cao_Fan_Liu_2023, title={Quality-Aware Self-Training on Differentiable Synthesis of Rare Relational Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25811}, DOI={10.1609/aaai.v37i5.25811}, abstractNote={Data scarcity is a very common real-world problem that poses a major challenge to data-driven analytics. Although a lot of data-balancing approaches have been proposed to mitigate this problem, they may drop some useful information or fall into the overfitting problem. Generative Adversarial Network (GAN) based data synthesis methods can alleviate such a problem but lack of quality control over the generated samples. Moreover, the latent associations between the attribute set and the class labels in a relational data cannot be easily captured by a vanilla GAN. In light of this, we introduce an end-to-end self-training scheme (namely, Quality-Aware Self-Training) for rare relational data synthesis, which generates labeled synthetic data via pseudo labeling on GAN-based synthesis. We design a semantic pseudo labeling module to first control the quality of the generated features/samples, then calibrate their semantic labels via a classifier committee consisting of multiple pre-trained shallow classifiers. The high-confident generated samples with calibrated pseudo labels are then fed into a semantic classification network as augmented samples for self-training. We conduct extensive experiments on 20 benchmark datasets of different domains, including 14 industrial datasets. 
The results show that our method significantly outperforms state-of-the-art methods, including two recent GAN-based data synthesis schemes. Codes are available at https://github.com/yaxinhou/QAST.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Chongsheng and Hou, Yaxin and Chen, Ke and Cao, Shuang and Fan, Gaojuan and Liu, Ji}, year={2023}, month={Jun.}, pages={6602-6611} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25811/25583", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25811", + "pdf_size": 542684, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1691212080345440823&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "ieee.org;henu.edu.cn;scut.edu.cn;henu.edu.cn;henu.edu.cn;gmail.com", + "email": "ieee.org;henu.edu.cn;scut.edu.cn;henu.edu.cn;henu.edu.cn;gmail.com", + "github": "https://github.com/yaxinhou/QAST", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1+2;0;0;3", + "aff_unique_norm": "Henan University;South China University of Technology;Peng Cheng Laboratory;Baidu", + "aff_unique_dep": ";;;Baidu Research", + "aff_unique_url": "http://www.henu.edu.cn;https://www.scut.edu.cn;http://www.pcl.ac.cn;https://research.baidu.com", + "aff_unique_abbr": "HENU;SCUT;PCL;Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27037", + "title": "Quantify the Political Bias in News Edits: Experiments with Few-Shot Learners (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The rapid growth of information and communication technologies in recent years, and the different forms of digital connectivity, have profoundly affected how news is generated and consumed. 
Digital traces and computational methods offer new opportunities to model and track the provenance of news. This project is the first study to characterize and predict how prominent news outlets make edits to news frames and their implications for geopolitical relationships and attitudes. We evaluate the feasibility of training few-shot learners on the editing patterns of articles discussing different countries, for understanding their wider implications in preserving or damaging geopolitical relationships.", + "primary_area": "", + "author": "Preetika Verma; Hansin Ahuja; Kokil Jaidka", + "authorids": "", + "aff": "Birla Institute of Technology and Science, Pilani; Google India; National University of Singapore", + "bibtex": "@article{Verma_Ahuja_Jaidka_2024, title={Quantify the Political Bias in News Edits: Experiments with Few-Shot Learners (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27037}, DOI={10.1609/aaai.v37i13.27037}, abstractNote={The rapid growth of information and communication technologies in recent years, and the different forms of digital connectivity, have profoundly affected how news is generated and consumed. Digital traces and computational methods offer new opportunities to model and track the provenance of news. This project is the first study to characterize and predict how prominent news outlets make edits to news frames and their implications for geopolitical relationships and attitudes. 
We evaluate the feasibility of training few-shot learners on the editing patterns of articles discussing different countries, for understanding their wider implications in preserving or damaging geopolitical relationships.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Verma, Preetika and Ahuja, Hansin and Jaidka, Kokil}, year={2024}, month={Jul.}, pages={16354-16355} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27037/26809", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27037", + "pdf_size": 144296, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14784574491945780705&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com; ;nus.edu.sg", + "email": "gmail.com; ;nus.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Birla Institute of Technology and Science;Google;National University of Singapore", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.bits-pilani.ac.in;https://www.google.com;https://www.nus.edu.sg", + "aff_unique_abbr": "BITS Pilani;Google India;NUS", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Pilani;Bangalore;", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "India;Singapore" + }, + { + "id": "article-26747", + "title": "Quantization-Aware Interval Bound Propagation for Training Certifiably Robust Quantized Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "We study the problem of training and certifying adversarially robust quantized neural networks (QNNs). Quantization is a technique for making neural networks more efficient by running them using low-bit integer arithmetic and is therefore commonly adopted in industry. 
Recent work has shown that floating-point neural networks that have been verified to be robust can become vulnerable to adversarial attacks after quantization, and certification of the quantized representation is necessary to guarantee robustness.\nIn this work, we present quantization-aware interval bound propagation (QA-IBP), a novel method for training robust QNNs.\nInspired by advances in robust learning of non-quantized networks, our training algorithm computes the gradient of an abstract representation of the actual network. Unlike existing approaches, our method can handle the discrete semantics of QNNs. \nBased on QA-IBP, we also develop a complete verification procedure for verifying the adversarial robustness of QNNs, which is guaranteed to terminate and produce a correct answer. Compared to existing approaches, the key advantage of our verification procedure is that it runs entirely on GPU or other accelerator devices. \nWe demonstrate experimentally that our approach significantly outperforms existing methods and establish the new state-of-the-art for training and certifying the robustness of QNNs.", + "primary_area": "safe and robust ai", + "author": "Mathias Lechner; \u0110or\u0111e \u017dikeli\u0107; Krishnendu Chatterjee; Thomas A. Henzinger; Daniela Rus", + "authorids": "", + "aff": "Massachusetts Institute of Technology (MIT); Institute of Science and Technology Austria (ISTA); Institute of Science and Technology Austria (ISTA); Institute of Science and Technology Austria (ISTA); Massachusetts Institute of Technology (MIT)", + "bibtex": "@article{Lechner_\u017dikeli\u0107_Chatterjee_Henzinger_Rus_2023, title={Quantization-Aware Interval Bound Propagation for Training Certifiably Robust Quantized Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26747}, DOI={10.1609/aaai.v37i12.26747}, abstractNote={We study the problem of training and certifying adversarially robust quantized neural networks (QNNs). 
Quantization is a technique for making neural networks more efficient by running them using low-bit integer arithmetic and is therefore commonly adopted in industry. Recent work has shown that floating-point neural networks that have been verified to be robust can become vulnerable to adversarial attacks after quantization, and certification of the quantized representation is necessary to guarantee robustness.\nIn this work, we present quantization-aware interval bound propagation (QA-IBP), a novel method for training robust QNNs.\nInspired by advances in robust learning of non-quantized networks, our training algorithm computes the gradient of an abstract representation of the actual network. Unlike existing approaches, our method can handle the discrete semantics of QNNs. Based on QA-IBP, we also develop a complete verification procedure for verifying the adversarial robustness of QNNs, which is guaranteed to terminate and produce a correct answer. Compared to existing approaches, the key advantage of our verification procedure is that it runs entirely on GPU or other accelerator devices. We demonstrate experimentally that our approach significantly outperforms existing methods and establish the new state-of-the-art for training and certifying the robustness of QNNs.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lechner, Mathias and \u017dikeli\u0107, \u0110or\u0111e and Chatterjee, Krishnendu and Henzinger, Thomas A. 
and Rus, Daniela}, year={2023}, month={Jun.}, pages={14964-14973} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26747/26519", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26747", + "pdf_size": 173042, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15139814916836318969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mit.edu; ; ; ; ", + "email": "mit.edu; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Institute of Science and Technology Austria", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.ista.ac.at", + "aff_unique_abbr": "MIT;ISTA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;0", + "aff_country_unique": "United States;Austria" + }, + { + "id": "article-26354", + "title": "Quantized Feature Distillation for Network Quantization", + "track": "main", + "status": "Technical", + "abstract": "Neural network quantization aims to accelerate and trim full-precision neural network models by using low bit approximations. Methods adopting the quantization aware training (QAT) paradigm have recently seen a rapid growth, but are often conceptually complicated. This paper proposes a novel and highly effective QAT method, quantized feature distillation (QFD). QFD first trains a quantized (or binarized) representation as the teacher, then quantize the network using knowledge distillation (KD). Quantitative results show that QFD is more flexible and effective (i.e., quantization friendly) than previous quantization methods. QFD surpasses existing methods by a noticeable margin on not only image classification but also object detection, albeit being much simpler. 
Furthermore, QFD quantizes ViT and Swin-Transformer on MS-COCO detection and segmentation, which verifies its potential in real world deployment. To the best of our knowledge, this is the first time that vision transformers have been quantized in object detection and image segmentation tasks.", + "primary_area": "machine learning iv", + "author": "Ke Zhu; Yin-Yin He; Jianxin Wu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Zhu_He_Wu_2023, title={Quantized Feature Distillation for Network Quantization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26354}, DOI={10.1609/aaai.v37i9.26354}, abstractNote={Neural network quantization aims to accelerate and trim full-precision neural network models by using low bit approximations. Methods adopting the quantization aware training (QAT) paradigm have recently seen a rapid growth, but are often conceptually complicated. This paper proposes a novel and highly effective QAT method, quantized feature distillation (QFD). QFD first trains a quantized (or binarized) representation as the teacher, then quantize the network using knowledge distillation (KD). Quantitative results show that QFD is more flexible and effective (i.e., quantization friendly) than previous quantization methods. QFD surpasses existing methods by a noticeable margin on not only image classification but also object detection, albeit being much simpler. Furthermore, QFD quantizes ViT and Swin-Transformer on MS-COCO detection and segmentation, which verifies its potential in real world deployment. 
To the best of our knowledge, this is the first time that vision transformers have been quantized in object detection and image segmentation tasks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Ke and He, Yin-Yin and Wu, Jianxin}, year={2023}, month={Jun.}, pages={11452-11460} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26354/26126", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26354", + "pdf_size": 271185, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4527021902114833066&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26313", + "title": "Quantum Multi-Agent Meta Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Although quantum supremacy is yet to come, there has recently been an increasing interest in identifying the potential of quantum machine learning (QML) in the looming era of practical quantum computing. Motivated by this, in this article we re-design multi-agent reinforcement learning (MARL) based on the unique characteristics of quantum neural networks (QNNs) having two separate dimensions of trainable parameters: angle parameters affecting the output qubit states, and pole parameters associated with the output measurement basis. 
Exploiting this dyadic trainability as meta-learning capability, we propose quantum meta MARL (QM2ARL) that first applies angle training for meta-QNN learning, followed by pole training for few-shot or local-QNN training. To avoid overfitting, we develop an angle-to-pole regularization technique injecting noise into the pole domain during angle training. Furthermore, by exploiting the pole as the memory address of each trained QNN, we introduce the concept of pole memory allowing one to save and load trained QNNs using only two-parameter pole values. We theoretically prove the convergence of angle training under the angle-to-pole regularization, and by simulation corroborate the effectiveness of QM2ARL in achieving high reward and fast convergence, as well as of the pole memory in fast adaptation to a time-varying environment.", + "primary_area": "machine learning iv", + "author": "Won Joon Yun; Jihong Park; Joongheon Kim", + "authorids": "", + "aff": "School of Electrical Engineering, Korea University, Seoul, Republic of Korea; School of Information Technology, Deakin University, Geelong, VIC, Australia; School of Electrical Engineering, Korea University, Seoul, Republic of Korea", + "bibtex": "@article{Yun_Park_Kim_2023, title={Quantum Multi-Agent Meta Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26313}, DOI={10.1609/aaai.v37i9.26313}, abstractNote={Although quantum supremacy is yet to come, there has recently been an increasing interest in identifying the potential of quantum machine learning (QML) in the looming era of practical quantum computing. Motivated by this, in this article we re-design multi-agent reinforcement learning (MARL) based on the unique characteristics of quantum neural networks (QNNs) having two separate dimensions of trainable parameters: angle parameters affecting the output qubit states, and pole parameters associated with the output measurement basis. 
Exploiting this dyadic trainability as meta-learning capability, we propose quantum meta MARL (QM2ARL) that first applies angle training for meta-QNN learning, followed by pole training for few-shot or local-QNN training. To avoid overfitting, we develop an angle-to-pole regularization technique injecting noise into the pole domain during angle training. Furthermore, by exploiting the pole as the memory address of each trained QNN, we introduce the concept of pole memory allowing one to save and load trained QNNs using only two-parameter pole values. We theoretically prove the convergence of angle training under the angle-to-pole regularization, and by simulation corroborate the effectiveness of QM2ARL in achieving high reward and fast convergence, as well as of the pole memory in fast adaptation to a time-varying environment.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yun, Won Joon and Park, Jihong and Kim, Joongheon}, year={2023}, month={Jun.}, pages={11087-11095} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26313/26085", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26313", + "pdf_size": 2307005, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16677593255309028201&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "korea.ac.kr;deakin.edu.au;korea.ac.kr", + "email": "korea.ac.kr;deakin.edu.au;korea.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Korea University;Deakin University", + "aff_unique_dep": "School of Electrical Engineering;School of Information Technology", + "aff_unique_url": "http://www.korea.ac.kr;https://www.deakin.edu.au", + "aff_unique_abbr": "KU;Deakin", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Seoul;Geelong", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Republic of Korea;Australia" + }, + { + "id": 
"article-26202", + "title": "Quantum Multi-Armed Bandits and Stochastic Linear Bandits Enjoy Logarithmic Regrets", + "track": "main", + "status": "Technical", + "abstract": "Multi-arm bandit (MAB) and stochastic linear bandit (SLB) are important models in reinforcement learning, and it is well-known that classical algorithms for bandits with time horizon T suffer from the regret of at least the square root of T. In this paper, we study MAB and SLB with quantum reward oracles and propose quantum algorithms for both models with the order of the polylog T regrets, exponentially improving the dependence in terms of T. To the best of our knowledge, this is the first provable quantum speedup for regrets of bandit problems and in general exploitation in reinforcement learning. Compared to previous literature on quantum exploration algorithms for MAB and reinforcement learning, our quantum input model is simpler and only assumes quantum oracles for each individual arm.", + "primary_area": "machine learning iii", + "author": "Zongqi Wan; Zhijie Zhang; Tongyang Li; Jialin Zhang; Xiaoming Sun", + "authorids": "", + "aff": "Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Center for Applied Mathematics of Fujian Province, School of Mathematics and Statistics, Fuzhou University; Center on Frontiers of Computing Studies, Peking University + School of Computer Science, Peking University; Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences", + "bibtex": "@article{Wan_Zhang_Li_Zhang_Sun_2023, title={Quantum Multi-Armed Bandits and Stochastic Linear Bandits Enjoy Logarithmic Regrets}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26202}, DOI={10.1609/aaai.v37i8.26202}, abstractNote={Multi-arm bandit (MAB) and stochastic linear bandit (SLB) are 
important models in reinforcement learning, and it is well-known that classical algorithms for bandits with time horizon T suffer from the regret of at least the square root of T. In this paper, we study MAB and SLB with quantum reward oracles and propose quantum algorithms for both models with the order of the polylog T regrets, exponentially improving the dependence in terms of T. To the best of our knowledge, this is the first provable quantum speedup for regrets of bandit problems and in general exploitation in reinforcement learning. Compared to previous literature on quantum exploration algorithms for MAB and reinforcement learning, our quantum input model is simpler and only assumes quantum oracles for each individual arm.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wan, Zongqi and Zhang, Zhijie and Li, Tongyang and Zhang, Jialin and Sun, Xiaoming}, year={2023}, month={Jun.}, pages={10087-10094} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26202/25974", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26202", + "pdf_size": 2129429, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10879956300002918085&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "ict.ac.cn;fzu.edu.cn;pku.edu.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;fzu.edu.cn;pku.edu.cn;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "arXiv:2205.14988", + "author_num": 5, + "aff_unique_index": "0+1;2;3+3;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Fuzhou University;Peking University", + "aff_unique_dep": "Institute of Computing Technology;;School of Mathematics and Statistics;Center on Frontiers of Computing Studies", + "aff_unique_url": "http://www.ict.ac.cn;http://www.ucas.ac.cn;https://www.fzu.edu.cn;http://www.pku.edu.cn", + "aff_unique_abbr": "CAS;UCAS;FZU;Peking U", + "aff_campus_unique_index": 
";1;2;;", + "aff_campus_unique": ";Fuzhou;Beijing", + "aff_country_unique_index": "0+0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26633", + "title": "Quantum-Inspired Representation for Long-Tail Senses of Word Sense Disambiguation", + "track": "main", + "status": "Technical", + "abstract": "Data imbalance, also known as the long-tail distribution of data, is an important challenge for data-driven models. In the Word Sense Disambiguation (WSD) task, the long-tail phenomenon of word sense distribution is more common, making it difficult to effectively represent and identify Long-Tail Senses (LTSs). Therefore exploring representation methods that do not rely heavily on the training sample size is an important way to combat LTSs. Considering that many new states, namely superposition states, can be constructed from several known states in quantum mechanics, superposition states provide the possibility to obtain more accurate representations from inferior representations learned from a small sample size. Inspired by quantum superposition states, a representation method in Hilbert space is proposed to reduce the dependence on large sample sizes and thus combat LTSs. We theoretically prove the correctness of the method, and verify its effectiveness under the standard WSD evaluation framework and obtain state-of-the-art performance. 
Furthermore, we also test on the constructed LTS and the latest cross-lingual datasets, and achieve promising results.", + "primary_area": "speech natural language processing", + "author": "Junwei Zhang; Ruifang He; Fengyu Guo", + "authorids": "", + "aff": "Tianjin Key Laboratory of Cognitive Computing and Application, College of Intelligence and Computing, Tianjin University, Tianjin, China; Tianjin Key Laboratory of Cognitive Computing and Application, College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Computer and Information Engineering, Tianjin Normal University, Tianjin, China", + "bibtex": "@article{Zhang_He_Guo_2023, title={Quantum-Inspired Representation for Long-Tail Senses of Word Sense Disambiguation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26633}, DOI={10.1609/aaai.v37i11.26633}, abstractNote={Data imbalance, also known as the long-tail distribution of data, is an important challenge for data-driven models. In the Word Sense Disambiguation (WSD) task, the long-tail phenomenon of word sense distribution is more common, making it difficult to effectively represent and identify Long-Tail Senses (LTSs). Therefore exploring representation methods that do not rely heavily on the training sample size is an important way to combat LTSs. Considering that many new states, namely superposition states, can be constructed from several known states in quantum mechanics, superposition states provide the possibility to obtain more accurate representations from inferior representations learned from a small sample size. Inspired by quantum superposition states, a representation method in Hilbert space is proposed to reduce the dependence on large sample sizes and thus combat LTSs. We theoretically prove the correctness of the method, and verify its effectiveness under the standard WSD evaluation framework and obtain state-of-the-art performance. 
Furthermore, we also test on the constructed LTS and the latest cross-lingual datasets, and achieve promising results.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Junwei and He, Ruifang and Guo, Fengyu}, year={2023}, month={Jun.}, pages={13949-13957} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26633/26405", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26633", + "pdf_size": 392564, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5826832556550255701&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff_domain": "tju.edu.cn;tju.edu.cn;tjnu.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tjnu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Tianjin University;Tianjin Normal University", + "aff_unique_dep": "College of Intelligence and Computing;College of Computer and Information Engineering", + "aff_unique_url": "https://www.tju.edu.cn;http://www.tjnu.edu.cn", + "aff_unique_abbr": "Tianjin University;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Tianjin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26642", + "title": "Query Your Model with Definitions in FrameNet: An Effective Method for Frame Semantic Role Labeling", + "track": "main", + "status": "Technical", + "abstract": "Frame Semantic Role Labeling (FSRL) identifies arguments and labels them with frame semantic roles defined in FrameNet. Previous researches tend to divide FSRL into argument identification and role classification. Such methods usually model role classification as naive multi-class classification and treat arguments individually, which neglects label semantics and interactions between arguments and thus hindering performance and generalization of models. 
In this paper, we propose a query-based framework named ArGument Extractor with Definitions in FrameNet (AGED) to mitigate these problems. Definitions of frames and frame elements (FEs) in FrameNet can be used to query arguments in text. Encoding text-definition pairs can guide models in learning label semantics and strengthening argument interactions. Experiments show that AGED outperforms previous state-of-the-art by up to 1.3 F1-score in two FrameNet datasets and the generalization power of AGED in zero-shot and fewshot scenarios. Our code and technical appendix is available at https://github.com/PKUnlp-icler/AGED.", + "primary_area": "speech natural language processing", + "author": "Ce Zheng; Yiming Wang; Baobao Chang", + "authorids": "", + "aff": "The MOE Key Laboratory of Computational Linguistics, Peking University, China; The MOE Key Laboratory of Computational Linguistics, Peking University, China; The MOE Key Laboratory of Computational Linguistics, Peking University, China", + "bibtex": "@article{Zheng_Wang_Chang_2023, title={Query Your Model with Definitions in FrameNet: An Effective Method for Frame Semantic Role Labeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26642}, DOI={10.1609/aaai.v37i11.26642}, abstractNote={Frame Semantic Role Labeling (FSRL) identifies arguments and labels them with frame semantic roles defined in FrameNet. Previous researches tend to divide FSRL into argument identification and role classification. Such methods usually model role classification as naive multi-class classification and treat arguments individually, which neglects label semantics and interactions between arguments and thus hindering performance and generalization of models. In this paper, we propose a query-based framework named ArGument Extractor with Definitions in FrameNet (AGED) to mitigate these problems. Definitions of frames and frame elements (FEs) in FrameNet can be used to query arguments in text. 
Encoding text-definition pairs can guide models in learning label semantics and strengthening argument interactions. Experiments show that AGED outperforms previous state-of-the-art by up to 1.3 F1-score in two FrameNet datasets and the generalization power of AGED in zero-shot and fewshot scenarios. Our code and technical appendix is available at https://github.com/PKUnlp-icler/AGED.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Ce and Wang, Yiming and Chang, Baobao}, year={2023}, month={Jun.}, pages={14029-14037} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26642/26414", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26642", + "pdf_size": 265511, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11058480737018508645&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;gmail.com;pku.edu.cn", + "email": "pku.edu.cn;gmail.com;pku.edu.cn", + "github": "https://github.com/PKUnlp-icler/AGED", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25613", + "title": "Query-Aware Quantization for Maximum Inner Product Search", + "track": "main", + "status": "Technical", + "abstract": "Maximum Inner Product Search (MIPS) plays an essential role in many applications ranging from information retrieval, recommender systems to natural language processing. However, exhaustive MIPS is often expensive and impractical when there are a large number of candidate items. 
\nThe state-of-the-art quantization method of approximated MIPS is product quantization with a score-aware loss, developed by assuming that queries are uniformly distributed in the unit sphere. However, in real-world datasets, the above assumption about queries does not necessarily hold. \nTo this end, we propose a quantization method based on the distribution of queries combined with sampled softmax.\nFurther, we introduce a general framework encompassing the proposed method and multiple quantization methods, and we develop an effective optimization for the proposed general framework. The proposed method is evaluated on three real-world datasets. The experimental results show that it outperforms the state-of-the-art baselines.", + "primary_area": "data mining and knowledge management", + "author": "Jin Zhang; Defu Lian; Haodi Zhang; Baoyun Wang; Enhong Chen", + "authorids": "", + "aff": "University of Science and Technology of China; University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence, Hefei, China; Shenzhen University; Hisense; University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence, Hefei, China", + "bibtex": "@article{Zhang_Lian_Zhang_Wang_Chen_2023, title={Query-Aware Quantization for Maximum Inner Product Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25613}, DOI={10.1609/aaai.v37i4.25613}, abstractNote={Maximum Inner Product Search (MIPS) plays an essential role in many applications ranging from information retrieval, recommender systems to natural language processing. However, exhaustive MIPS is often expensive and impractical when there are a large number of candidate items. The state-of-the-art quantization method of approximated MIPS is product quantization with a score-aware loss, developed by assuming that queries are uniformly distributed in the unit sphere. 
However, in real-world datasets, the above assumption about queries does not necessarily hold. To this end, we propose a quantization method based on the distribution of queries combined with sampled softmax.\nFurther, we introduce a general framework encompassing the proposed method and multiple quantization methods, and we develop an effective optimization for the proposed general framework. The proposed method is evaluated on three real-world datasets. The experimental results show that it outperforms the state-of-the-art baselines.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jin and Lian, Defu and Zhang, Haodi and Wang, Baoyun and Chen, Enhong}, year={2023}, month={Jun.}, pages={4875-4883} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25613/25385", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25613", + "pdf_size": 235120, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4999636353688531528&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn;szu.edu.cn;hisense.com", + "email": "mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn;szu.edu.cn;hisense.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;3;0+1", + "aff_unique_norm": "University of Science and Technology of China;State Key Laboratory of Cognitive Intelligence;Shenzhen University;Hisense Company", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.ustc.edu.cn;;https://www.szu.edu.cn;https://www.hisense.com", + "aff_unique_abbr": "USTC;;SZU;Hisense", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Hefei", + "aff_country_unique_index": "0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26717", + "title": "Query-Based Hard-Image Retrieval for Object Detection at Test Time", + "track": "aaai special track", + "status": "Technical", + "abstract": "There 
is a longstanding interest in capturing the error behaviour of object detectors by finding images where their performance is likely to be unsatisfactory. In real-world applications such as autonomous driving, it is also crucial to characterise potential failures beyond simple requirements of detection performance. For example, a missed detection of a pedestrian close to an ego vehicle will generally require closer inspection than a missed detection of a car in the distance. The problem of predicting such potential failures at test time has largely been overlooked in the literature and conventional approaches based on detection uncertainty fall short in that they are agnostic to such fine-grained characterisation of errors. In this work, we propose to reformulate the problem of finding \"hard\" images as a query-based hard image retrieval task, where queries are specific definitions of \"hardness\", and offer a simple and intuitive method that can solve this task for a large family of queries. Our method is entirely post-hoc, does not require ground-truth annotations, is independent of the choice of a detector, and relies on an efficient Monte Carlo estimation that uses a simple stochastic model in place of the ground-truth. We show experimentally that it can be applied successfully to a wide variety of queries for which it can reliably identify hard images for a given detector without any labelled data. We provide results on ranking and classification tasks using the widely used RetinaNet, Faster-RCNN, Mask-RCNN, and Cascade Mask-RCNN object detectors. The code for this project is available at https://github.com/fiveai/hardest.", + "primary_area": "safe and robust ai", + "author": "Edward Ayers; Jonathan Sadeghi; John Redford; Romain Mueller; Puneet K. 
Dokania", + "authorids": "", + "aff": "Five AI Ltd., UK; Five AI Ltd., UK; Five AI Ltd., UK; Five AI Ltd., UK; Five AI Ltd., UK", + "bibtex": "@article{Ayers_Sadeghi_Redford_Mueller_Dokania_2023, title={Query-Based Hard-Image Retrieval for Object Detection at Test Time}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26717}, DOI={10.1609/aaai.v37i12.26717}, abstractNote={There is a longstanding interest in capturing the error behaviour of object detectors by finding images where their performance is likely to be unsatisfactory. In real-world applications such as autonomous driving, it is also crucial to characterise potential failures beyond simple requirements of detection performance. For example, a missed detection of a pedestrian close to an ego vehicle will generally require closer inspection than a missed detection of a car in the distance. The problem of predicting such potential failures at test time has largely been overlooked in the literature and conventional approaches based on detection uncertainty fall short in that they are agnostic to such fine-grained characterisation of errors. In this work, we propose to reformulate the problem of finding "hard" images as a query-based hard image retrieval task, where queries are specific definitions of "hardness", and offer a simple and intuitive method that can solve this task for a large family of queries. Our method is entirely post-hoc, does not require ground-truth annotations, is independent of the choice of a detector, and relies on an efficient Monte Carlo estimation that uses a simple stochastic model in place of the ground-truth. We show experimentally that it can be applied successfully to a wide variety of queries for which it can reliably identify hard images for a given detector without any labelled data. We provide results on ranking and classification tasks using the widely used RetinaNet, Faster-RCNN, Mask-RCNN, and Cascade Mask-RCNN object detectors. 
The code for this project is available at https://github.com/fiveai/hardest.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ayers, Edward and Sadeghi, Jonathan and Redford, John and Mueller, Romain and Dokania, Puneet K.}, year={2023}, month={Jun.}, pages={14692-14700} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26717/26489", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26717", + "pdf_size": 4750706, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9842230758281531507&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "five.ai;five.ai;five.ai;five.ai;five.ai", + "email": "five.ai;five.ai;five.ai;five.ai;five.ai", + "github": "https://github.com/fiveai/hardest", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Five AI Ltd.", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26519", + "title": "Question Decomposition Tree for Answering Complex Questions over Knowledge Bases", + "track": "main", + "status": "Technical", + "abstract": "Knowledge base question answering (KBQA) has attracted a lot of interest in recent years, especially for complex questions which require multiple facts to answer. Question decomposition is a promising way to answer complex questions. Existing decomposition methods split the question into sub-questions according to a single compositionality type, which is not sufficient for questions involving multiple compositionality types. In this paper, we propose Question Decomposition Tree (QDT) to represent the structure of complex questions. Inspired by recent advances in natural language generation (NLG), we present a two-staged method called Clue-Decipher to generate QDT. 
It can leverage the strong ability of NLG model and simultaneously preserve the original questions. To verify that QDT can enhance KBQA task, we design a decomposition-based KBQA system called QDTQA. Extensive experiments show that QDTQA outperforms previous state-of-the-art methods on ComplexWebQuestions dataset. Besides, our decomposition method improves an existing KBQA system by 12% and sets a new state-of-the-art on LC-QuAD 1.0.", + "primary_area": "speech natural language processing", + "author": "Xiang Huang; Sitao Cheng; Yiheng Shu; Yuheng Bao; Yuzhong Qu", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Huang_Cheng_Shu_Bao_Qu_2023, title={Question Decomposition Tree for Answering Complex Questions over Knowledge Bases}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26519}, DOI={10.1609/aaai.v37i11.26519}, abstractNote={Knowledge base question answering (KBQA) has attracted a lot of interest in recent years, especially for complex questions which require multiple facts to answer. Question decomposition is a promising way to answer complex questions. Existing decomposition methods split the question into sub-questions according to a single compositionality type, which is not sufficient for questions involving multiple compositionality types. In this paper, we propose Question Decomposition Tree (QDT) to represent the structure of complex questions. Inspired by recent advances in natural language generation (NLG), we present a two-staged method called Clue-Decipher to generate QDT. 
It can leverage the strong ability of NLG model and simultaneously preserve the original questions. To verify that QDT can enhance KBQA task, we design a decomposition-based KBQA system called QDTQA. Extensive experiments show that QDTQA outperforms previous state-of-the-art methods on ComplexWebQuestions dataset. Besides, our decomposition method improves an existing KBQA system by 12% and sets a new state-of-the-art on LC-QuAD 1.0.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Xiang and Cheng, Sitao and Shu, Yiheng and Bao, Yuheng and Qu, Yuzhong}, year={2023}, month={Jun.}, pages={12924-12932} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26519/26291", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26519", + "pdf_size": 336183, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6554815469110666849&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25270", + "title": "RADIANT: Radar-Image Association Network for 3D Object Detection", + "track": "main", + "status": "Technical", + "abstract": "As a direct depth sensor, radar holds promise as a tool to improve monocular 3D object detection, which suffers from depth errors, due in part to the depth-scale ambiguity. 
On the other hand, leveraging radar depths is hampered by difficulties in precisely associating radar returns with 3D estimates from monocular methods, effectively erasing its benefits. This paper proposes a fusion network that addresses this radar-camera association challenge. We train our network to predict the 3D offsets between radar returns and object centers, enabling radar depths to enhance the accuracy of 3D monocular detection. By using parallel radar and camera backbones, our network fuses information at both the feature level and detection level, while at the same time leveraging a state-of-the-art monocular detection technique without retraining it. Experimental results show significant improvement in mean average precision and translation error on the nuScenes dataset over monocular counterparts. Our source code is available at https://github.com/longyunf/radiant.", + "primary_area": "computer vision ii", + "author": "Yunfei Long; Abhinav Kumar; Daniel Morris; Xiaoming Liu; Marcos Castro; Punarjay Chakravarty", + "authorids": "", + "aff": "Michigan State University; Michigan State University; Michigan State University; Michigan State University; Ford Motor Company; Ford Motor Company", + "bibtex": "@article{Long_Kumar_Morris_Liu_Castro_Chakravarty_2023, title={RADIANT: Radar-Image Association Network for 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25270}, DOI={10.1609/aaai.v37i2.25270}, abstractNote={As a direct depth sensor, radar holds promise as a tool to improve monocular 3D object detection, which suffers from depth errors, due in part to the depth-scale ambiguity. On the other hand, leveraging radar depths is hampered by difficulties in precisely associating radar returns with 3D estimates from monocular methods, effectively erasing its benefits. This paper proposes a fusion network that addresses this radar-camera association challenge. 
We train our network to predict the 3D offsets between radar returns and object centers, enabling radar depths to enhance the accuracy of 3D monocular detection. By using parallel radar and camera backbones, our network fuses information at both the feature level and detection level, while at the same time leveraging a state-of-the-art monocular detection technique without retraining it. Experimental results show significant improvement in mean average precision and translation error on the nuScenes dataset over monocular counterparts. Our source code is available at https://github.com/longyunf/radiant.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Long, Yunfei and Kumar, Abhinav and Morris, Daniel and Liu, Xiaoming and Castro, Marcos and Chakravarty, Punarjay}, year={2023}, month={Jun.}, pages={1808-1816} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25270/25042", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25270", + "pdf_size": 5434077, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3460194917979163947&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "msu.edu;msu.edu;msu.edu;msu.edu;ford.com;ford.com", + "email": "msu.edu;msu.edu;msu.edu;msu.edu;ford.com;ford.com", + "github": "https://github.com/longyunf/radiant", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;1", + "aff_unique_norm": "Michigan State University;Ford Motor Company", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.msu.edu;https://www.ford.com", + "aff_unique_abbr": "MSU;Ford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25149", + "title": "RAFaRe: Learning Robust and Accurate Non-parametric 3D Face Reconstruction from Pseudo 2D&3D Pairs", + "track": "main", + "status": "Technical", + "abstract": "We 
propose a robust and accurate non-parametric method for single-view 3D face reconstruction (SVFR). While tremendous efforts have been devoted to parametric SVFR, a visible gap still lies between the result 3D shape and the ground truth. We believe there are two major obstacles: 1) the representation of the parametric model is limited to a certain face database; 2) 2D images and 3D shapes in the fitted datasets are distinctly misaligned. To resolve these issues, a large-scale pseudo 2D&3D dataset is created by first rendering the detailed 3D faces, then swapping the face in the wild images with the rendered face. These pseudo 2D&3D pairs are created from publicly available datasets which eliminate the gaps between 2D and 3D data while covering diverse appearances, poses, scenes, and illumination. We further propose a non-parametric scheme to learn a well-generalized SVFR model from the created dataset, and the proposed hierarchical signed distance function turns out to be effective in predicting middle-scale and small-scale 3D facial geometry. Our model outperforms previous methods on FaceScape-wild/lab and MICC benchmarks and is well generalized to various appearances, poses, expressions, and in-the-wild environments. The code is released at https://github.com/zhuhao-nju/rafare.", + "primary_area": "computer vision i", + "author": "Longwei Guo; Hao Zhu; Yuanxun Lu; Menghua Wu; Xun Cao", + "authorids": "", + "aff": "Nanjing University; Nanjing University; Nanjing University; Nanjing University; Nanjing University", + "bibtex": "@article{Guo_Zhu_Lu_Wu_Cao_2023, title={RAFaRe: Learning Robust and Accurate Non-parametric 3D Face Reconstruction from Pseudo 2D&3D Pairs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25149}, DOI={10.1609/aaai.v37i1.25149}, abstractNote={We propose a robust and accurate non-parametric method for single-view 3D face reconstruction (SVFR). 
While tremendous efforts have been devoted to parametric SVFR, a visible gap still lies between the result 3D shape and the ground truth. We believe there are two major obstacles: 1) the representation of the parametric model is limited to a certain face database; 2) 2D images and 3D shapes in the fitted datasets are distinctly misaligned. To resolve these issues, a large-scale pseudo 2D&3D dataset is created by first rendering the detailed 3D faces, then swapping the face in the wild images with the rendered face. These pseudo 2D&3D pairs are created from publicly available datasets which eliminate the gaps between 2D and 3D data while covering diverse appearances, poses, scenes, and illumination. We further propose a non-parametric scheme to learn a well-generalized SVFR model from the created dataset, and the proposed hierarchical signed distance function turns out to be effective in predicting middle-scale and small-scale 3D facial geometry. Our model outperforms previous methods on FaceScape-wild/lab and MICC benchmarks and is well generalized to various appearances, poses, expressions, and in-the-wild environments. 
The code is released at https://github.com/zhuhao-nju/rafare.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Longwei and Zhu, Hao and Lu, Yuanxun and Wu, Menghua and Cao, Xun}, year={2023}, month={Jun.}, pages={719-727} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25149/24921", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25149", + "pdf_size": 7298342, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7045791202621674531&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "github": "https://github.com/zhuhao-nju/rafare", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26741", + "title": "READ: Aggregating Reconstruction Error into Out-of-Distribution Detection", + "track": "aaai special track", + "status": "Technical", + "abstract": "Detecting out-of-distribution (OOD) samples is crucial to the safe deployment of a classifier in the real world. However, deep neural networks are known to be overconfident for abnormal data. Existing works directly design score function by mining the inconsistency from classifier for in-distribution (ID) and OOD. In this paper, we further complement this inconsistency with reconstruction error, based on the assumption that an autoencoder trained on ID data cannot reconstruct OOD as well as ID. 
We propose a novel method, READ (Reconstruction Error Aggregated Detector), to unify inconsistencies from classifier and autoencoder. Specifically, the reconstruction error of raw pixels is transformed to latent space of classifier. We show that the transformed reconstruction error bridges the semantic gap and inherits detection performance from the original. Moreover, we propose an adjustment strategy to alleviate the overconfidence problem of autoencoder according to a fine-grained characterization of OOD data. Under two scenarios of pre-training and retraining, we respectively present two variants of our method, namely READ-MD (Mahalanobis Distance) only based on pre-trained classifier and READ-ED (Euclidean Distance) which retrains the classifier. Our methods do not require access to test time OOD data for fine-tuning hyperparameters. Finally, we demonstrate the effectiveness of the proposed methods through extensive comparisons with state-of-the-art OOD detection algorithms. On a CIFAR-10 pre-trained WideResNet, our method reduces the average FPR@95TPR by up to 9.8% compared with previous state-of-the-art.", + "primary_area": "safe and robust ai", + "author": "Wenyu Jiang; Yuxin Ge; Hao Cheng; Mingcai Chen; Shuai Feng; Chongjun Wang", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Jiang_Ge_Cheng_Chen_Feng_Wang_2023, title={READ: Aggregating Reconstruction Error into Out-of-Distribution 
Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26741}, DOI={10.1609/aaai.v37i12.26741}, abstractNote={Detecting out-of-distribution (OOD) samples is crucial to the safe deployment of a classifier in the real world. However, deep neural networks are known to be overconfident for abnormal data. Existing works directly design score function by mining the inconsistency from classifier for in-distribution (ID) and OOD. In this paper, we further complement this inconsistency with reconstruction error, based on the assumption that an autoencoder trained on ID data cannot reconstruct OOD as well as ID. We propose a novel method, READ (Reconstruction Error Aggregated Detector), to unify inconsistencies from classifier and autoencoder. Specifically, the reconstruction error of raw pixels is transformed to latent space of classifier. We show that the transformed reconstruction error bridges the semantic gap and inherits detection performance from the original. Moreover, we propose an adjustment strategy to alleviate the overconfidence problem of autoencoder according to a fine-grained characterization of OOD data. Under two scenarios of pre-training and retraining, we respectively present two variants of our method, namely READ-MD (Mahalanobis Distance) only based on pre-trained classifier and READ-ED (Euclidean Distance) which retrains the classifier. Our methods do not require access to test time OOD data for fine-tuning hyperparameters. Finally, we demonstrate the effectiveness of the proposed methods through extensive comparisons with state-of-the-art OOD detection algorithms. 
On a CIFAR-10 pre-trained WideResNet, our method reduces the average FPR@95TPR by up to 9.8% compared with previous state-of-the-art.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Wenyu and Ge, Yuxin and Cheng, Hao and Chen, Mingcai and Feng, Shuai and Wang, Chongjun}, year={2023}, month={Jun.}, pages={14910-14918} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26741/26513", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26741", + "pdf_size": 489598, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1361200822714463877&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing University", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25238", + "title": "READ: Large-Scale Neural Scene Rendering for Autonomous Driving", + "track": "main", + "status": "Technical", + "abstract": "With the development of advanced driver assistance systems~(ADAS) and autonomous vehicles, conducting experiments in various scenarios becomes an urgent need. Although having been capable of synthesizing photo-realistic street scenes, conventional image-to-image translation methods cannot produce coherent scenes due to the lack of 3D information. 
In this paper, a large-scale neural rendering method is proposed to synthesize the autonomous driving scene~(READ), which makes it possible to generate large-scale driving scenes in real time on a PC through a variety of sampling schemes. In order to effectively represent driving scenarios, we propose an \u03c9-net rendering network to learn neural descriptors from sparse point clouds. Our model can not only synthesize photo-realistic driving scenes but also stitch and edit them. The promising experimental results show that our model performs well in large-scale driving scenarios.", + "primary_area": "computer vision ii", + "author": "Zhuopeng Li; Lu Li; Jianke Zhu", + "authorids": "", + "aff": "Zhejiang University, Zhejiang, China; Zhejiang University, Zhejiang, China; Zhejiang University, Zhejiang, China + Alibaba-Zhejiang University Joint Institute of Frontier Technologies", + "bibtex": "@article{Li_Li_Zhu_2023, title={READ: Large-Scale Neural Scene Rendering for Autonomous Driving}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25238}, DOI={10.1609/aaai.v37i2.25238}, abstractNote={With the development of advanced driver assistance systems~(ADAS) and autonomous vehicles, conducting experiments in various scenarios becomes an urgent need. Although having been capable of synthesizing photo-realistic street scenes, conventional image-to-image translation methods cannot produce coherent scenes due to the lack of 3D information. In this paper, a large-scale neural rendering method is proposed to synthesize the autonomous driving scene~(READ), which makes it possible to generate large-scale driving scenes in real time on a PC through a variety of sampling schemes. In order to effectively represent driving scenarios, we propose an \u03c9-net rendering network to learn neural descriptors from sparse point clouds. Our model can not only synthesize photo-realistic driving scenes but also stitch and edit them. 
The promising experimental results show that our model performs well in large-scale driving scenarios.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zhuopeng and Li, Lu and Zhu, Jianke}, year={2023}, month={Jun.}, pages={1522-1529} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25238/25010", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25238", + "pdf_size": 3681262, + "gs_citation": 68, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15051114477851171261&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26181", + "title": "REMIT: Reinforced Multi-Interest Transfer for Cross-Domain Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Cold-start problem is one of the most challenging problems for recommender systems. One promising solution to this problem is cross-domain recommendation (CDR) which leverages rich information from an auxiliary source domain to improve the performance of recommender system in the target domain. In particular, the family of embedding and mapping methods for CDR is very effective, which explicitly learn a mapping function from source embeddings to target embeddings to transfer user\u2019s preferences. Recent works usually transfer an overall source embedding by modeling a common or personalized preference bridge for all users. However, a unified user embedding cannot reflect the user\u2019s multiple interests in auxiliary source domain. 
In this paper, we propose a novel framework called reinforced multi-interest transfer for CDR (REMIT). Specifically, we first construct a heterogeneous information network and employ different meta-path based aggregations to get user\u2019s multiple interests in source domain, then transform different interest embeddings with different meta-generated personalized bridge functions for each user. To better coordinate the transformed user interest embeddings and the item embedding in target domain, we systematically develop a reinforced method to dynamically assign weights to transformed interests for different training instances and optimize the performance of target model. In addition, the REMIT is a general framework that can be applied upon various base models in target domain. Our extensive experimental results on large real-world datasets demonstrate the superior performance and compatibility of REMIT.", + "primary_area": "machine learning iii", + "author": "Caiqi Sun; Jiewei Gu; Binbin Hu; Xin Dong; Hai Li; Lei Cheng; Linjian Mo", + "authorids": "", + "aff": "Ant Group; Ant Group + School of Data Science, Fudan University; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group", + "bibtex": "@article{Sun_Gu_Hu_Dong_Li_Cheng_Mo_2023, title={REMIT: Reinforced Multi-Interest Transfer for Cross-Domain Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26181}, DOI={10.1609/aaai.v37i8.26181}, abstractNote={Cold-start problem is one of the most challenging problems for recommender systems. One promising solution to this problem is cross-domain recommendation (CDR) which leverages rich information from an auxiliary source domain to improve the performance of recommender system in the target domain. In particular, the family of embedding and mapping methods for CDR is very effective, which explicitly learn a mapping function from source embeddings to target embeddings to transfer user\u2019s preferences. 
Recent works usually transfer an overall source embedding by modeling a common or personalized preference bridge for all users. However, a unified user embedding cannot reflect the user\u2019s multiple interests in auxiliary source domain. In this paper, we propose a novel framework called reinforced multi-interest transfer for CDR (REMIT). Specifically, we first construct a heterogeneous information network and employ different meta-path based aggregations to get user\u2019s multiple interests in source domain, then transform different interest embeddings with different meta-generated personalized bridge functions for each user. To better coordinate the transformed user interest embeddings and the item embedding in target domain, we systematically develop a reinforced method to dynamically assign weights to transformed interests for different training instances and optimize the performance of target model. In addition, the REMIT is a general framework that can be applied upon various base models in target domain. 
Our extensive experimental results on large real-world datasets demonstrate the superior performance and compatibility of REMIT.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Caiqi and Gu, Jiewei and Hu, Binbin and Dong, Xin and Li, Hai and Cheng, Lei and Mo, Linjian}, year={2023}, month={Jun.}, pages={9900-9908} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26181/25953", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26181", + "pdf_size": 1179353, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8999686531337949048&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com", + "email": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0+1;0;0;0;0;0", + "aff_unique_norm": "Ant Group;Fudan University", + "aff_unique_dep": ";School of Data Science", + "aff_unique_url": "https://www.antgroup.com;https://www.fudan.edu.cn", + "aff_unique_abbr": "Ant Group;Fudan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26535", + "title": "RESDSQL: Decoupling Schema Linking and Skeleton Parsing for Text-to-SQL", + "track": "main", + "status": "Technical", + "abstract": "One of the recent best attempts at Text-to-SQL is the pre-trained language model. Due to the structural property of the SQL queries, the seq2seq model takes the responsibility of parsing both the schema items (i.e., tables and columns) and the skeleton (i.e., SQL keywords). Such coupled targets increase the difficulty of parsing the correct SQL queries especially when they involve many schema items and logic operators. 
This paper proposes a ranking-enhanced encoding and skeleton-aware decoding framework to decouple the schema linking and the skeleton parsing. Specifically, for a seq2seq encoder-decode model, its encoder is injected by the most relevant schema items instead of the whole unordered ones, which could alleviate the schema linking effort during SQL parsing, and its decoder first generates the skeleton and then the actual SQL query, which could implicitly constrain the SQL parsing. We evaluate our proposed framework on Spider and its three robustness variants: Spider-DK, Spider-Syn, and Spider-Realistic. The experimental results show that our framework delivers promising performance and robustness. Our code is available at https://github.com/RUCKBReasoning/RESDSQL.", + "primary_area": "speech natural language processing", + "author": "Haoyang Li; Jing Zhang; Cuiping Li; Hong Chen", + "authorids": "", + "aff": "Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China + Engineering Research Center of Ministry of Education on Database and BI + Information School, Renmin University of China; Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China + Engineering Research Center of Ministry of Education on Database and BI + Information School, Renmin University of China; Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China + Engineering Research Center of Ministry of Education on Database and BI + Information School, Renmin University of China; Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education, Renmin University of China + Engineering Research Center of Ministry of Education on Database and BI + Information School, Renmin University of China", + "bibtex": "@article{Li_Zhang_Li_Chen_2023, title={RESDSQL: Decoupling Schema Linking and Skeleton Parsing for Text-to-SQL}, 
volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26535}, DOI={10.1609/aaai.v37i11.26535}, abstractNote={One of the recent best attempts at Text-to-SQL is the pre-trained language model. Due to the structural property of the SQL queries, the seq2seq model takes the responsibility of parsing both the schema items (i.e., tables and columns) and the skeleton (i.e., SQL keywords). Such coupled targets increase the difficulty of parsing the correct SQL queries especially when they involve many schema items and logic operators. This paper proposes a ranking-enhanced encoding and skeleton-aware decoding framework to decouple the schema linking and the skeleton parsing. Specifically, for a seq2seq encoder-decode model, its encoder is injected by the most relevant schema items instead of the whole unordered ones, which could alleviate the schema linking effort during SQL parsing, and its decoder first generates the skeleton and then the actual SQL query, which could implicitly constrain the SQL parsing. We evaluate our proposed framework on Spider and its three robustness variants: Spider-DK, Spider-Syn, and Spider-Realistic. The experimental results show that our framework delivers promising performance and robustness. 
Our code is available at https://github.com/RUCKBReasoning/RESDSQL.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Haoyang and Zhang, Jing and Li, Cuiping and Chen, Hong}, year={2023}, month={Jun.}, pages={13067-13075} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26535/26307", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26535", + "pdf_size": 251134, + "gs_citation": 181, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14083781038971654954&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "https://github.com/RUCKBReasoning/RESDSQL", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+0;0+1+0;0+1+0;0+1+0", + "aff_unique_norm": "Renmin University of China;Engineering Research Center of Ministry of Education", + "aff_unique_dep": "Key Laboratory of Data Engineering and Knowledge Engineering;Database and BI", + "aff_unique_url": "http://www.ruc.edu.cn;", + "aff_unique_abbr": "RUC;", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25813", + "title": "RETRACTED: McOmet: Multimodal Fusion Transformer for Physical Audiovisual Commonsense Reasoning", + "track": "main", + "status": "Technical", + "abstract": "", + "primary_area": "knowledge representation and reasoning", + "author": "Daoming Zong; Shiliang Sun", + "authorids": "", + "aff": "School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China", + "bibtex": "@article{Zong_Sun_2023, title={RETRACTED: McOmet: Multimodal Fusion Transformer for Physical Audiovisual Commonsense Reasoning}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25813}, DOI={10.1609/aaai.v37i5.25813}, abstractNote={<p>Referred to by: <a href="https://doi.org/10.1609/aaai.v37i13.27728">Retraction Note to: McOmet: Multimodal Fusion Transformer for Physical Audiovisual Commonsense Reasoning.</a></p>\n<p>This article, which was published in Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence (AAAI 2023), has been retracted by agreement between the authors and the journal.</p>}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zong, Daoming and Sun, Shiliang}, year={2023}, month={Jun.}, pages={6621-6629} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25813/25585", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25813", + "pdf_size": 6030012, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7377975443496365677&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;cs.ecnu.edu.cn", + "email": "gmail.com;cs.ecnu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27017", + "title": "RFC-Net: Learning High Resolution Global Features for Medical Image Segmentation on a Computational Budget (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Learning High-Resolution representations is essential for semantic segmentation. Convolutional neural network (CNN) architectures with downstream and upstream propagation flow are popular for segmentation in medical diagnosis. 
However, due to performing spatial downsampling and upsampling in multiple stages, information loss is inexorable. On the contrary, connecting layers densely on high spatial resolution is computationally expensive. In this work, we devise a Loose Dense Connection Strategy to connect neurons in subsequent layers with reduced parameters. On top of that, using a m-way Tree structure for feature propagation we propose Receptive Field Chain Network (RFC-Net) that learns high-resolution global features on a compressed computational space. Our experiments demonstrates that RFC Net achieves state-of-the-art performance on Kvasir and CVC-ClinicDB benchmarks for Polyp segmentation. Our code is publicly available at github.com/sourajitcs/RFC-NetAAAI23.", + "primary_area": "", + "author": "Sourajit Saha; Shaswati Saha; Md Osman Gani; Tim Oates; David Chapman", + "authorids": "", + "aff": "Department of Computer Science and Electrical Engineering, University of Maryland Baltimore County; Department of Information Systems, University of Maryland Baltimore County; Department of Information Systems, University of Maryland Baltimore County; Department of Computer Science and Electrical Engineering, University of Maryland Baltimore County; Department of Computer Science, University of Miami", + "bibtex": "@article{Saha_Saha_Gani_Oates_Chapman_2024, title={RFC-Net: Learning High Resolution Global Features for Medical Image Segmentation on a Computational Budget (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27017}, DOI={10.1609/aaai.v37i13.27017}, abstractNote={Learning High-Resolution representations is essential for semantic segmentation. Convolutional neural network (CNN) architectures with downstream and upstream propagation flow are popular for segmentation in medical diagnosis. However, due to performing spatial downsampling and upsampling in multiple stages, information loss is inexorable. 
On the contrary, connecting layers densely on high spatial resolution is computationally expensive. In this work, we devise a Loose Dense Connection Strategy to connect neurons in subsequent layers with reduced parameters. On top of that, using a m-way Tree structure for feature propagation we propose Receptive Field Chain Network (RFC-Net) that learns high-resolution global features on a compressed computational space. Our experiments demonstrates that RFC Net achieves state-of-the-art performance on Kvasir and CVC-ClinicDB benchmarks for Polyp segmentation. Our code is publicly available at github.com/sourajitcs/RFC-NetAAAI23.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Saha, Sourajit and Saha, Shaswati and Gani, Md Osman and Oates, Tim and Chapman, David}, year={2024}, month={Jul.}, pages={16314-16315} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27017/26789", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27017", + "pdf_size": 2144259, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:FzTQA-W-XVoJ:scholar.google.com/&scioq=RFC-Net:+Learning+High+Resolution+Global+Features+for+Medical+Image+Segmentation+on+a+Computational+Budget+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "umbc.edu;umbc.edu;umbc.edu;umbc.edu;cs.miami.edu", + "email": "umbc.edu;umbc.edu;umbc.edu;umbc.edu;cs.miami.edu", + "github": "github.com/sourajitcs/RFC-NetAAAI23", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "University of Maryland, Baltimore County;University of Miami", + "aff_unique_dep": "Department of Computer Science and Electrical Engineering;Department of Computer Science", + "aff_unique_url": "https://www.umbc.edu;https://www.miami.edu", + "aff_unique_abbr": "UMBC;UM", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Baltimore County;", + "aff_country_unique_index": 
"0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25500", + "title": "RGBD1K: A Large-Scale Dataset and Benchmark for RGB-D Object Tracking", + "track": "main", + "status": "Technical", + "abstract": "RGB-D object tracking has attracted considerable attention recently, achieving promising performance thanks to the symbiosis between visual and depth channels. However, given a limited amount of annotated RGB-D tracking data, most state-of-the-art RGB-D trackers are simple extensions of high-performance RGB-only trackers, without fully exploiting the underlying potential of the depth channel in the offline training stage. To address the dataset deficiency issue, a new RGB-D dataset named RGBD1K is released in this paper. The RGBD1K contains 1,050 sequences with about 2.5M frames in total. To demonstrate the benefits of training on a larger RGB-D data set in general, and RGBD1K in particular, we develop a transformer-based RGB-D tracker, named SPT, as a baseline for future visual object tracking studies using the new dataset. The results, of extensive experiments using the SPT tracker demonstrate the potential of the RGBD1K dataset to improve the performance of RGB-D tracking, inspiring future developments of effective tracker designs. The dataset and codes will be available on the project homepage: https://github.com/xuefeng-zhu5/RGBD1K.", + "primary_area": "computer vision iii", + "author": "Xue-Feng Zhu; Tianyang Xu; Zhangyong Tang; Zucheng Wu; Haodong Liu; Xiao Yang; Xiao-Jun Wu; Josef Kittler", + "authorids": "", + "aff": "School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. China; School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. China; School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. China; School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. 
China; School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. China; School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. China; School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, Jiangsu, P.R. China; Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH, UK", + "bibtex": "@article{Zhu_Xu_Tang_Wu_Liu_Yang_Wu_Kittler_2023, title={RGBD1K: A Large-Scale Dataset and Benchmark for RGB-D Object Tracking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25500}, DOI={10.1609/aaai.v37i3.25500}, abstractNote={RGB-D object tracking has attracted considerable attention recently, achieving promising performance thanks to the symbiosis between visual and depth channels. However, given a limited amount of annotated RGB-D tracking data, most state-of-the-art RGB-D trackers are simple extensions of high-performance RGB-only trackers, without fully exploiting the underlying potential of the depth channel in the offline training stage. To address the dataset deficiency issue, a new RGB-D dataset named RGBD1K is released in this paper. The RGBD1K contains 1,050 sequences with about 2.5M frames in total. To demonstrate the benefits of training on a larger RGB-D data set in general, and RGBD1K in particular, we develop a transformer-based RGB-D tracker, named SPT, as a baseline for future visual object tracking studies using the new dataset. The results, of extensive experiments using the SPT tracker demonstrate the potential of the RGBD1K dataset to improve the performance of RGB-D tracking, inspiring future developments of effective tracker designs. 
The dataset and codes will be available on the project homepage: https://github.com/xuefeng-zhu5/RGBD1K.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Xue-Feng and Xu, Tianyang and Tang, Zhangyong and Wu, Zucheng and Liu, Haodong and Yang, Xiao and Wu, Xiao-Jun and Kittler, Josef}, year={2023}, month={Jun.}, pages={3870-3878} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25500/25272", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25500", + "pdf_size": 1385822, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11525424741193331566&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff_domain": "163.com;jiangnan.edu.cn;jiangnan.edu.cn;surrey.ac.uk; ; ; ; ", + "email": "163.com;jiangnan.edu.cn;jiangnan.edu.cn;surrey.ac.uk; ; ; ; ", + "github": "https://github.com/xuefeng-zhu5/RGBD1K", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;1", + "aff_unique_norm": "Jiangnan University;University of Surrey", + "aff_unique_dep": "School of Artificial Intelligence and Computer Science;Centre for Vision, Speech and Signal Processing", + "aff_unique_url": "https://www.jiangnan.edu.cn;https://www.surrey.ac.uk", + "aff_unique_abbr": "JNU;Surrey", + "aff_campus_unique_index": "0;0;0;0;0;0;0;1", + "aff_campus_unique": "Wuxi;Guildford", + "aff_country_unique_index": "0;0;0;0;0;0;0;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26577", + "title": "RINK: Reader-Inherited Evidence Reranker for Table-and-Text Open Domain Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Most approaches used in open-domain question answering on hybrid data that comprises both tabular-and-textual contents are based on a Retrieval-Reader pipeline in which the retrieval module finds relevant \u2028\u201cheterogenous\u201d evidence for a given question and the reader module generates an answer 
from the retrieved evidence. In this paper, we present a Retriever-Reranker-Reader framework by newly proposing a Reader-INherited evidence reranKer (RINK) where a reranker module is designed by finetuning the reader\u2019s neural architecture based on a simple prompting method. Our underlying assumption of reusing the reader\u2019s module for the reranker is that the reader\u2019s ability to generating an answer from evidence contains the knowledge required for the reranking, because the reranker needs to \u201cread\u201d in-depth a question and evidences more carefully and elaborately than a baseline retriever. Furthermore, we present a simple and effective pretraining method by extensively deploying the commonly used data augmentation methods of cell corruption and cell reordering based on the pretraining tasks - tabular-and-textual entailment and cross-modal masked language modeling. Experimental results on OTT-QA, a large-scale table-and-text open-domain question answering dataset, show that the proposed RINK armed with our pretraining procedure makes improvements over the baseline reranking method and leads to state-of-the-art performance.", + "primary_area": "speech natural language processing", + "author": "Eunhwan Park; Sung-Min Lee; Dearyong Seo; Seonhoon Kim; Inho Kang; Seung-Hoon Na", + "authorids": "", + "aff": "Jeonbuk National University; Jeonbuk National University; Naver Corporation; Coupang; Naver Corporation; Jeonbuk National University", + "bibtex": "@article{Park_Lee_Seo_Kim_Kang_Na_2023, title={RINK: Reader-Inherited Evidence Reranker for Table-and-Text Open Domain Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26577}, DOI={10.1609/aaai.v37i11.26577}, abstractNote={Most approaches used in open-domain question answering on hybrid data that comprises both tabular-and-textual contents are based on a Retrieval-Reader pipeline in which the retrieval module finds relevant \u2028\u201cheterogenous\u201d 
evidence for a given question and the reader module generates an answer from the retrieved evidence. In this paper, we present a Retriever-Reranker-Reader framework by newly proposing a Reader-INherited evidence reranKer (RINK) where a reranker module is designed by finetuning the reader\u2019s neural architecture based on a simple prompting method. Our underlying assumption of reusing the reader\u2019s module for the reranker is that the reader\u2019s ability to generating an answer from evidence contains the knowledge required for the reranking, because the reranker needs to \u201cread\u201d in-depth a question and evidences more carefully and elaborately than a baseline retriever. Furthermore, we present a simple and effective pretraining method by extensively deploying the commonly used data augmentation methods of cell corruption and cell reordering based on the pretraining tasks - tabular-and-textual entailment and cross-modal masked language modeling. Experimental results on OTT-QA, a large-scale table-and-text open-domain question answering dataset, show that the proposed RINK armed with our pretraining procedure makes improvements over the baseline reranking method and leads to state-of-the-art performance.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Park, Eunhwan and Lee, Sung-Min and Seo, Dearyong and Kim, Seonhoon and Kang, Inho and Na, Seung-Hoon}, year={2023}, month={Jun.}, pages={13446-13456} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26577/26349", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26577", + "pdf_size": 361902, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10133693090699127908&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "jbnu.ac.kr;jbnu.ac.kr;navercorp.com;coupang.com;navercorp.com;jbnu.ac.kr", + "email": "jbnu.ac.kr;jbnu.ac.kr;navercorp.com;coupang.com;navercorp.com;jbnu.ac.kr", + 
"github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;1;0", + "aff_unique_norm": "Jeonbuk National University;Naver Corporation;Coupang", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.jbnu.ac.kr;https://www.naver.com;https://www.coupang.com", + "aff_unique_abbr": "JBNU;Naver;Coupang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25957", + "title": "RLEKF: An Optimizer for Deep Potential with Ab Initio Accuracy", + "track": "main", + "status": "Technical", + "abstract": "It is imperative to accelerate the training of neural network force field such as Deep Potential, which usually requires thousands of images based on first-principles calculation and a couple of days to generate an accurate potential energy surface. To this end, we propose a novel optimizer named reorganized layer extended Kalman filtering (RLEKF), an optimized version of global extended Kalman filtering (GEKF) with a strategy of splitting big and gathering small layers to overcome the O(N^2) computational cost of GEKF. This strategy provides an approximation of the dense weights error covariance matrix with a sparse diagonal block matrix for GEKF. We implement both RLEKF and the baseline Adam in our alphaDynamics package and numerical experiments are performed on 13 unbiased datasets. Overall, RLEKF converges faster with slightly better accuracy. For example, a test on a typical system, bulk copper, shows that RLEKF converges faster by both the number of training epochs (x11.67) and wall-clock time (x1.19). Besides, we theoretically prove that the updates of weights converge and thus are against the gradient exploding problem. Experimental results verify that RLEKF is not sensitive to the initialization of weights. 
The RLEKF sheds light on other AI-for-science applications where training a large neural network (with tons of thousands parameters) is a bottleneck.", + "primary_area": "machine learning ii", + "author": "Siyu Hu; Wentao Zhang; Qiuchen Sha; Feng Pan; Lin-Wang Wang; Weile Jia; Guangming Tan; Tong Zhao", + "authorids": "", + "aff": "State Key Lab of Processors, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; School of Advanced Materials, Shenzhen Graduate School, Peking University, Shenzhen, China; State Key Lab of Processors, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; School of Advanced Materials, Shenzhen Graduate School, Peking University, Shenzhen, China; Institute of Semiconductors, Chinese Academy of Sciences, Beijing, China; State Key Lab of Processors, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; State Key Lab of Processors, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; State Key Lab of Processors, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Hu_Zhang_Sha_Pan_Wang_Jia_Tan_Zhao_2023, title={RLEKF: An Optimizer for Deep Potential with Ab Initio Accuracy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25957}, DOI={10.1609/aaai.v37i7.25957}, abstractNote={It is imperative to accelerate the training of neural network force field such as Deep Potential, which usually requires thousands of images based on first-principles calculation and a couple of days to generate an accurate potential energy surface. 
To this end, we propose a novel optimizer named reorganized layer extended Kalman filtering (RLEKF), an optimized version of global extended Kalman filtering (GEKF) with a strategy of splitting big and gathering small layers to overcome the O(N^2) computational cost of GEKF. This strategy provides an approximation of the dense weights error covariance matrix with a sparse diagonal block matrix for GEKF. We implement both RLEKF and the baseline Adam in our alphaDynamics package and numerical experiments are performed on 13 unbiased datasets. Overall, RLEKF converges faster with slightly better accuracy. For example, a test on a typical system, bulk copper, shows that RLEKF converges faster by both the number of training epochs (x11.67) and wall-clock time (x1.19). Besides, we theoretically prove that the updates of weights converge and thus are against the gradient exploding problem. Experimental results verify that RLEKF is not sensitive to the initialization of weights. The RLEKF sheds light on other AI-for-science applications where training a large neural network (with tons of thousands parameters) is a bottleneck.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Siyu and Zhang, Wentao and Sha, Qiuchen and Pan, Feng and Wang, Lin-Wang and Jia, Weile and Tan, Guangming and Zhao, Tong}, year={2023}, month={Jun.}, pages={7910-7918} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25957/25729", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25957", + "pdf_size": 2612737, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1862536943995742236&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ict.ac.cn;pku.edu.cn;mails.ucas.ac.cn;pkusz.edu.cn;semi.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;pku.edu.cn;mails.ucas.ac.cn;pkusz.edu.cn;semi.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 
8, + "aff_unique_index": "0+1;2;0+1;2;0;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Peking University", + "aff_unique_dep": "Institute of Computing Technology;;School of Advanced Materials", + "aff_unique_url": "http://www.ict.ac.cn;http://www.ucas.ac.cn;http://www.pku.edu.cn", + "aff_unique_abbr": "CAS;UCAS;PKU", + "aff_campus_unique_index": "0+0;1;0+0;1;0;0;0;0", + "aff_campus_unique": "Beijing;Shenzhen", + "aff_country_unique_index": "0+0;0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25467", + "title": "RLogist: Fast Observation Strategy on Whole-Slide Images with Deep Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Whole-slide images (WSI) in computational pathology have high resolution with gigapixel size, but are generally with sparse regions of interest, which leads to weak diagnostic relevance and data inefficiency for each area in the slide. Most of the existing methods rely on a multiple instance learning framework that requires densely sampling local patches at high magnification. The limitation is evident in the application stage as the heavy computation for extracting patch-level features is inevitable. In this paper, we develop RLogist, a benchmarking deep reinforcement learning (DRL) method for fast observation strategy on WSIs. Imitating the diagnostic logic of human pathologists, our RL agent learns how to find regions of observation value and obtain representative features across multiple resolution levels, without having to analyze each part of the WSI at the high magnification. We benchmark our method on two whole-slide level classification tasks, including detection of metastases in WSIs of lymph node sections, and subtyping of lung cancer. 
Experimental results demonstrate that RLogist achieves competitive classification performance compared to typical multiple instance learning algorithms, while having a significantly short observation path. In addition, the observation path given by RLogist provides good decision-making interpretability, and its ability of reading path navigation can potentially be used by pathologists for educational/assistive purposes. Our code is available at: https://github.com/tencent-ailab/RLogist.", + "primary_area": "computer vision iii", + "author": "Boxuan Zhao; Jun Zhang; Deheng Ye; Jian Cao; Xiao Han; Qiang Fu; Wei Yang", + "authorids": "", + "aff": "Tencent AI Lab + Shanghai Jiao Tong University; Tencent AI Lab; Tencent AI Lab; Shanghai Jiao Tong University; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab", + "bibtex": "@article{Zhao_Zhang_Ye_Cao_Han_Fu_Yang_2023, title={RLogist: Fast Observation Strategy on Whole-Slide Images with Deep Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25467}, DOI={10.1609/aaai.v37i3.25467}, abstractNote={Whole-slide images (WSI) in computational pathology have high resolution with gigapixel size, but are generally with sparse regions of interest, which leads to weak diagnostic relevance and data inefficiency for each area in the slide. Most of the existing methods rely on a multiple instance learning framework that requires densely sampling local patches at high magnification. The limitation is evident in the application stage as the heavy computation for extracting patch-level features is inevitable. In this paper, we develop RLogist, a benchmarking deep reinforcement learning (DRL) method for fast observation strategy on WSIs. Imitating the diagnostic logic of human pathologists, our RL agent learns how to find regions of observation value and obtain representative features across multiple resolution levels, without having to analyze each part of the WSI at the high magnification. 
We benchmark our method on two whole-slide level classification tasks, including detection of metastases in WSIs of lymph node sections, and subtyping of lung cancer. Experimental results demonstrate that RLogist achieves competitive classification performance compared to typical multiple instance learning algorithms, while having a significantly short observation path. In addition, the observation path given by RLogist provides good decision-making interpretability, and its ability of reading path navigation can potentially be used by pathologists for educational/assistive purposes. Our code is available at: https://github.com/tencent-ailab/RLogist.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Boxuan and Zhang, Jun and Ye, Deheng and Cao, Jian and Han, Xiao and Fu, Qiang and Yang, Wei}, year={2023}, month={Jun.}, pages={3570-3578} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25467/25239", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25467", + "pdf_size": 6085090, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7543077765066577301&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sjtu.edu.cn;tencent.com;tencent.com;sjtu.edu.cn;tencent.com;tencent.com;tencent.com", + "email": "sjtu.edu.cn;tencent.com;tencent.com;sjtu.edu.cn;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/tencent-ailab/RLogist", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;0;1;0;0;0", + "aff_unique_norm": "Tencent;Shanghai Jiao Tong University", + "aff_unique_dep": "Tencent AI Lab;", + "aff_unique_url": "https://ai.tencent.com;https://www.sjtu.edu.cn", + "aff_unique_abbr": "Tencent AI Lab;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25401", + "title": "ROIFormer: Semantic-Aware Region 
of Interest Transformer for Efficient Self-Supervised Monocular Depth Estimation", + "track": "main", + "status": "Technical", + "abstract": "The exploration of mutual-benefit cross-domains has shown great potential toward accurate self-supervised depth estimation. In this work, we revisit feature fusion between depth and semantic information and propose an efficient local adaptive attention method for geometric aware representation enhancement. Instead of building global connections or deforming attention across the feature space without restraint, we bound the spatial interaction within a learnable region of interest. In particular, we leverage geometric cues from semantic information to learn local adaptive bounding boxes to guide unsupervised feature aggregation. The local areas preclude most irrelevant reference points from attention space, yielding more selective feature learning and faster convergence. We naturally extend the paradigm into a multi-head and hierarchic way to enable the information distillation in different semantic levels and improve the feature discriminative ability for fine-grained depth estimation. 
Extensive experiments on the KITTI dataset show that our proposed method establishes a new state-of-the-art in self-supervised monocular depth estimation task, demonstrating the effectiveness of our approach over former Transformer variants.", + "primary_area": "computer vision iii", + "author": "Daitao Xing; Jinglin Shen; Chiuman Ho; Anthony Tzes", + "authorids": "", + "aff": "New York University, USA; OPPO US Research Center, USA; OPPO US Research Center, USA; New York University Abu Dhabi and Center for Artificial Intelligence and Robotics, UAE", + "bibtex": "@article{Xing_Shen_Ho_Tzes_2023, title={ROIFormer: Semantic-Aware Region of Interest Transformer for Efficient Self-Supervised Monocular Depth Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25401}, DOI={10.1609/aaai.v37i3.25401}, abstractNote={The exploration of mutual-benefit cross-domains has shown great potential toward accurate self-supervised depth estimation. In this work, we revisit feature fusion between depth and semantic information and propose an efficient local adaptive attention method for geometric aware representation enhancement. Instead of building global connections or deforming attention across the feature space without restraint, we bound the spatial interaction within a learnable region of interest. In particular, we leverage geometric cues from semantic information to learn local adaptive bounding boxes to guide unsupervised feature aggregation. The local areas preclude most irrelevant reference points from attention space, yielding more selective feature learning and faster convergence. We naturally extend the paradigm into a multi-head and hierarchic way to enable the information distillation in different semantic levels and improve the feature discriminative ability for fine-grained depth estimation. 
Extensive experiments on the KITTI dataset show that our proposed method establishes a new state-of-the-art in self-supervised monocular depth estimation task, demonstrating the effectiveness of our approach over former Transformer variants.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xing, Daitao and Shen, Jinglin and Ho, Chiuman and Tzes, Anthony}, year={2023}, month={Jun.}, pages={2983-2991} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25401/25173", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25401", + "pdf_size": 12485208, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6037472664930312184&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "nyu.edu; jinglin.shen;oppo.com;nyu.edu", + "email": "nyu.edu; jinglin.shen;oppo.com;nyu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;2", + "aff_unique_norm": "New York University;OPPO US Research Center;New York University Abu Dhabi", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nyu.edu;;https://nyuad.nyu.edu", + "aff_unique_abbr": "NYU;;NYU Abu Dhabi", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Abu Dhabi", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "article-26483", + "title": "RPA: Reasoning Path Augmentation in Iterative Retrieving for Multi-Hop QA", + "track": "main", + "status": "Technical", + "abstract": "Multi-hop questions are associated with a series of justifications, and one needs to obtain the answers by following the reasoning path (RP) that orders the justifications adequately. So reasoning path retrieval becomes a critical preliminary stage for multi-hop Question Answering (QA). 
Within the RP, two fundamental challenges emerge for better performance: (i) what the order of the justifications in the RP should be, and (ii) what if the wrong justification has been in the path. In this paper, we propose Reasoning Path Augmentation (RPA), which uses reasoning path reordering and augmentation to handle the above two challenges, respectively. Reasoning path reordering restructures the reasoning by targeting the easier justification first but difficult one later, in which the difficulty is determined by the overlap between query and justifications since the higher overlap means more lexical relevance and easier searchable. Reasoning path augmentation automatically generates artificial RPs, in which the distracted justifications are inserted to aid the model recover from the wrong justification. We build RPA with a naive pre-trained model and evaluate RPA on the QASC and MultiRC datasets. The evaluation results demonstrate that RPA outperforms previously published reasoning path retrieval methods, showing the effectiveness of the proposed methods. Moreover, we present detailed experiments on how the orders of justifications and the percent of augmented paths affect the question- answering performance, revealing the importance of polishing RPs and the necessity of augmentation.", + "primary_area": "speech natural language processing", + "author": "Ziyi Cao; Bingquan Liu; Shaobo Li", + "authorids": "", + "aff": "Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology", + "bibtex": "@article{Cao_Liu_Li_2023, title={RPA: Reasoning Path Augmentation in Iterative Retrieving for Multi-Hop QA}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26483}, DOI={10.1609/aaai.v37i11.26483}, abstractNote={Multi-hop questions are associated with a series of justifications, and one needs to obtain the answers by following the reasoning path (RP) that orders the justifications adequately. 
So reasoning path retrieval becomes a critical preliminary stage for multi-hop Question Answering (QA). Within the RP, two fundamental challenges emerge for better performance: (i) what the order of the justifications in the RP should be, and (ii) what if the wrong justification has been in the path. In this paper, we propose Reasoning Path Augmentation (RPA), which uses reasoning path reordering and augmentation to handle the above two challenges, respectively. Reasoning path reordering restructures the reasoning by targeting the easier justification first but difficult one later, in which the difficulty is determined by the overlap between query and justifications since the higher overlap means more lexical relevance and easier searchable. Reasoning path augmentation automatically generates artificial RPs, in which the distracted justifications are inserted to aid the model recover from the wrong justification. We build RPA with a naive pre-trained model and evaluate RPA on the QASC and MultiRC datasets. The evaluation results demonstrate that RPA outperforms previously published reasoning path retrieval methods, showing the effectiveness of the proposed methods. 
Moreover, we present detailed experiments on how the orders of justifications and the percent of augmented paths affect the question- answering performance, revealing the importance of polishing RPs and the necessity of augmentation.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Ziyi and Liu, Bingquan and Li, Shaobo}, year={2023}, month={Jun.}, pages={12598-12606} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26483/26255", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26483", + "pdf_size": 2274209, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5560615500986689279&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.hit.edu.cn;hit.edu.cn;insun.hit.edu.cn", + "email": "stu.hit.edu.cn;hit.edu.cn;insun.hit.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://www.hit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Harbin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25482", + "title": "RSPT: Reconstruct Surroundings and Predict Trajectory for Generalizable Active Object Tracking", + "track": "main", + "status": "Technical", + "abstract": "Active Object Tracking (AOT) aims to maintain a specific relation between the tracker and object(s) by autonomously controlling the motion system of a tracker given observations. It is widely used in various applications such as mobile robots and autonomous driving. However, Building a generalizable active tracker that works robustly across various scenarios remains a challenge, particularly in unstructured environments with cluttered obstacles and diverse layouts. 
To realize this, we argue that the key is to construct a state representation that can model the geometry structure of the surroundings and the dynamics of the target. To this end, we propose a framework called RSPT to form a structure-aware motion representation by Reconstructing Surroundings and Predicting the target Trajectory. Moreover, we further enhance the generalization of the policy network by training in the asymmetric dueling mechanism. Empirical results show that RSPT outperforms existing methods in unseen environments, especially those with cluttered obstacles and diverse layouts. We also demonstrate good sim-to-real transfer when deploying RSPT in real-world scenarios.", + "primary_area": "computer vision iii", + "author": "Fangwei Zhong; Xiao Bi; Yudi Zhang; Wei Zhang; Yizhou Wang", + "authorids": "", + "aff": "Sch\u2019l of Intelligence Science and Technology, Peking University; Nat\u2019l Key Lab. of GAI, Beijing Institute for General Artificial Intelligence (BIGAI); Center on Frontiers of Computing Studies, Sch\u2019l of Computer Science, Inst. for Artificial Intelligence, Peking University; Sch\u2019l of Control Science and Engineering, Shandong University; Sch\u2019l of Info. Eng., Zhengzhou University", + "bibtex": "@article{Zhong_Bi_Zhang_Zhang_Wang_2023, title={RSPT: Reconstruct Surroundings and Predict Trajectory for Generalizable Active Object Tracking}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25482}, DOI={10.1609/aaai.v37i3.25482}, abstractNote={Active Object Tracking (AOT) aims to maintain a specific relation between the tracker and object(s) by autonomously controlling the motion system of a tracker given observations. It is widely used in various applications such as mobile robots and autonomous driving. However, Building a generalizable active tracker that works robustly across various scenarios remains a challenge, particularly in unstructured environments with cluttered obstacles and diverse layouts. 
To realize this, we argue that the key is to construct a state representation that can model the geometry structure of the surroundings and the dynamics of the target. To this end, we propose a framework called RSPT to form a structure-aware motion representation by Reconstructing Surroundings and Predicting the target Trajectory. Moreover, we further enhance the generalization of the policy network by training in the asymmetric dueling mechanism. Empirical results show that RSPT outperforms existing methods in unseen environments, especially those with cluttered obstacles and diverse layouts. We also demonstrate good sim-to-real transfer when deploying RSPT in real-world scenarios.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhong, Fangwei and Bi, Xiao and Zhang, Yudi and Zhang, Wei and Wang, Yizhou}, year={2023}, month={Jun.}, pages={3705-3714} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25482/25254", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25482", + "pdf_size": 7894124, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7520295733865321246&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;mail.sdu.edu.cn;sdu.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;mail.sdu.edu.cn;sdu.edu.cn", + "github": "", + "project": "https://sites.google.com/view/aot-rspt", + "author_num": 5, + "aff_unique_index": "0;1;0;2;3", + "aff_unique_norm": "Peking University;Beijing Institute for General Artificial Intelligence;Shandong University;Zhengzhou University", + "aff_unique_dep": "School of Intelligence Science and Technology;Nat\u2019l Key Lab. 
of GAI;School of Control Science and Engineering;School of Information Engineering", + "aff_unique_url": "http://www.pku.edu.cn;http://www.bigmodel.cn/;http://www.sdu.edu.cn;http://www.zzu.edu.cn", + "aff_unique_abbr": "PKU;BIGAI;SDU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26575", + "title": "RWEN-TTS: Relation-Aware Word Encoding Network for Natural Text-to-Speech Synthesis", + "track": "main", + "status": "Technical", + "abstract": "With the advent of deep learning, a huge number of text-to-speech (TTS) models which produce human-like speech have emerged. Recently, by introducing syntactic and semantic information w.r.t the input text, various approaches have been proposed to enrich the naturalness and expressiveness of TTS models. Although these strategies showed impressive results, they still have some limitations in utilizing language information. First, most approaches only use graph networks to utilize syntactic and semantic information without considering linguistic features. Second, most previous works do not explicitly consider adjacent words when encoding syntactic and semantic information, even though it is obvious that adjacent words are usually meaningful when encoding the current word. To address these issues, we propose Relation-aware Word Encoding Network (RWEN), which effectively allows syntactic and semantic information based on two modules (i.e., Semantic-level Relation Encoding and Adjacent Word Relation Encoding). 
Experimental results show substantial improvements compared to previous works.", + "primary_area": "speech natural language processing", + "author": "Shinhyeok Oh; HyeongRae Noh; Yoonseok Hong; Insoo Oh", + "authorids": "", + "aff": "Netmarble AI Center; Netmarble AI Center; Netmarble AI Center; Netmarble AI Center", + "bibtex": "@article{Oh_Noh_Hong_Oh_2023, title={RWEN-TTS: Relation-Aware Word Encoding Network for Natural Text-to-Speech Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26575}, DOI={10.1609/aaai.v37i11.26575}, abstractNote={With the advent of deep learning, a huge number of text-to-speech (TTS) models which produce human-like speech have emerged. Recently, by introducing syntactic and semantic information w.r.t the input text, various approaches have been proposed to enrich the naturalness and expressiveness of TTS models. Although these strategies showed impressive results, they still have some limitations in utilizing language information. First, most approaches only use graph networks to utilize syntactic and semantic information without considering linguistic features. Second, most previous works do not explicitly consider adjacent words when encoding syntactic and semantic information, even though it is obvious that adjacent words are usually meaningful when encoding the current word. To address these issues, we propose Relation-aware Word Encoding Network (RWEN), which effectively allows syntactic and semantic information based on two modules (i.e., Semantic-level Relation Encoding and Adjacent Word Relation Encoding). 
Experimental results show substantial improvements compared to previous works.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Oh, Shinhyeok and Noh, HyeongRae and Hong, Yoonseok and Oh, Insoo}, year={2023}, month={Jun.}, pages={13428-13436} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26575/26347", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26575", + "pdf_size": 299237, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14496760107727571557&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "netmarble.com;netmarble.com;netmarble.com;netmarble.com", + "email": "netmarble.com;netmarble.com;netmarble.com;netmarble.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Netmarble", + "aff_unique_dep": "AI Center", + "aff_unique_url": "https://www.netmarble.com", + "aff_unique_abbr": "Netmarble", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26296", + "title": "Random Walk Conformer: Learning Graph Representation from Long and Short Range", + "track": "main", + "status": "Technical", + "abstract": "While graph neural networks (GNNs) have achieved notable success in various graph mining tasks, conventional GNNs only model the pairwise correlation in 1-hop neighbors without considering the long-term relations and the high-order patterns, thus limiting their performances. Recently, several works have addressed these issues by exploring the motif, i.e., frequent subgraphs. However, these methods usually require an unacceptable computational time to enumerate all possible combinations of motifs. 
In this paper, we introduce a new GNN framework, namely Random Walk Conformer (RWC), to exploit global correlations and local patterns based on the random walk, which is a promising method to discover the graph structure. Besides, we propose random walk encoding to help RWC capture topological information, which is proven more expressive than conventional spatial encoding. Extensive experiment results manifest that RWC achieves state-of-the-art performance on graph classification and regression tasks. The source code of RWC is available at https://github.com/b05901024/RandomWalkConformer.", + "primary_area": "machine learning iv", + "author": "Pei-Kai Yeh; Hsi-Wen Chen; Ming-Syan Chen", + "authorids": "", + "aff": "National Taiwan University; National Taiwan University; National Taiwan University", + "bibtex": "@article{Yeh_Chen_Chen_2023, title={Random Walk Conformer: Learning Graph Representation from Long and Short Range}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26296}, DOI={10.1609/aaai.v37i9.26296}, abstractNote={While graph neural networks (GNNs) have achieved notable success in various graph mining tasks, conventional GNNs only model the pairwise correlation in 1-hop neighbors without considering the long-term relations and the high-order patterns, thus limiting their performances. Recently, several works have addressed these issues by exploring the motif, i.e., frequent subgraphs. However, these methods usually require an unacceptable computational time to enumerate all possible combinations of motifs. In this paper, we introduce a new GNN framework, namely Random Walk Conformer (RWC), to exploit global correlations and local patterns based on the random walk, which is a promising method to discover the graph structure. Besides, we propose random walk encoding to help RWC capture topological information, which is proven more expressive than conventional spatial encoding. 
Extensive experiment results manifest that RWC achieves state-of-the-art performance on graph classification and regression tasks. The source code of RWC is available at https://github.com/b05901024/RandomWalkConformer.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yeh, Pei-Kai and Chen, Hsi-Wen and Chen, Ming-Syan}, year={2023}, month={Jun.}, pages={10936-10944} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26296/26068", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26296", + "pdf_size": 235215, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17844754772391375410&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "arbor.ee.ntu.edu.tw;arbor.ee.ntu.edu.tw;ntu.edu.tw", + "email": "arbor.ee.ntu.edu.tw;arbor.ee.ntu.edu.tw;ntu.edu.tw", + "github": "https://github.com/b05901024/RandomWalkConformer", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National Taiwan University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ntu.edu.tw", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-25685", + "title": "Rank Aggregation Using Scoring Rules", + "track": "main", + "status": "Technical", + "abstract": "To aggregate rankings into a social ranking, one can use scoring systems such as Plurality, Veto, and Borda. We distinguish three types of methods: ranking by score, ranking by repeatedly choosing a winner that we delete and rank at the top, and ranking by repeatedly choosing a loser that we delete and rank at the bottom. The latter method captures the frequently studied voting rules Single Transferable Vote (aka Instant Runoff Voting), Coombs, and Baldwin. 
In an experimental analysis, we show that the three types of methods produce different rankings in practice. We also provide evidence that sequentially selecting winners is most suitable to detect the \"true\" ranking of candidates. For different rules in our classes, we then study the (parameterized) computational complexity of deciding in which positions a given candidate can appear in the chosen ranking. As part of our analysis, we also consider the Winner Determination problem for STV, Coombs, and Baldwin and determine their complexity when there are few voters or candidates.", + "primary_area": "game theory and economic paradigms", + "author": "Niclas Boehmer; Robert Bredereck; Dominik Peters", + "authorids": "", + "aff": "Algorithmics and Complexity, Technische Universit\u00e4t Berlin; Institut f\u00fcr Informatik, TU Clausthal; CNRS, LAMSADE, Universit\u00e9 Paris Dauphine\u2013PSL", + "bibtex": "@article{Boehmer_Bredereck_Peters_2023, title={Rank Aggregation Using Scoring Rules}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25685}, DOI={10.1609/aaai.v37i5.25685}, abstractNote={To aggregate rankings into a social ranking, one can use scoring systems such as Plurality, Veto, and Borda. We distinguish three types of methods: ranking by score, ranking by repeatedly choosing a winner that we delete and rank at the top, and ranking by repeatedly choosing a loser that we delete and rank at the bottom. The latter method captures the frequently studied voting rules Single Transferable Vote (aka Instant Runoff Voting), Coombs, and Baldwin. In an experimental analysis, we show that the three types of methods produce different rankings in practice. We also provide evidence that sequentially selecting winners is most suitable to detect the "true" ranking of candidates. For different rules in our classes, we then study the (parameterized) computational complexity of deciding in which positions a given candidate can appear in the chosen ranking. 
As part of our analysis, we also consider the Winner Determination problem for STV, Coombs, and Baldwin and determine their complexity when there are few voters or candidates.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Boehmer, Niclas and Bredereck, Robert and Peters, Dominik}, year={2023}, month={Jun.}, pages={5515-5523} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25685/25457", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25685", + "pdf_size": 156238, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5125781305711019135&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 12, + "aff_domain": "tu-berlin.de;tu-clausthal.de;lamsade.dauphine.fr", + "email": "tu-berlin.de;tu-clausthal.de;lamsade.dauphine.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;Technische Universit\u00e4t Clausthal;CNRS", + "aff_unique_dep": "Algorithmics and Complexity;Institut f\u00fcr Informatik;LAMSADE", + "aff_unique_url": "https://www.tu-berlin.de;https://www.tu-clausthal.de;https://www.cnrs.fr", + "aff_unique_abbr": "TU Berlin;TU Clausthal;CNRS", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Berlin;", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Germany;France" + }, + { + "id": "article-25150", + "title": "RankDNN: Learning to Rank for Few-Shot Learning", + "track": "main", + "status": "Technical", + "abstract": "This paper introduces a new few-shot learning pipeline that\ncasts relevance ranking for image retrieval as binary ranking\nrelation classification. In comparison to image classification,\nranking relation classification is sample efficient and\ndomain agnostic. Besides, it provides a new perspective on\nfew-shot learning and is complementary to state-of-the-art\nmethods. 
The core component of our deep neural network is\na simple MLP, which takes as input an image triplet encoded\nas the difference between two vector-Kronecker products,\nand outputs a binary relevance ranking order. The proposed\nRankMLP can be built on top of any state-of-the-art feature\nextractors, and our entire deep neural network is called\nthe ranking deep neural network, or RankDNN. Meanwhile,\nRankDNN can be flexibly fused with other post-processing\nmethods. During the meta test, RankDNN ranks support images\naccording to their similarity with the query samples,\nand each query sample is assigned the class label of its\nnearest neighbor. Experiments demonstrate that RankDNN\ncan effectively improve the performance of its baselines\nbased on a variety of backbones and it outperforms previous\nstate-of-the-art algorithms on multiple few-shot learning\nbenchmarks, including miniImageNet, tieredImageNet,\nCaltech-UCSD Birds, and CIFAR-FS. Furthermore, experiments\non the cross-domain challenge demonstrate the superior\ntransferability of RankDNN.The code is available at:\nhttps://github.com/guoqianyu-alberta/RankDNN.", + "primary_area": "computer vision i", + "author": "Qianyu Guo; Gong Haotong; Xujun Wei; Yanwei Fu; Yizhou Yu; Wenqiang Zhang; Weifeng Ge", + "authorids": "", + "aff": "Nebula AI Group, School of Computer Science, Fudan University,Shanghai,China+Shanghai Key Laboratory of Intelligent Information Processing,Shanghai,China+Academy for Engineering & Technology, Fudan University,Shanghai,China; Nebula AI Group, School of Computer Science, Fudan University,Shanghai,China+Shanghai Key Laboratory of Intelligent Information Processing,Shanghai,China; Nebula AI Group, School of Computer Science, Fudan University,Shanghai,China+Academy for Engineering & Technology, Fudan University,Shanghai,China; Shanghai Key Laboratory of Intelligent Information Processing,Shanghai,China; Department of Computer Science, The University of Hong Kong,Hong Kong,China; 
Shanghai Key Laboratory of Intelligent Information Processing,Shanghai,China+Academy for Engineering & Technology, Fudan University,Shanghai,China; Nebula AI Group, School of Computer Science, Fudan University,Shanghai,China+Shanghai Key Laboratory of Intelligent Information Processing,Shanghai,China", + "bibtex": "@article{Guo_Haotong_Wei_Fu_Yu_Zhang_Ge_2023, title={RankDNN: Learning to Rank for Few-Shot Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25150}, DOI={10.1609/aaai.v37i1.25150}, abstractNote={This paper introduces a new few-shot learning pipeline that\ncasts relevance ranking for image retrieval as binary ranking\nrelation classification. In comparison to image classification,\nranking relation classification is sample efficient and\ndomain agnostic. Besides, it provides a new perspective on\nfew-shot learning and is complementary to state-of-the-art\nmethods. The core component of our deep neural network is\na simple MLP, which takes as input an image triplet encoded\nas the difference between two vector-Kronecker products,\nand outputs a binary relevance ranking order. The proposed\nRankMLP can be built on top of any state-of-the-art feature\nextractors, and our entire deep neural network is called\nthe ranking deep neural network, or RankDNN. Meanwhile,\nRankDNN can be flexibly fused with other post-processing\nmethods. During the meta test, RankDNN ranks support images\naccording to their similarity with the query samples,\nand each query sample is assigned the class label of its\nnearest neighbor. Experiments demonstrate that RankDNN\ncan effectively improve the performance of its baselines\nbased on a variety of backbones and it outperforms previous\nstate-of-the-art algorithms on multiple few-shot learning\nbenchmarks, including miniImageNet, tieredImageNet,\nCaltech-UCSD Birds, and CIFAR-FS. 
Furthermore, experiments\non the cross-domain challenge demonstrate the superior\ntransferability of RankDNN.The code is available at:\nhttps://github.com/guoqianyu-alberta/RankDNN.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Qianyu and Haotong, Gong and Wei, Xujun and Fu, Yanwei and Yu, Yizhou and Zhang, Wenqiang and Ge, Weifeng}, year={2023}, month={Jun.}, pages={728-736} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25150/24922", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25150", + "pdf_size": 602774, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9137192493070830349&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "fudan.edu.cn; ; ; ; ; ; ", + "email": "fudan.edu.cn; ; ; ; ; ; ", + "github": "https://github.com/guoqianyu-alberta/RankDNN", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+0;0+1;0+0;1;2;1+0;0+1", + "aff_unique_norm": "Fudan University;Shanghai Key Laboratory of Intelligent Information Processing;The University of Hong Kong", + "aff_unique_dep": "School of Computer Science;;Department of Computer Science", + "aff_unique_url": "https://www.fudan.edu.cn;;https://www.hku.hk", + "aff_unique_abbr": "Fudan;;HKU", + "aff_campus_unique_index": "0+0;0;0+0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0+0;0+0;0+0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25698", + "title": "Rawlsian Fairness in Online Bipartite Matching: Two-Sided, Group, and Individual", + "track": "main", + "status": "Technical", + "abstract": "Online bipartite-matching platforms are ubiquitous and find applications in important areas such as crowdsourcing and ridesharing. In the most general form, the platform consists of three entities: two sides to be matched and a platform operator that decides the matching. 
The design of algorithms for such platforms has traditionally focused on the operator\u2019s (expected) profit. Since fairness has become an important consideration that was ignored in the existing algorithms a collection of online matching algorithms have been developed that give a fair treatment guarantee for one side of the market at the expense of a drop in the operator\u2019s profit. In this paper, we generalize the existing work to offer fair treatment guarantees to both sides of the market simultaneously, at a calculated worst case drop to operator profit. We consider group and individual Rawlsian fairness criteria. Moreover, our algorithms have theoretical guarantees and have adjustable parameters that can be tuned as desired to balance the trade-off between the utilities of the three sides. We also derive hardness results that give clear upper bounds over the performance of any algorithm.", + "primary_area": "game theory and economic paradigms", + "author": "Seyed Esmaeili; Sharmila Duppala; Davidson Cheng; Vedant Nanda; Aravind Srinivasan; John P. Dickerson", + "authorids": "", + "aff": "University of Maryland, College Park; University of Maryland, College Park; Colorado College; University of Maryland, College Park; University of Maryland, College Park; University of Maryland, College Park", + "bibtex": "@article{Esmaeili_Duppala_Cheng_Nanda_Srinivasan_Dickerson_2023, title={Rawlsian Fairness in Online Bipartite Matching: Two-Sided, Group, and Individual}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25698}, DOI={10.1609/aaai.v37i5.25698}, abstractNote={Online bipartite-matching platforms are ubiquitous and find applications in important areas such as crowdsourcing and ridesharing. In the most general form, the platform consists of three entities: two sides to be matched and a platform operator that decides the matching. The design of algorithms for such platforms has traditionally focused on the operator\u2019s (expected) profit. 
Since fairness has become an important consideration that was ignored in the existing algorithms a collection of online matching algorithms have been developed that give a fair treatment guarantee for one side of the market at the expense of a drop in the operator\u2019s profit. In this paper, we generalize the existing work to offer fair treatment guarantees to both sides of the market simultaneously, at a calculated worst case drop to operator profit. We consider group and individual Rawlsian fairness criteria. Moreover, our algorithms have theoretical guarantees and have adjustable parameters that can be tuned as desired to balance the trade-off between the utilities of the three sides. We also derive hardness results that give clear upper bounds over the performance of any algorithm.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Esmaeili, Seyed and Duppala, Sharmila and Cheng, Davidson and Nanda, Vedant and Srinivasan, Aravind and Dickerson, John P.}, year={2023}, month={Jun.}, pages={5624-5632} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25698/25470", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25698", + "pdf_size": 542287, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14472176658091481185&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "cs.umd.edu;cs.umd.edu;coloradocollege.edu;cs.umd.edu;cs.umd.edu;cs.umd.edu", + "email": "cs.umd.edu;cs.umd.edu;coloradocollege.edu;cs.umd.edu;cs.umd.edu;cs.umd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "University of Maryland;Colorado College", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.umd.edu;https://www.coloradocollege.edu", + "aff_unique_abbr": "UMD;CC", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "College Park;", + "aff_country_unique_index": "0;0;0;0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "article-25210", + "title": "ReGANIE: Rectifying GAN Inversion Errors for Accurate Real Image Editing", + "track": "main", + "status": "Technical", + "abstract": "The StyleGAN family succeed in high-fidelity image generation and allow for flexible and plausible editing of generated images by manipulating the semantic-rich latent style space. However, projecting a real image into its latent space encounters an inherent trade-off between inversion quality and editability. Existing encoder-based or optimization-based StyleGAN inversion methods attempt to mitigate the trade-off but see limited performance. To fundamentally resolve this problem, we propose a novel two-phase framework by designating two separate networks to tackle editing and reconstruction respectively, instead of balancing the two. Specifically, in Phase I, a W-space-oriented StyleGAN inversion network is trained and used to perform image inversion and edit- ing, which assures the editability but sacrifices reconstruction quality. In Phase II, a carefully designed rectifying network is utilized to rectify the inversion errors and perform ideal reconstruction. Experimental results show that our approach yields near-perfect reconstructions without sacrificing the editability, thus allowing accurate manipulation of real images. 
Further, we evaluate the performance of our rectifying net- work, and see great generalizability towards unseen manipulation types and out-of-domain images.", + "primary_area": "computer vision i", + "author": "Bingchuan Li; Tianxiang Ma; Peng Zhang; Miao Hua; Wei Liu; Qian He; Zili Yi", + "authorids": "", + "aff": "ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China", + "bibtex": "@article{Li_Ma_Zhang_Hua_Liu_He_Yi_2023, title={ReGANIE: Rectifying GAN Inversion Errors for Accurate Real Image Editing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25210}, DOI={10.1609/aaai.v37i1.25210}, abstractNote={The StyleGAN family succeed in high-fidelity image generation and allow for flexible and plausible editing of generated images by manipulating the semantic-rich latent style space. However, projecting a real image into its latent space encounters an inherent trade-off between inversion quality and editability. Existing encoder-based or optimization-based StyleGAN inversion methods attempt to mitigate the trade-off but see limited performance. To fundamentally resolve this problem, we propose a novel two-phase framework by designating two separate networks to tackle editing and reconstruction respectively, instead of balancing the two. Specifically, in Phase I, a W-space-oriented StyleGAN inversion network is trained and used to perform image inversion and edit- ing, which assures the editability but sacrifices reconstruction quality. In Phase II, a carefully designed rectifying network is utilized to rectify the inversion errors and perform ideal reconstruction. Experimental results show that our approach yields near-perfect reconstructions without sacrificing the editability, thus allowing accurate manipulation of real images. 
Further, we evaluate the performance of our rectifying net- work, and see great generalizability towards unseen manipulation types and out-of-domain images.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Bingchuan and Ma, Tianxiang and Zhang, Peng and Hua, Miao and Liu, Wei and He, Qian and Yi, Zili}, year={2023}, month={Jun.}, pages={1269-1277} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25210/24982", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25210", + "pdf_size": 20139560, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1365151632831465073&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com", + "email": "bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "ByteDance Ltd", + "aff_unique_dep": "", + "aff_unique_url": "https://www.bytedance.com", + "aff_unique_abbr": "ByteDance", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25842", + "title": "RePreM: Representation Pre-training with Masked Model for Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Inspired by the recent success of sequence modeling in RL and the use of masked language model for pre-training, we propose a masked model for pre-training in RL, RePreM (Representation Pre-training with Masked Model), which trains the encoder combined with transformer blocks to predict the masked states or actions in a trajectory. RePreM is simple but effective compared to existing representation pre-training methods in RL. 
It avoids algorithmic sophistication (such as data augmentation or estimating multiple models) with sequence modeling and generates a representation that captures long-term dynamics well. Empirically, we demonstrate the effectiveness of RePreM in various tasks, including dynamic prediction, transfer learning, and sample-efficient RL with both value-based and actor-critic methods. Moreover, we show that RePreM scales well with dataset size, dataset quality, and the scale of the encoder, which indicates its potential towards big RL models.", + "primary_area": "machine learning i", + "author": "Yuanying Cai; Chuheng Zhang; Wei Shen; Xuyun Zhang; Wenjie Ruan; Longbo Huang", + "authorids": "", + "aff": "IIIS, Tsinghua University, Beijing, China; Microsoft Research Asia, Beijing, China; Hulu, Beijing, China; Macquarie University, Sydney, Australia; Macquarie University, Sydney, Australia+University of Exeter, Exeter, UK; IIIS, Tsinghua University, Beijing, China", + "bibtex": "@article{Cai_Zhang_Shen_Zhang_Ruan_Huang_2023, title={RePreM: Representation Pre-training with Masked Model for Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25842}, DOI={10.1609/aaai.v37i6.25842}, abstractNote={Inspired by the recent success of sequence modeling in RL and the use of masked language model for pre-training, we propose a masked model for pre-training in RL, RePreM (Representation Pre-training with Masked Model), which trains the encoder combined with transformer blocks to predict the masked states or actions in a trajectory. RePreM is simple but effective compared to existing representation pre-training methods in RL. It avoids algorithmic sophistication (such as data augmentation or estimating multiple models) with sequence modeling and generates a representation that captures long-term dynamics well. 
Empirically, we demonstrate the effectiveness of RePreM in various tasks, including dynamic prediction, transfer learning, and sample-efficient RL with both value-based and actor-critic methods. Moreover, we show that RePreM scales well with dataset size, dataset quality, and the scale of the encoder, which indicates its potential towards big RL models.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cai, Yuanying and Zhang, Chuheng and Shen, Wei and Zhang, Xuyun and Ruan, Wenjie and Huang, Longbo}, year={2023}, month={Jun.}, pages={6879-6887} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25842/25614", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25842", + "pdf_size": 812418, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12259389287421701734&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mails.tsinghua.edu.cn;microsoft.com; ; ; ;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;microsoft.com; ; ; ;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;3+4;0", + "aff_unique_norm": "Tsinghua University;Microsoft Research Asia;Hulu;Macquarie University;University of Exeter", + "aff_unique_dep": "IIIS;Research;;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.hulu.com;https://www.mq.edu.au;https://www.exeter.ac.uk", + "aff_unique_abbr": "THU;MSRA;Hulu;MQ;Exeter", + "aff_campus_unique_index": "0;0;0;1;1+2;0", + "aff_campus_unique": "Beijing;Sydney;Exeter", + "aff_country_unique_index": "0;0;0;1;1+2;0", + "aff_country_unique": "China;Australia;United Kingdom" + }, + { + "id": "article-26783", + "title": "Reachability Analysis of Neural Network Control Systems", + "track": "aaai special track", + "status": "Technical", + "abstract": "Neural network controllers (NNCs) have shown great promise in autonomous and 
cyber-physical systems. Despite the various verification approaches for neural networks, the safety analysis of NNCs remains an open problem. Existing verification approaches for neural network control systems (NNCSs) either can only work on a limited type of activation functions, or result in non-trivial over-approximation errors with time evolving. This paper proposes a verification framework for NNCS based on Lipschitzian optimisation, called DeepNNC. We first prove the Lipschitz continuity of closed-loop NNCSs by unrolling and eliminating the loops. We then reveal the working principles of applying Lipschitzian optimisation on NNCS verification and illustrate it by verifying an adaptive cruise control model. Compared to state-of-the-art verification approaches, DeepNNC shows superior performance in terms of efficiency and accuracy over a wide range of NNCs. We also provide a case study to demonstrate the capability of DeepNNC to handle a real-world, practical, and complex system. Our tool DeepNNC is available at https://github.com/TrustAI/DeepNNC.", + "primary_area": "safe and robust ai", + "author": "Chi Zhang; Wenjie Ruan; Peipei Xu", + "authorids": "", + "aff": "Department of Computer Science, University of Exeter, Exeter, EX4 4QF, UK; Department of Computer Science, University of Exeter, Exeter, EX4 4QF, UK; Department of Computer Science, University of Liverpool, Liverpool, L69 3BX, UK", + "bibtex": "@article{Zhang_Ruan_Xu_2023, title={Reachability Analysis of Neural Network Control Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26783}, DOI={10.1609/aaai.v37i12.26783}, abstractNote={Neural network controllers (NNCs) have shown great promise in autonomous and cyber-physical systems. Despite the various verification approaches for neural networks, the safety analysis of NNCs remains an open problem. 
Existing verification approaches for neural network control systems (NNCSs) either can only work on a limited type of activation functions, or result in non-trivial over-approximation errors with time evolving. This paper proposes a verification framework for NNCS based on Lipschitzian optimisation, called DeepNNC. We first prove the Lipschitz continuity of closed-loop NNCSs by unrolling and eliminating the loops. We then reveal the working principles of applying Lipschitzian optimisation on NNCS verification and illustrate it by verifying an adaptive cruise control model. Compared to state-of-the-art verification approaches, DeepNNC shows superior performance in terms of efficiency and accuracy over a wide range of NNCs. We also provide a case study to demonstrate the capability of DeepNNC to handle a real-world, practical, and complex system. Our tool DeepNNC is available at https://github.com/TrustAI/DeepNNC.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Chi and Ruan, Wenjie and Xu, Peipei}, year={2023}, month={Jun.}, pages={15287-15295} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26783/26555", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26783", + "pdf_size": 2874475, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13147871883697847149&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "exeter.ac.uk;exeter.ac.uk;liverpool.ac.uk", + "email": "exeter.ac.uk;exeter.ac.uk;liverpool.ac.uk", + "github": "https://github.com/TrustAI/DeepNNC", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of Exeter;University of Liverpool", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.exeter.ac.uk;https://www.liverpool.ac.uk", + "aff_unique_abbr": "Exeter;Liv Uni", + "aff_campus_unique_index": "0;0;1", + 
"aff_campus_unique": "Exeter;Liverpool", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25779", + "title": "Reachability Games Modulo Theories with a Bounded Safety Player", + "track": "main", + "status": "Technical", + "abstract": "Solving reachability games is a fundamental problem for the analysis, verification, and synthesis of reactive systems.\nWe consider logical reachability games modulo theories (in short, GMTs), i.e.,\ninfinite-state games whose rules are defined by logical formulas over a multi-sorted first-order theory. \nOur games have an asymmetric constraint: the safety player has at most k possible moves from each game configuration, whereas the reachability player has no such limitation.\nEven though determining the winner of such a GMT is undecidable, it can be reduced to the well-studied problem of checking the satisfiability of a system of constrained Horn clauses (CHCs), for which many off-the-shelf solvers have been developed.\nWinning strategies for GMTs can also be computed by resorting to suitable CHC queries. 
\nWe demonstrate that GMTs can model various relevant real-world games, and that our approach can effectively solve several problems from different domains, using Z3 as the backend CHC solver.", + "primary_area": "knowledge representation and reasoning", + "author": "Marco Faella; Gennaro Parlato", + "authorids": "", + "aff": "University of Naples Federico II, Naples, Italy; University of Molise, Pesche, Italy", + "bibtex": "@article{Faella_Parlato_2023, title={Reachability Games Modulo Theories with a Bounded Safety Player}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25779}, DOI={10.1609/aaai.v37i5.25779}, abstractNote={Solving reachability games is a fundamental problem for the analysis, verification, and synthesis of reactive systems.\nWe consider logical reachability games modulo theories (in short, GMTs), i.e.,\ninfinite-state games whose rules are defined by logical formulas over a multi-sorted first-order theory. Our games have an asymmetric constraint: the safety player has at most k possible moves from each game configuration, whereas the reachability player has no such limitation.\nEven though determining the winner of such a GMT is undecidable, it can be reduced to the well-studied problem of checking the satisfiability of a system of constrained Horn clauses (CHCs), for which many off-the-shelf solvers have been developed.\nWinning strategies for GMTs can also be computed by resorting to suitable CHC queries. 
We demonstrate that GMTs can model various relevant real-world games, and that our approach can effectively solve several problems from different domains, using Z3 as the backend CHC solver.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Faella, Marco and Parlato, Gennaro}, year={2023}, month={Jun.}, pages={6330-6337} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25779/25551", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25779", + "pdf_size": 157609, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15579502781139772890&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "unina.it;unimol.it", + "email": "unina.it;unimol.it", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Naples Federico II;University of Molise", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unina.it;https://www.unimol.it", + "aff_unique_abbr": "UNINA;", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Naples;Pesche", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25767", + "title": "Reactive Synthesis of Dominant Strategies", + "track": "main", + "status": "Technical", + "abstract": "We study the synthesis under environment specifications problem for LTL/LTLf which, in particular, generalizes FOND (strong) planning with these temporal goals. We consider the case where the agent cannot enforce its goal --- for which the argument for using best-effort strategies has been made --- and study the intermediate ground, between enforcing and best-effort strategies, of dominant strategies. Intuitively, such strategies achieve the goal against any environment for which it is achievable. 
\n\nWe show that dominant strategies may exist when enforcing ones do not, while still sharing with the latter many desirable properties such as being interchangeable with each other, and being monotone with respect to tightening of environment specifications. We give necessary and sufficient conditions for the existence of dominant strategies, and show that deciding if they exist is 2EXPTIME-complete --- the same as for enforcing strategies. Finally, we give a uniform, optimal, game-theoretic algorithm for simultaneously solving the three synthesis problems of enforcing, dominant, and best-effort strategies.", + "primary_area": "knowledge representation and reasoning", + "author": "Benjamin Aminof; Giuseppe De Giacomo; Sasha Rubin", + "authorids": "", + "aff": "TU Wien+University of Sydney; University of Oxford+ Universit\u00e0 degli Studi di Roma \u201cLa Sapienza\u201d; University of Sydney", + "bibtex": "@article{Aminof_De Giacomo_Rubin_2023, title={Reactive Synthesis of Dominant Strategies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25767}, DOI={10.1609/aaai.v37i5.25767}, abstractNote={We study the synthesis under environment specifications problem for LTL/LTLf which, in particular, generalizes FOND (strong) planning with these temporal goals. We consider the case where the agent cannot enforce its goal --- for which the argument for using best-effort strategies has been made --- and study the intermediate ground, between enforcing and best-effort strategies, of dominant strategies. Intuitively, such strategies achieve the goal against any environment for which it is achievable. We show that dominant strategies may exist when enforcing ones do not, while still sharing with the latter many desirable properties such as being interchangeable with each other, and being monotone with respect to tightening of environment specifications. 
We give necessary and sufficient conditions for the existence of dominant strategies, and show that deciding if they exist is 2EXPTIME-complete --- the same as for enforcing strategies. Finally, we give a uniform, optimal, game-theoretic algorithm for simultaneously solving the three synthesis problems of enforcing, dominant, and best-effort strategies.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aminof, Benjamin and De Giacomo, Giuseppe and Rubin, Sasha}, year={2023}, month={Jun.}, pages={6228-6235} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25767/25539", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25767", + "pdf_size": 149411, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10998780947572145291&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "forsyte.at;cs.ox.ac.uk;sydney.edu.au", + "email": "forsyte.at;cs.ox.ac.uk;sydney.edu.au", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2+3;1", + "aff_unique_norm": "Technische Universit\u00e4t Wien;University of Sydney;University of Oxford;Universit\u00e0 degli Studi di Roma La Sapienza", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.tuwien.ac.at;https://www.sydney.edu.au;https://www.ox.ac.uk;https://www.uniroma1.it", + "aff_unique_abbr": "TU Wien;USYD;Oxford;La Sapienza", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;2+3;1", + "aff_country_unique": "Austria;Australia;United Kingdom;Italy" + }, + { + "id": "article-26501", + "title": "Real or Fake Text?: Investigating Human Ability to Detect Boundaries between Human-Written and Machine-Generated Text", + "track": "main", + "status": "Technical", + "abstract": "As text generated by large language models proliferates, it becomes vital to understand how humans engage with such text, and whether or not they are able to detect when the 
text they are reading did not originate with a human writer. Prior work on human detection of generated text focuses on the case where an entire passage is either human-written or machine-generated. In this paper, we study a more realistic setting where text begins as human-written and transitions to being generated by state-of-the-art neural language models. We show that, while annotators often struggle at this task, there is substantial variance in annotator skill and that given proper incentives, annotators can improve at this task over time. Furthermore, we conduct a detailed comparison study and analyze how a variety of variables (model size, decoding strategy, fine-tuning, prompt genre, etc.) affect human detection performance. Finally, we collect error annotations from our participants and use them to show that certain textual genres influence models to make different types of errors and that certain sentence-level features correlate highly with annotator selection. We release the RoFT dataset: a collection of over 21,000 human annotations paired with error classifications to encourage future work in human detection and evaluation of generated text.", + "primary_area": "speech natural language processing", + "author": "Liam Dugan; Daphne Ippolito; Arun Kirubarajan; Sherry Shi; Chris Callison-Burch", + "authorids": "", + "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", + "bibtex": "@article{Dugan_Ippolito_Kirubarajan_Shi_Callison-Burch_2023, title={Real or Fake Text?: Investigating Human Ability to Detect Boundaries between Human-Written and Machine-Generated Text}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26501}, DOI={10.1609/aaai.v37i11.26501}, abstractNote={As text generated by large language models proliferates, it becomes vital to understand how humans engage with such text, and whether or not they are able to detect when the text 
they are reading did not originate with a human writer. Prior work on human detection of generated text focuses on the case where an entire passage is either human-written or machine-generated. In this paper, we study a more realistic setting where text begins as human-written and transitions to being generated by state-of-the-art neural language models. We show that, while annotators often struggle at this task, there is substantial variance in annotator skill and that given proper incentives, annotators can improve at this task over time. Furthermore, we conduct a detailed comparison study and analyze how a variety of variables (model size, decoding strategy, fine-tuning, prompt genre, etc.) affect human detection performance. Finally, we collect error annotations from our participants and use them to show that certain textual genres influence models to make different types of errors and that certain sentence-level features correlate highly with annotator selection. We release the RoFT dataset: a collection of over 21,000 human annotations paired with error classifications to encourage future work in human detection and evaluation of generated text.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dugan, Liam and Ippolito, Daphne and Kirubarajan, Arun and Shi, Sherry and Callison-Burch, Chris}, year={2023}, month={Jun.}, pages={12763-12771} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26501/26273", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26501", + "pdf_size": 295458, + "gs_citation": 76, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=392920040933851495&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 13, + "aff_domain": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", + "email": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": 
"0;0;0;0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26844", + "title": "Real-Time Detection of Robotic Traffic in Online Advertising", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Detecting robotic traffic at scale on online ads needs an approach that is scalable, comprehensive, precise, and can rapidly respond to changing traffic patterns. In this paper we describe SLIDR or SLIce-Level Detection of Robots, a real-time deep neural network model trained with weak supervision to identify invalid clicks on online ads. We ensure fairness across different traffic slices by formulating a convex optimization problem that allows SLIDR to achieve optimal performance on individual traffic slices with a budget on overall false positives. SLIDR has been deployed since 2021 and safeguards advertiser campaigns on Amazon against robots clicking on ads on the e-commerce site. 
We describe some of the important lessons learned by deploying SLIDR that include guardrails that prevent updates of anomalous models and disaster recovery mechanisms to mitigate or correct decisions made by a faulty model.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Anand Muralidhar; Sharad Chitlangia; Rajat Agarwal; Muneeb Ahmed", + "authorids": "", + "aff": "Amazon; Amazon; Amazon; Amazon", + "bibtex": "@article{Muralidhar_Chitlangia_Agarwal_Ahmed_2024, title={Real-Time Detection of Robotic Traffic in Online Advertising}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26844}, DOI={10.1609/aaai.v37i13.26844}, abstractNote={Detecting robotic traffic at scale on online ads needs an approach that is scalable, comprehensive, precise, and can rapidly respond to changing traffic patterns. In this paper we describe SLIDR or SLIce-Level Detection of Robots, a real-time deep neural network model trained with weak supervision to identify invalid clicks on online ads. We ensure fairness across different traffic slices by formulating a convex optimization problem that allows SLIDR to achieve optimal performance on individual traffic slices with a budget on overall false positives. SLIDR has been deployed since 2021 and safeguards advertiser campaigns on Amazon against robots clicking on ads on the e-commerce site. 
We describe some of the important lessons learned by deploying SLIDR that include guardrails that prevent updates of anomalous models and disaster recovery mechanisms to mitigate or correct decisions made by a faulty model.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Muralidhar, Anand and Chitlangia, Sharad and Agarwal, Rajat and Ahmed, Muneeb}, year={2024}, month={Jul.}, pages={15551-15559} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26844/26616", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26844", + "pdf_size": 386608, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13668893480560821082&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Amazon.com, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25215", + "title": "Real-World Deep Local Motion Deblurring", + "track": "main", + "status": "Technical", + "abstract": "Most existing deblurring methods focus on removing global blur caused by camera shake, while they cannot well handle local blur caused by object movements. To fill the vacancy of local deblurring in real scenes, we establish the first real local motion blur dataset (ReLoBlur), which is captured by a synchronized beam-splitting photographing system and corrected by a post-progressing pipeline. 
Based on ReLoBlur, we propose a Local Blur-Aware Gated network (LBAG) and several local blur-aware techniques to bridge the gap between global and local deblurring: 1) a blur detection approach based on background subtraction to localize blurred regions; 2) a gate mechanism to guide our network to focus on blurred regions; and 3) a blur-aware patch cropping strategy to address data imbalance problem. Extensive experiments prove the reliability of ReLoBlur dataset, and demonstrate that LBAG achieves better performance than state-of-the-art global deblurring methods and our proposed local blur-aware techniques are effective.", + "primary_area": "computer vision i", + "author": "Haoying Li; Ziran Zhang; Tingting Jiang; Peng Luo; Huajun Feng; Zhihai Xu", + "authorids": "", + "aff": "College of Optical Science and Engineering, Zhejiang University+Research Center for Intelligent Sensing Systems, Zhejiang Laboratory; College of Optical Science and Engineering, Zhejiang University; Research Center for Intelligent Sensing Systems, Zhejiang Laboratory; College of Optical Science and Engineering, Zhejiang University; College of Optical Science and Engineering, Zhejiang University; College of Optical Science and Engineering, Zhejiang University", + "bibtex": "@article{Li_Zhang_Jiang_Luo_Feng_Xu_2023, title={Real-World Deep Local Motion Deblurring}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25215}, DOI={10.1609/aaai.v37i1.25215}, abstractNote={Most existing deblurring methods focus on removing global blur caused by camera shake, while they cannot well handle local blur caused by object movements. To fill the vacancy of local deblurring in real scenes, we establish the first real local motion blur dataset (ReLoBlur), which is captured by a synchronized beam-splitting photographing system and corrected by a post-progressing pipeline. 
Based on ReLoBlur, we propose a Local Blur-Aware Gated network (LBAG) and several local blur-aware techniques to bridge the gap between global and local deblurring: 1) a blur detection approach based on background subtraction to localize blurred regions; 2) a gate mechanism to guide our network to focus on blurred regions; and 3) a blur-aware patch cropping strategy to address data imbalance problem. Extensive experiments prove the reliability of ReLoBlur dataset, and demonstrate that LBAG achieves better performance than state-of-the-art global deblurring methods and our proposed local blur-aware techniques are effective.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Haoying and Zhang, Ziran and Jiang, Tingting and Luo, Peng and Feng, Huajun and Xu, Zhihai}, year={2023}, month={Jun.}, pages={1314-1322} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25215/24987", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25215", + "pdf_size": 17002711, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13068983903504453378&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zhejianglab.com;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zhejianglab.com;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;1;0;0;0", + "aff_unique_norm": "Zhejiang University;Zhejiang Laboratory", + "aff_unique_dep": "College of Optical Science and Engineering;Research Center for Intelligent Sensing Systems", + "aff_unique_url": "http://www.zju.edu.cn;http://www.zjlab.cn", + "aff_unique_abbr": "ZJU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26810", + "title": "Recent Developments in Data-Driven Algorithms for Discrete Optimization", + 
"track": "new faculty highlights", + "status": "Technical", + "abstract": "The last few years have witnessed a renewed interest in \u201cdata-driven algorithm design\u201d (Balcan 2020), the use of Machine Learning (ML) to tailor an algorithm to a distribution of instances. More than a decade ago, advances in algorithm configuration (Hoos 2011) paved the way for the use of historical data to modify an algorithm\u2019s (typically fixed, static) parameters. In discrete optimization (e.g., satisfiability, integer programming, etc.), exact and inexact algorithms for NP-Hard problems often involve heuristic search decisions (Lodi 2013), abstracted as parameters, that can demonstrably benefit from tuning on historical instances from the application of interest.\n\nWhile useful, algorithm configuration may be insufficient: setting the parameters of an algorithm upfront of solving the input instance is still a static, high-level decision. In contrast, we have been exploring a suite of ML and Reinforcement Learning (RL) approaches that tune iterative optimization algorithms, such as branch-and-bound for integer programming or construction heuristics, at the iteration-level (Khalil et al. 2016, 2017; Dai et al. 2017; Chmiela et al. 2021; Gupta et al. 2022; Chi et al. 2022; Khalil, Vaezipoor, and Dilkina 2022; Khalil, Morris, and Lodi 2022; Alomrani, Moravej, and Khalil 2022; Cappart et al. 2021; Gupta et al. 2020).\n\nWe will survey our most recent work in this area:\n1. New methods for learning in MILP branch-and-bound (Gupta et al. 2020, 2022; Chmiela et al. 2021; Khalil, Vaezipoor, and Dilkina 2022; Khalil, Morris, and Lodi 2022);\n\n2. RL for online combinatorial optimization and largescale linear programming (Alomrani, Moravej, and Khalil 2022; Chi et al. 2022);\n\n3. Neural network approximations for stochastic programming (Dumouchelle et al. 2022).", + "primary_area": "", + "author": "Elias B. 
Khalil", + "authorids": "", + "aff": "Department of Mechanical and Industrial Engineering, University of Toronto, SCALE AI Research Chair in Data-Driven Algorithms for Modern Supply Chains", + "bibtex": "@article{Khalil_2024, title={Recent Developments in Data-Driven Algorithms for Discrete Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26810}, DOI={10.1609/aaai.v37i13.26810}, abstractNote={The last few years have witnessed a renewed interest in \u201cdata-driven algorithm design\u201d (Balcan 2020), the use of Machine Learning (ML) to tailor an algorithm to a distribution of instances. More than a decade ago, advances in algorithm configuration (Hoos 2011) paved the way for the use of historical data to modify an algorithm\u2019s (typically fixed, static) parameters. In discrete optimization (e.g., satisfiability, integer programming, etc.), exact and inexact algorithms for NP-Hard problems often involve heuristic search decisions (Lodi 2013), abstracted as parameters, that can demonstrably benefit from tuning on historical instances from the application of interest. While useful, algorithm configuration may be insufficient: setting the parameters of an algorithm upfront of solving the input instance is still a static, high-level decision. In contrast, we have been exploring a suite of ML and Reinforcement Learning (RL) approaches that tune iterative optimization algorithms, such as branch-and-bound for integer programming or construction heuristics, at the iteration-level (Khalil et al. 2016, 2017; Dai et al. 2017; Chmiela et al. 2021; Gupta et al. 2022; Chi et al. 2022; Khalil, Vaezipoor, and Dilkina 2022; Khalil, Morris, and Lodi 2022; Alomrani, Moravej, and Khalil 2022; Cappart et al. 2021; Gupta et al. 2020). We will survey our most recent work in this area:\n1. New methods for learning in MILP branch-and-bound (Gupta et al. 2020, 2022; Chmiela et al. 
2021; Khalil, Vaezipoor, and Dilkina 2022; Khalil, Morris, and Lodi 2022); 2. RL for online combinatorial optimization and largescale linear programming (Alomrani, Moravej, and Khalil 2022; Chi et al. 2022); 3. Neural network approximations for stochastic programming (Dumouchelle et al. 2022).}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Khalil, Elias B.}, year={2024}, month={Jul.}, pages={15443-15443} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26810/26582", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26810", + "pdf_size": 47700, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:r7-9tkVY6AMJ:scholar.google.com/&scioq=Recent+Developments+in+Data-Driven+Algorithms+for+Discrete+Optimization&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "mie.utoronto.ca", + "email": "mie.utoronto.ca", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Toronto", + "aff_unique_dep": "Department of Mechanical and Industrial Engineering", + "aff_unique_url": "https://www.utoronto.ca", + "aff_unique_abbr": "U of T", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Toronto", + "aff_country_unique_index": "0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26947", + "title": "Reconsidering Deception in Social Robotics: The Role of Human Vulnerability (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The literature on deception in human-robot interaction (henceforth HRI) could be divided between: (i) those who consider it essential to maximise users' end utility and robotic performance; (ii) those who consider it unethical, because it is potentially dangerous for individuals' psychological integrity. 
\nHowever, it has now been proven that humans are naturally prone to anthropomorphism and emotional attachment to inanimate objects. \nConsequently, despite ethical concerns, the argument for the total elimination of deception could reveal to be a pointless exercise.\nRather, it is suggested here to conceive deception in HRI as a dynamic to be modulated and graded, in order to both promote innovation and protect fundamental human rights. To this end, the concept of vulnerability could serve as an objective balancing criterion.", + "primary_area": "", + "author": "Rachele Carli; Amro Najjar", + "authorids": "", + "aff": "Alma AI, University of Bologna + ICR, University of Luxembourg; LIST Institute, University of Luxembourg", + "bibtex": "@article{Carli_Najjar_2024, title={Reconsidering Deception in Social Robotics: The Role of Human Vulnerability (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26947}, DOI={10.1609/aaai.v37i13.26947}, abstractNote={The literature on deception in human-robot interaction (henceforth HRI) could be divided between: (i) those who consider it essential to maximise users\u2019 end utility and robotic performance; (ii) those who consider it unethical, because it is potentially dangerous for individuals\u2019 psychological integrity. However, it has now been proven that humans are naturally prone to anthropomorphism and emotional attachment to inanimate objects. Consequently, despite ethical concerns, the argument for the total elimination of deception could reveal to be a pointless exercise.\nRather, it is suggested here to conceive deception in HRI as a dynamic to be modulated and graded, in order to both promote innovation and protect fundamental human rights. 
To this end, the concept of vulnerability could serve as an objective balancing criterion.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carli, Rachele and Najjar, Amro}, year={2024}, month={Jul.}, pages={16174-16175} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26947/26719", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26947", + "pdf_size": 57282, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11461122348870231410&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff_domain": "unibo.it;list.lu", + "email": "unibo.it;list.lu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "University of Bologna;University of Luxembourg", + "aff_unique_dep": "Alma AI;ICR", + "aff_unique_url": "https://www.unibo.it;https://wwwen.uni.lu", + "aff_unique_abbr": "Unibo;UniLu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "Italy;Luxembourg" + }, + { + "id": "article-26372", + "title": "Reconstructing an Epidemic Outbreak Using Steiner Connectivity", + "track": "main", + "status": "Technical", + "abstract": "Only a subset of infections is actually observed in an outbreak, due to multiple reasons such as asymptomatic cases and under-reporting. Therefore, reconstructing an epidemic cascade given some observed cases is an important step in responding to such an outbreak. A maximum likelihood solution to this problem ( referred to as CascadeMLE ) can be shown to be a variation of the classical Steiner subgraph problem, which connects a subset of observed infections. In contrast to prior works on epidemic reconstruction, which consider the standard Steiner tree objective, we show that a solution to CascadeMLE, based on the actual MLE objective, has a very different structure. 
We design a logarithmic approximation algorithm for CascadeMLE, and evaluate it on multiple synthetic and social contact networks, including a contact network constructed for a hospital. Our algorithm has significantly better performance compared to a prior baseline.", + "primary_area": "multiagent systems", + "author": "Ritwick Mishra; Jack Heavey; Gursharn Kaur; Abhijin Adiga; Anil Vullikanti", + "authorids": "", + "aff": "Biocomplexity Institute & Initiative, University of Virginia+Department of Computer Science, University of Virginia; Biocomplexity Institute & Initiative, University of Virginia+Department of Computer Science, University of Virginia; Biocomplexity Institute & Initiative, University of Virginia; Biocomplexity Institute & Initiative, University of Virginia; Biocomplexity Institute & Initiative, University of Virginia+Department of Computer Science, University of Virginia", + "bibtex": "@article{Mishra_Heavey_Kaur_Adiga_Vullikanti_2023, title={Reconstructing an Epidemic Outbreak Using Steiner Connectivity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26372}, DOI={10.1609/aaai.v37i10.26372}, abstractNote={Only a subset of infections is actually observed in an outbreak, due to multiple reasons such as asymptomatic cases and under-reporting. Therefore, reconstructing an epidemic cascade given some observed cases is an important step in responding to such an outbreak. A maximum likelihood solution to this problem ( referred to as CascadeMLE ) can be shown to be a variation of the classical Steiner subgraph problem, which connects a subset of observed infections. In contrast to prior works on epidemic reconstruction, which consider the standard Steiner tree objective, we show that a solution to CascadeMLE, based on the actual MLE objective, has a very different structure. 
We design a logarithmic approximation algorithm for CascadeMLE, and evaluate it on multiple synthetic and social contact networks, including a contact network constructed for a hospital. Our algorithm has significantly better performance compared to a prior baseline.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mishra, Ritwick and Heavey, Jack and Kaur, Gursharn and Adiga, Abhijin and Vullikanti, Anil}, year={2023}, month={Jun.}, pages={11613-11620} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26372/26144", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26372", + "pdf_size": 296752, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2062414815869223305&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "virginia.edu;virginia.edu;virginia.edu;virginia.edu;virginia.edu", + "email": "virginia.edu;virginia.edu;virginia.edu;virginia.edu;virginia.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0;0;0+0", + "aff_unique_norm": "University of Virginia", + "aff_unique_dep": "Biocomplexity Institute & Initiative", + "aff_unique_url": "https://www.virginia.edu", + "aff_unique_abbr": "UVA", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26085", + "title": "Recovering the Graph Underlying Networked Dynamical Systems under Partial Observability: A Deep Learning Approach", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of graph structure identification, i.e., of recovering the graph of dependencies among time series. We model these time series data as components of the state of linear stochastic networked dynamical systems. We assume partial observability, where the state evolution of only a subset of nodes comprising the network is observed. 
We propose a new feature-based paradigm: to each pair of nodes, we compute a feature vector from the observed time series. We prove that these features are linearly separable, i.e., there exists a hyperplane that separates the cluster of features associated with connected pairs of nodes from those of disconnected pairs. This renders the features amenable to train a variety of classifiers to perform causal inference. In particular, we use these features to train Convolutional Neural Networks (CNNs). The resulting causal inference mechanism outperforms state-of-the-art counterparts w.r.t. sample-complexity. The trained CNNs generalize well over structurally distinct networks (dense or sparse) and noise-level profiles. Remarkably, they also generalize well to real-world networks while trained over a synthetic network -- namely, a particular realization of a random graph.", + "primary_area": "machine learning ii", + "author": "S\u00e9rgio Machado; Anirudh Sridhar; Paulo Gil; Jorge Henriques; Jos\u00e9 M. F. 
Moura; Augusto Santos", + "authorids": "", + "aff": "Instituto de Telecomunica\u00e7\u00f5es, Portugal + University of Coimbra, Portugal + Department of Electrical and Computer Engineering at Carnegie Mellon University, Pittsburgh, PA, USA; Department of Electrical and Computer Engineering at Princeton University, New Jersey, NJ, USA; University of Coimbra, Portugal + Universidade Nova de Lisboa, Portugal; University of Coimbra, Portugal; Department of Electrical and Computer Engineering at Carnegie Mellon University, Pittsburgh, PA, USA; Instituto de Telecomunica\u00e7\u00f5es, Portugal + University of Coimbra, Portugal", + "bibtex": "@article{Machado_Sridhar_Gil_Henriques_Moura_Santos_2023, title={Recovering the Graph Underlying Networked Dynamical Systems under Partial Observability: A Deep Learning Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26085}, DOI={10.1609/aaai.v37i7.26085}, abstractNote={We study the problem of graph structure identification, i.e., of recovering the graph of dependencies among time series. We model these time series data as components of the state of linear stochastic networked dynamical systems. We assume partial observability, where the state evolution of only a subset of nodes comprising the network is observed. We propose a new feature-based paradigm: to each pair of nodes, we compute a feature vector from the observed time series. We prove that these features are linearly separable, i.e., there exists a hyperplane that separates the cluster of features associated with connected pairs of nodes from those of disconnected pairs. This renders the features amenable to train a variety of classifiers to perform causal inference. In particular, we use these features to train Convolutional Neural Networks (CNNs). The resulting causal inference mechanism outperforms state-of-the-art counterparts w.r.t. sample-complexity. 
The trained CNNs generalize well over structurally distinct networks (dense or sparse) and noise-level profiles. Remarkably, they also generalize well to real-world networks while trained over a synthetic network -- namely, a particular realization of a random graph.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Machado, S\u00e9rgio and Sridhar, Anirudh and Gil, Paulo and Henriques, Jorge and Moura, Jos\u00e9 M. F. and Santos, Augusto}, year={2023}, month={Jun.}, pages={9038-9046} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26085/25857", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26085", + "pdf_size": 709655, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16857354481922622134&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "student.dei.uc.pt;princeton.edu; ;dei.uc.pt;andrew.cmu.edu;gmail.com", + "email": "student.dei.uc.pt;princeton.edu; ;dei.uc.pt;andrew.cmu.edu;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;3;1+4;1;2;0+1", + "aff_unique_norm": "Instituto de Telecomunica\u00e7\u00f5es;University of Coimbra;Carnegie Mellon University;Princeton University;Universidade Nova de Lisboa", + "aff_unique_dep": ";;Department of Electrical and Computer Engineering;Department of Electrical and Computer Engineering;", + "aff_unique_url": "https://www.it.pt;https://www.uc.pt;https://www.cmu.edu;https://www.princeton.edu;https://www.unl.pt", + "aff_unique_abbr": ";UC;CMU;Princeton;UNL", + "aff_campus_unique_index": "1;2;;1;", + "aff_campus_unique": ";Pittsburgh;New Jersey", + "aff_country_unique_index": "0+0+1;1;0+0;0;1;0+0", + "aff_country_unique": "Portugal;United States" + }, + { + "id": "article-25440", + "title": "Recurrent Structure Attention Guidance for Depth Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Image guidance is an effective strategy for 
depth super-resolution. Generally, most existing methods employ hand-crafted operators to decompose the high-frequency (HF) and low-frequency (LF) ingredients from low-resolution depth maps and guide the HF ingredients by directly concatenating them with image features. However, the hand-designed operators usually cause inferior HF maps (e.g., distorted or structurally missing) due to the diverse appearance of complex depth maps. Moreover, the direct concatenation often results in weak guidance because not all image features have a positive effect on the HF maps. In this paper, we develop a recurrent structure attention guided (RSAG) framework, consisting of two important parts. First, we introduce a deep contrastive network with multi-scale filters for adaptive frequency-domain separation, which adopts contrastive networks from large filters to small ones to calculate the pixel contrasts for adaptive high-quality HF predictions. Second, instead of the coarse concatenation guidance, we propose a recurrent structure attention block, which iteratively utilizes the latest depth estimation and the image features to jointly select clear patterns and boundaries, aiming at providing refined guidance for accurate depth recovery. In addition, we fuse the features of HF maps to enhance the edge structures in the decomposed LF maps. Extensive experiments show that our approach obtains superior performance compared with state-of-the-art depth super-resolution methods. 
Our code is available at: https://github.com/Yuanjiayii/DSR-RSAG.", + "primary_area": "computer vision iii", + "author": "Jiayi Yuan; Haobo Jiang; Xiang Li; Jianjun Qian; Jun Li; Jian Yang", + "authorids": "", + "aff": "PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education; Jiangsu Key Lab of Image and Video Understanding for Social Security; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education; Jiangsu Key Lab of Image and Video Understanding for Social Security; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", + "bibtex": "@article{Yuan_Jiang_Li_Qian_Li_Yang_2023, title={Recurrent Structure Attention Guidance for Depth Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25440}, DOI={10.1609/aaai.v37i3.25440}, abstractNote={Image guidance is an effective strategy for depth super-resolution. Generally, most existing methods employ hand-crafted operators to decompose the high-frequency (HF) and low-frequency (LF) ingredients from low-resolution depth maps and guide the HF ingredients by directly concatenating them with image features. However, the hand-designed operators usually cause inferior HF maps (e.g., distorted or structurally missing) due to the diverse appearance of complex depth maps. Moreover, the direct concatenation often results in weak guidance because not all image features have a positive effect on the HF maps. In this paper, we develop a recurrent structure attention guided (RSAG) framework, consisting of two important parts. 
First, we introduce a deep contrastive network with multi-scale filters for adaptive frequency-domain separation, which adopts contrastive networks from large filters to small ones to calculate the pixel contrasts for adaptive high-quality HF predictions. Second, instead of the coarse concatenation guidance, we propose a recurrent structure attention block, which iteratively utilizes the latest depth estimation and the image features to jointly select clear patterns and boundaries, aiming at providing refined guidance for accurate depth recovery. In addition, we fuse the features of HF maps to enhance the edge structures in the decomposed LF maps. Extensive experiments show that our approach obtains superior performance compared with state-of-the-art depth super-resolution methods. Our code is available at: https://github.com/Yuanjiayii/DSR-RSAG.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Jiayi and Jiang, Haobo and Li, Xiang and Qian, Jianjun and Li, Jun and Yang, Jian}, year={2023}, month={Jun.}, pages={3331-3339} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25440/25212", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25440", + "pdf_size": 1408533, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18189162216916974298&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "github": "https://github.com/Yuanjiayii/DSR_RSAG", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;1;2", + "aff_unique_norm": "PCA Lab;Jiangsu Key Lab of Image and Video Understanding for Social Security;Nanjing University of Science and Technology", + "aff_unique_dep": "Key Lab of Intelligent Perception and Systems for High-Dimensional Information;Image and Video 
Understanding for Social Security;School of Computer Science and Engineering", + "aff_unique_url": ";;http://www.nust.edu.cn", + "aff_unique_abbr": ";;NUST", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26737", + "title": "Redactor: A Data-Centric and Individualized Defense against Inference Attacks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Information leakage is becoming a critical problem as various information becomes publicly available by mistake, and machine learning models train on that data to provide services. As a result, one's private information could easily be memorized by such trained models. Unfortunately, deleting information is out of the question as the data is already exposed to the Web or third-party platforms. Moreover, we cannot necessarily control the labeling process and the model trainings by other parties either. In this setting, we study the problem of targeted disinformation generation where the goal is to dilute the data and thus make a model safer and more robust against inference attacks on a specific target (e.g., a person's profile) by only inserting new data. Our method finds the closest points to the target in the input space that will be labeled as a different class. Since we cannot control the labeling process, we instead conservatively estimate the labels probabilistically by combining decision boundaries of multiple classifiers using data programming techniques. 
Our experiments show that a probabilistic decision boundary can be a good proxy for labelers, and that our approach is effective in defending against inference attacks and can scale to large data.", + "primary_area": "safe and robust ai", + "author": "Geon Heo; Steven Euijong Whang", + "authorids": "", + "aff": "KAIST; KAIST", + "bibtex": "@article{Heo_Whang_2023, title={Redactor: A Data-Centric and Individualized Defense against Inference Attacks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26737}, DOI={10.1609/aaai.v37i12.26737}, abstractNote={Information leakage is becoming a critical problem as various information becomes publicly available by mistake, and machine learning models train on that data to provide services. As a result, one\u2019s private information could easily be memorized by such trained models. Unfortunately, deleting information is out of the question as the data is already exposed to the Web or third-party platforms. Moreover, we cannot necessarily control the labeling process and the model trainings by other parties either. In this setting, we study the problem of targeted disinformation generation where the goal is to dilute the data and thus make a model safer and more robust against inference attacks on a specific target (e.g., a person\u2019s profile) by only inserting new data. Our method finds the closest points to the target in the input space that will be labeled as a different class. Since we cannot control the labeling process, we instead conservatively estimate the labels probabilistically by combining decision boundaries of multiple classifiers using data programming techniques. 
Our experiments show that a probabilistic decision boundary can be a good proxy for labelers, and that our approach is effective in defending against inference attacks and can scale to large data.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Heo, Geon and Whang, Steven Euijong}, year={2023}, month={Jun.}, pages={14874-14882} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26737/26509", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26737", + "pdf_size": 1045807, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12572105219133703816&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25071", + "title": "Reducing ANN-SNN Conversion Error through Residual Membrane Potential", + "track": "main", + "status": "Technical", + "abstract": "Spiking Neural Networks (SNNs) have received extensive academic attention due to the unique properties of low power consumption and high-speed computing on neuromorphic chips. Among various training methods of SNNs, ANN-SNN conversion has shown the equivalent level of performance as ANNs on large-scale datasets. However, unevenness error, which refers to the deviation caused by different temporal sequences of spike arrival on activation layers, has not been effectively resolved and seriously suffers the performance of SNNs under the condition of short time-steps. 
In this paper, we make a detailed analysis of unevenness error and divide it into four categories. We point out that the case of the ANN output being zero while the SNN output being larger than zero accounts for the largest percentage. Based on this, we theoretically prove the sufficient and necessary conditions of this case and propose an optimization strategy based on residual membrane potential to reduce unevenness error. The experimental results show that the proposed method achieves state-of-the-art performance on CIFAR-10, CIFAR-100, and ImageNet datasets. For example, we reach top-1 accuracy of 64.32% on ImageNet with 10-steps. To the best of our knowledge, this is the first time ANN-SNN conversion can simultaneously achieve high accuracy and ultra-low-latency on the complex dataset. Code is available at https://github.com/hzc1208/ANN2SNN_SRP.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Zecheng Hao; Tong Bu; Jianhao Ding; Tiejun Huang; Zhaofei Yu", + "authorids": "", + "aff": "School of Computer Science, Peking University; School of Computer Science, Peking University + Institute for Artificial Intelligence, Peking University; School of Computer Science, Peking University; School of Computer Science, Peking University + Institute for Artificial Intelligence, Peking University; School of Computer Science, Peking University + Institute for Artificial Intelligence, Peking University", + "bibtex": "@article{Hao_Bu_Ding_Huang_Yu_2023, title={Reducing ANN-SNN Conversion Error through Residual Membrane Potential}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25071}, DOI={10.1609/aaai.v37i1.25071}, abstractNote={Spiking Neural Networks (SNNs) have received extensive academic attention due to the unique properties of low power consumption and high-speed computing on neuromorphic chips. 
Among various training methods of SNNs, ANN-SNN conversion has shown the equivalent level of performance as ANNs on large-scale datasets. However, unevenness error, which refers to the deviation caused by different temporal sequences of spike arrival on activation layers, has not been effectively resolved and seriously suffers the performance of SNNs under the condition of short time-steps. In this paper, we make a detailed analysis of unevenness error and divide it into four categories. We point out that the case of the ANN output being zero while the SNN output being larger than zero accounts for the largest percentage. Based on this, we theoretically prove the sufficient and necessary conditions of this case and propose an optimization strategy based on residual membrane potential to reduce unevenness error. The experimental results show that the proposed method achieves state-of-the-art performance on CIFAR-10, CIFAR-100, and ImageNet datasets. For example, we reach top-1 accuracy of 64.32% on ImageNet with 10-steps. To the best of our knowledge, this is the first time ANN-SNN conversion can simultaneously achieve high accuracy and ultra-low-latency on the complex dataset. 
Code is available at https://github.com/hzc1208/ANN2SNN_SRP.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hao, Zecheng and Bu, Tong and Ding, Jianhao and Huang, Tiejun and Yu, Zhaofei}, year={2023}, month={Jun.}, pages={11-21} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25071/24843", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25071", + "pdf_size": 1515424, + "gs_citation": 70, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11822124045951619825&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/hzc1208/ANN2SNN", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+0;0;0+0;0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25260", + "title": "Reducing Domain Gap in Frequency and Spatial Domain for Cross-Modality Domain Adaptation on Medical Image Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised domain adaptation (UDA) aims to learn a model trained on source domain and performs well on unlabeled target domain. In medical image segmentation field, most existing UDA methods depend on adversarial learning to address the domain gap between different image modalities, which is ineffective due to its complicated training process. In this paper, we propose a simple yet effective UDA method based on frequency and spatial domain transfer under multi-teacher distillation framework. 
In the frequency domain, we first introduce non-subsampled contourlet transform for identifying domain-invariant and domain-variant frequency components (DIFs and DVFs), and then keep the DIFs unchanged while replacing the DVFs of the source domain images with that of the target domain images to narrow the domain gap. In the spatial domain, we propose a batch momentum update-based histogram matching strategy to reduce the domain-variant image style bias. Experiments on two commonly used cross-modality medical image segmentation datasets show that our proposed method achieves superior performance compared to state-of-the-art methods.", + "primary_area": "computer vision ii", + "author": "Shaolei Liu; Siqi Yin; Linhao Qu; Manning Wang", + "authorids": "", + "aff": "Digital Medical Research Center, School of Basic Medical Science, Fudan University, Shanghai 200032, China; Digital Medical Research Center, School of Basic Medical Science, Fudan University, Shanghai 200032, China; Digital Medical Research Center, School of Basic Medical Science, Fudan University, Shanghai 200032, China; Shanghai Key Lab of Medical Image Computing and Computer Assisted Intervention, Shanghai 200032, China", + "bibtex": "@article{Liu_Yin_Qu_Wang_2023, title={Reducing Domain Gap in Frequency and Spatial Domain for Cross-Modality Domain Adaptation on Medical Image Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25260}, DOI={10.1609/aaai.v37i2.25260}, abstractNote={Unsupervised domain adaptation (UDA) aims to learn a model trained on source domain and performs well on unlabeled target domain. In medical image segmentation field, most existing UDA methods depend on adversarial learning to address the domain gap between different image modalities, which is ineffective due to its complicated training process. In this paper, we propose a simple yet effective UDA method based on frequency and spatial domain transfer under multi-teacher distillation framework. 
In the frequency domain, we first introduce non-subsampled contourlet transform for identifying domain-invariant and domain-variant frequency components (DIFs and DVFs), and then keep the DIFs unchanged while replacing the DVFs of the source domain images with that of the target domain images to narrow the domain gap. In the spatial domain, we propose a batch momentum update-based histogram matching strategy to reduce the domain-variant image style bias. Experiments on two commonly used cross-modality medical image segmentation datasets show that our proposed method achieves superior performance compared to state-of-the-art methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Shaolei and Yin, Siqi and Qu, Linhao and Wang, Manning}, year={2023}, month={Jun.}, pages={1719-1727} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25260/25032", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25260", + "pdf_size": 917217, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11192321433748607993&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "https://github.com/slliuEric/FSUDA", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Fudan University;Shanghai Key Lab of Medical Image Computing and Computer Assisted Intervention", + "aff_unique_dep": "School of Basic Medical Science;", + "aff_unique_url": "https://www.fudan.edu.cn;", + "aff_unique_abbr": "Fudan;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26599", + "title": "Reducing Sentiment Bias in Pre-trained Sentiment Classification via Adaptive Gumbel Attack", + "track": "main", + 
"status": "Technical", + "abstract": "Pre-trained language models (PLMs) have recently enabled rapid progress on sentiment classification under the pre-train and fine-tune paradigm, where the fine-tuning phase aims to transfer the factual knowledge learned by PLMs to sentiment classification. However, current fine-tuning methods ignore the risk that PLMs cause the problem of sentiment bias, that is, PLMs tend to inject positive or negative sentiment from the contextual information of certain entities (or aspects) into their word embeddings, leading them to establish spurious correlations with labels. In this paper, we propose an adaptive Gumbel-attacked classifier that immunes sentiment bias from an adversarial-attack perspective. Due to the complexity and diversity of sentiment bias, we construct multiple Gumbel-attack expert networks to generate various noises from mixed Gumbel distribution constrained by mutual information minimization, and design an adaptive training framework to synthesize complex noise by confidence-guided controlling the number of expert networks. Finally, we capture these noises that effectively simulate sentiment bias based on the feedback of the classifier, and then propose a multi-channel parameter updating algorithm to strengthen the classifier to recognize these noises by fusing the parameters between the classifier and each expert network. 
Experimental results illustrate that our method significantly reduced sentiment bias and improved the performance of sentiment classification.", + "primary_area": "speech natural language processing", + "author": "Jiachen Tian; Shizhan Chen; Xiaowang Zhang; Xin Wang; Zhiyong Feng", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin, China+Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China+Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China+Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China+Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China+Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China", + "bibtex": "@article{Tian_Chen_Zhang_Wang_Feng_2023, title={Reducing Sentiment Bias in Pre-trained Sentiment Classification via Adaptive Gumbel Attack}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26599}, DOI={10.1609/aaai.v37i11.26599}, abstractNote={Pre-trained language models (PLMs) have recently enabled rapid progress on sentiment classification under the pre-train and fine-tune paradigm, where the fine-tuning phase aims to transfer the factual knowledge learned by PLMs to sentiment classification. However, current fine-tuning methods ignore the risk that PLMs cause the problem of sentiment bias, that is, PLMs tend to inject positive or negative sentiment from the contextual information of certain entities (or aspects) into their word embeddings, leading them to establish spurious correlations with labels. 
In this paper, we propose an adaptive Gumbel-attacked classifier that immunes sentiment bias from an adversarial-attack perspective. Due to the complexity and diversity of sentiment bias, we construct multiple Gumbel-attack expert networks to generate various noises from mixed Gumbel distribution constrained by mutual information minimization, and design an adaptive training framework to synthesize complex noise by confidence-guided controlling the number of expert networks. Finally, we capture these noises that effectively simulate sentiment bias based on the feedback of the classifier, and then propose a multi-channel parameter updating algorithm to strengthen the classifier to recognize these noises by fusing the parameters between the classifier and each expert network. Experimental results illustrate that our method significantly reduced sentiment bias and improved the performance of sentiment classification.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Jiachen and Chen, Shizhan and Zhang, Xiaowang and Wang, Xin and Feng, Zhiyong}, year={2023}, month={Jun.}, pages={13646-13654} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26599/26371", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26599", + "pdf_size": 1238348, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11464166754969975736&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Tianjin University;Tianjin Key Laboratory of Cognitive Computing and Application", + "aff_unique_dep": "College of Intelligence and Computing;Cognitive Computing and Application", + "aff_unique_url": "http://www.tju.edu.cn;", + "aff_unique_abbr": 
"Tianjin University;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Tianjin;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25331", + "title": "Referring Expression Comprehension Using Language Adaptive Inference", + "track": "main", + "status": "Technical", + "abstract": "Different from universal object detection, referring expression comprehension (REC) aims to locate specific objects referred to by natural language expressions. The expression provides high-level concepts of relevant visual and contextual patterns, which vary significantly with different expressions and account for only a few of those encoded in the REC model. This leads us to a question: do we really need the entire network with a fixed structure for various referring expressions? Ideally, given an expression, only expression-relevant components of the REC model are required. These components should be small in number as each expression only contains very few visual and contextual clues. This paper explores the adaptation between expressions and REC models for dynamic inference. Concretely, we propose a neat yet efficient framework named Language Adaptive Dynamic Subnets (LADS), which can extract language-adaptive subnets from the REC model conditioned on the referring expressions. By using the compact subnet, the inference can be more economical and efficient. 
Extensive experiments on RefCOCO, RefCOCO+, RefCOCOg, and Referit show that the proposed method achieves faster inference speed and higher accuracy against state-of-the-art approaches.", + "primary_area": "computer vision ii", + "author": "Wei Su; Peihan Miao; Huanzhang Dou; Yongjian Fu; Xi Li", + "authorids": "", + "aff": "College of Computer Science & Technology, Zhejiang University; School of Software Technology, Zhejiang University; College of Computer Science & Technology, Zhejiang University + Shanghai Institute for Advanced Study, Zhejiang University + Shanghai AI Laboratory; College of Computer Science & Technology, Zhejiang University; College of Computer Science & Technology, Zhejiang University + Shanghai Institute for Advanced Study, Zhejiang University + Shanghai AI Laboratory", + "bibtex": "@article{Su_Miao_Dou_Fu_Li_2023, title={Referring Expression Comprehension Using Language Adaptive Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25331}, DOI={10.1609/aaai.v37i2.25331}, abstractNote={Different from universal object detection, referring expression comprehension (REC) aims to locate specific objects referred to by natural language expressions. The expression provides high-level concepts of relevant visual and contextual patterns, which vary significantly with different expressions and account for only a few of those encoded in the REC model. This leads us to a question: do we really need the entire network with a fixed structure for various referring expressions? Ideally, given an expression, only expression-relevant components of the REC model are required. These components should be small in number as each expression only contains very few visual and contextual clues. This paper explores the adaptation between expressions and REC models for dynamic inference. 
Concretely, we propose a neat yet efficient framework named Language Adaptive Dynamic Subnets (LADS), which can extract language-adaptive subnets from the REC model conditioned on the referring expressions. By using the compact subnet, the inference can be more economical and efficient. Extensive experiments on RefCOCO, RefCOCO+, RefCOCOg, and Referit show that the proposed method achieves faster inference speed and higher accuracy against state-of-the-art approaches.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Su, Wei and Miao, Peihan and Dou, Huanzhang and Fu, Yongjian and Li, Xi}, year={2023}, month={Jun.}, pages={2357-2365} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25331/25103", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25331", + "pdf_size": 2969737, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15242573995559349383&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+0+1;0;0+0+1", + "aff_unique_norm": "Zhejiang University;Shanghai AI Laboratory", + "aff_unique_dep": "College of Computer Science & Technology;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.shanghai-ai-lab.com", + "aff_unique_abbr": "ZJU;SAIL", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0;0+0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25484", + "title": "Refined Semantic Enhancement towards Frequency Diffusion for Video Captioning", + "track": "main", + "status": "Technical", + "abstract": "Video captioning aims to generate natural language sentences that describe the given video accurately. 
Existing methods obtain favorable generation by exploring richer visual representations in encode phase or improving the decoding ability. However, the long-tailed problem hinders these attempts at low-frequency tokens, which rarely occur but carry critical semantics, playing a vital role in the detailed generation. In this paper, we introduce a novel Refined Semantic enhancement method towards Frequency Diffusion (RSFD), a captioning model that constantly perceives the linguistic representation of the infrequent tokens. Concretely, a Frequency-Aware Diffusion (FAD) module is proposed to comprehend the semantics of low-frequency tokens to break through generation limitations. In this way, the caption is refined by promoting the absorption of tokens with insufficient occurrence. Based on FAD, we design a Divergent Semantic Supervisor (DSS) module to compensate for the information loss of high-frequency tokens brought by the diffusion process, where the semantics of low-frequency tokens is further emphasized to alleviate the long-tailed problem. Extensive experiments indicate that RSFD outperforms the state-of-the-art methods on two benchmark datasets, i.e., MSR-VTT and MSVD, demonstrate that the enhancement of low-frequency tokens semantics can obtain a competitive generation effect. 
Code is available at https://github.com/lzp870/RSFD.", + "primary_area": "computer vision iii", + "author": "Xian Zhong; Zipeng Li; Shuqin Chen; Kui Jiang; Chen Chen; Mang Ye", + "authorids": "", + "aff": "School of Computer Science and Artificial Intelligence, Wuhan University of Technology; School of Computer Science and Artificial Intelligence, Wuhan University of Technology; College of Computer, Hubei University of Education; School of Computer Science, Wuhan University; Center for Research in Computer Vision, University of Central Florida; School of Computer Science, Wuhan University", + "bibtex": "@article{Zhong_Li_Chen_Jiang_Chen_Ye_2023, title={Refined Semantic Enhancement towards Frequency Diffusion for Video Captioning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25484}, DOI={10.1609/aaai.v37i3.25484}, abstractNote={Video captioning aims to generate natural language sentences that describe the given video accurately. Existing methods obtain favorable generation by exploring richer visual representations in encode phase or improving the decoding ability. However, the long-tailed problem hinders these attempts at low-frequency tokens, which rarely occur but carry critical semantics, playing a vital role in the detailed generation. In this paper, we introduce a novel Refined Semantic enhancement method towards Frequency Diffusion (RSFD), a captioning model that constantly perceives the linguistic representation of the infrequent tokens. Concretely, a Frequency-Aware Diffusion (FAD) module is proposed to comprehend the semantics of low-frequency tokens to break through generation limitations. In this way, the caption is refined by promoting the absorption of tokens with insufficient occurrence. 
Based on FAD, we design a Divergent Semantic Supervisor (DSS) module to compensate for the information loss of high-frequency tokens brought by the diffusion process, where the semantics of low-frequency tokens is further emphasized to alleviate the long-tailed problem. Extensive experiments indicate that RSFD outperforms the state-of-the-art methods on two benchmark datasets, i.e., MSR-VTT and MSVD, demonstrate that the enhancement of low-frequency tokens semantics can obtain a competitive generation effect. Code is available at https://github.com/lzp870/RSFD.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhong, Xian and Li, Zipeng and Chen, Shuqin and Jiang, Kui and Chen, Chen and Ye, Mang}, year={2023}, month={Jun.}, pages={3724-3732} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25484/25256", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25484", + "pdf_size": 506651, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16351055390864344848&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "whut.edu.cn;whut.edu.cn;whut.edu.cn;whu.edu.cn;crcv.ucf.edu;gmail.com", + "email": "whut.edu.cn;whut.edu.cn;whut.edu.cn;whu.edu.cn;crcv.ucf.edu;gmail.com", + "github": "https://github.com/lzp870/RSFD", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;3;2", + "aff_unique_norm": "Wuhan University of Technology;Hubei University of Education;Wuhan University;University of Central Florida", + "aff_unique_dep": "School of Computer Science and Artificial Intelligence;College of Computer;School of Computer Science;Center for Research in Computer Vision", + "aff_unique_url": "http://www.wut.edu.cn;http://www.hubei.edu.cn;http://www.whu.edu.cn;https://www.ucf.edu", + "aff_unique_abbr": "WUT;;WHU;UCF", + "aff_campus_unique_index": "0;0;0;2;0", + "aff_campus_unique": "Wuhan;;Orlando", + "aff_country_unique_index": "0;0;0;0;1;0", + 
"aff_country_unique": "China;United States" + }, + { + "id": "article-26654", + "title": "Rehabilitating Homeless: Dataset and Key Insights", + "track": "aaai special track", + "status": "Technical", + "abstract": "This paper presents a large anonymized dataset of homelessness alongside insights into the data-driven rehabilitation of homeless people. The dataset was gathered by a large non-profit organization working on rehabilitating the homeless for twenty years. This is the first dataset that we know of that contains rich information on thousands of homeless individuals seeking rehabilitation. We show how data analysis can help to make the rehabilitation of homeless people more effective and successful. Thus, we hope this paper alerts the data science community to the problem of homelessness.", + "primary_area": "ai for social impact", + "author": "Anna Bykova; Nikolay Filippov; Ivan P. Yamshchikov", + "authorids": "", + "aff": "LEYA Lab for Natural Language Processing, Higher School of Economics, Yandex, St. Petersburg, Russia; LEYA Lab for Natural Language Processing, Higher School of Economics, Yandex, St. Petersburg, Russia; Center for Artificial Intelligence and Robotics (CAIRO), THWS, W\u00fcrzburg, Germany+CEMAPRE, University of Lisbon, Portugal", + "bibtex": "@article{Bykova_Filippov_Yamshchikov_2023, title={Rehabilitating Homeless: Dataset and Key Insights}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26654}, DOI={10.1609/aaai.v37i12.26654}, abstractNote={This paper presents a large anonymized dataset of homelessness alongside insights into the data-driven rehabilitation of homeless people. The dataset was gathered by a large non-profit organization working on rehabilitating the homeless for twenty years. This is the first dataset that we know of that contains rich information on thousands of homeless individuals seeking rehabilitation. 
We show how data analysis can help to make the rehabilitation of homeless people more effective and successful. Thus, we hope this paper alerts the data science community to the problem of homelessness.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bykova, Anna and Filippov, Nikolay and Yamshchikov, Ivan P.}, year={2023}, month={Jun.}, pages={14136-14143} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26654/26426", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26654", + "pdf_size": 292573, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14335877421831841045&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "hse.ru;hse.ru;yamshchikov.info", + "email": "hse.ru;hse.ru;yamshchikov.info", + "github": "", + "project": "https://homeless.ru/en/", + "author_num": 3, + "aff_unique_index": "0;0;1+2", + "aff_unique_norm": "Higher School of Economics;THWS;University of Lisbon", + "aff_unique_dep": "LEYA Lab for Natural Language Processing;Center for Artificial Intelligence and Robotics (CAIRO);CEMAPRE", + "aff_unique_url": "https://hse.ru;;https://www.ulisboa.pt", + "aff_unique_abbr": "HSE;;", + "aff_campus_unique_index": "0;0;1", + "aff_campus_unique": "St. Petersburg;W\u00fcrzburg;", + "aff_country_unique_index": "0;0;1+2", + "aff_country_unique": "Russia;Germany;Portugal" + }, + { + "id": "article-25929", + "title": "Reinforced Approximate Exploratory Data Analysis", + "track": "main", + "status": "Technical", + "abstract": "Exploratory data analytics (EDA) is a sequential decision making process where analysts choose subsequent queries that might lead to some interesting insights based on the previous queries and corresponding results. Data processing systems often execute the queries on samples to produce results with low latency. 
Different downsampling strategy preserves different statistics of the data and have different magnitude of latency reductions. The optimum choice of sampling strategy often depends on the particular context of the analysis flow and the hidden intent of the analyst. In this paper, we are the first to consider the impact of sampling in interactive data exploration settings as they introduce approximation errors.\nWe propose a Deep Reinforcement Learning (DRL) based framework which can optimize the sample selection in order to keep the analysis and insight generation flow intact. Evaluations with real datasets show that our technique can preserve the original insight generation flow while improving the interaction latency, compared to baseline methods.", + "primary_area": "machine learning i", + "author": "Shaddy Garg; Subrata Mitra; Tong Yu; Yash Gadhia; Arjun Kashettiwar", + "authorids": "", + "aff": "Adobe Research; Adobe Research; Adobe Research; Indian Institute of Technology, Bombay; Indian Institute of Technology, Bombay", + "bibtex": "@article{Garg_Mitra_Yu_Gadhia_Kashettiwar_2023, title={Reinforced Approximate Exploratory Data Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25929}, DOI={10.1609/aaai.v37i6.25929}, abstractNote={Exploratory data analytics (EDA) is a sequential decision making process where analysts choose subsequent queries that might lead to some interesting insights based on the previous queries and corresponding results. Data processing systems often execute the queries on samples to produce results with low latency. Different downsampling strategy preserves different statistics of the data and have different magnitude of latency reductions. The optimum choice of sampling strategy often depends on the particular context of the analysis flow and the hidden intent of the analyst. 
In this paper, we are the first to consider the impact of sampling in interactive data exploration settings as they introduce approximation errors.\nWe propose a Deep Reinforcement Learning (DRL) based framework which can optimize the sample selection in order to keep the analysis and insight generation flow intact. Evaluations with real datasets show that our technique can preserve the original insight generation flow while improving the interaction latency, compared to baseline methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Garg, Shaddy and Mitra, Subrata and Yu, Tong and Gadhia, Yash and Kashettiwar, Arjun}, year={2023}, month={Jun.}, pages={7660-7669} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25929/25701", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25929", + "pdf_size": 1150853, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3045680512772112135&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "; ; ; ; ", + "email": "; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;1", + "aff_unique_norm": "Adobe;Indian Institute of Technology Bombay", + "aff_unique_dep": "Adobe Research;", + "aff_unique_url": "https://research.adobe.com;https://www.iitb.ac.in", + "aff_unique_abbr": "Adobe;IIT Bombay", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Bombay", + "aff_country_unique_index": "0;0;0;1;1", + "aff_country_unique": "United States;India" + }, + { + "id": "article-26274", + "title": "Reinforcement Causal Structure Learning on Order Graph", + "track": "main", + "status": "Technical", + "abstract": "Learning directed acyclic graph (DAG) that describes the causality of observed data is a very challenging but important task. 
Due to the limited quantity and quality of observed data, and non-identifiability of causal graph, it is almost impossible to infer a single precise DAG. Some methods approximate the posterior distribution of DAGs to explore the DAG space via Markov chain Monte Carlo (MCMC), but the DAG space is over the nature of super-exponential growth, accurately characterizing the whole distribution over DAGs is very intractable. In this paper, we propose Reinforcement Causal Structure Learning on Order Graph (RCL-OG) that uses order graph instead of MCMC to model different DAG topological orderings and to reduce the problem size. RCL-OG first defines reinforcement learning with a new reward mechanism to approximate the posterior distribution of orderings in an efficacy way, and uses deep Q-learning to update and transfer rewards between nodes. Next, it obtains the probability transition model of nodes on order graph, and computes the posterior probability of different orderings. In this way, we can sample on this model to obtain the ordering with high probability. Experiments on synthetic and benchmark datasets show that RCL-OG provides accurate posterior probability approximation and achieves better results than competitive causal discovery algorithms.", + "primary_area": "machine learning iv", + "author": "Dezhi Yang; Guoxian Yu; Jun Wang; Zhengtian Wu; Maozu Guo", + "authorids": "", + "aff": "School of Software, Shandong University, Jinan, China+SDU-NTU Joint Centre for AI Research, Shandong University, Jinan, China; School of Software, Shandong University, Jinan, China+SDU-NTU Joint Centre for AI Research, Shandong University, Jinan, China; SDU-NTU Joint Centre for AI Research, Shandong University, Jinan, China; School of Electronic and Information Engineering, Suzhou University of Science and Technology, Suzhou, China; College of Elec. & Inf. 
Eng., Beijing University of Civil Engineering and Architecture, Beijing, China", + "bibtex": "@article{Yang_Yu_Wang_Wu_Guo_2023, title={Reinforcement Causal Structure Learning on Order Graph}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26274}, DOI={10.1609/aaai.v37i9.26274}, abstractNote={Learning directed acyclic graph (DAG) that describes the causality of observed data is a very challenging but important task. Due to the limited quantity and quality of observed data, and non-identifiability of causal graph, it is almost impossible to infer a single precise DAG. Some methods approximate the posterior distribution of DAGs to explore the DAG space via Markov chain Monte Carlo (MCMC), but the DAG space is over the nature of super-exponential growth, accurately characterizing the whole distribution over DAGs is very intractable. In this paper, we propose Reinforcement Causal Structure Learning on Order Graph (RCL-OG) that uses order graph instead of MCMC to model different DAG topological orderings and to reduce the problem size. RCL-OG first defines reinforcement learning with a new reward mechanism to approximate the posterior distribution of orderings in an efficacy way, and uses deep Q-learning to update and transfer rewards between nodes. Next, it obtains the probability transition model of nodes on order graph, and computes the posterior probability of different orderings. In this way, we can sample on this model to obtain the ordering with high probability. 
Experiments on synthetic and benchmark datasets show that RCL-OG provides accurate posterior probability approximation and achieves better results than competitive causal discovery algorithms.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Dezhi and Yu, Guoxian and Wang, Jun and Wu, Zhengtian and Guo, Maozu}, year={2023}, month={Jun.}, pages={10737-10744} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26274/26046", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26274", + "pdf_size": 451510, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11431174597572996944&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;mail.usts.edu.cn;bucea.edu.cn", + "email": "mail.sdu.edu.cn;sdu.edu.cn;sdu.edu.cn;mail.usts.edu.cn;bucea.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0;1;2", + "aff_unique_norm": "Shandong University;Suzhou University of Science and Technology;Beijing University of Civil Engineering and Architecture", + "aff_unique_dep": "School of Software;School of Electronic and Information Engineering;College of Elec. & Inf. Eng.", + "aff_unique_url": "http://www.sdu.edu.cn;;http://www.bucea.edu.cn", + "aff_unique_abbr": ";;BUCEA", + "aff_campus_unique_index": "0+0;0+0;0;1;2", + "aff_campus_unique": "Jinan;Suzhou;Beijing", + "aff_country_unique_index": "0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25521", + "title": "Reinforcement Learning for Branch-and-Bound Optimisation Using Retrospective Trajectories", + "track": "main", + "status": "Technical", + "abstract": "Combinatorial optimisation problems framed as mixed integer linear programmes (MILPs) are ubiquitous across a range of real-world applications. 
The canonical branch-and-bound algorithm seeks to exactly solve MILPs by constructing a search tree of increasingly constrained sub-problems. In practice, its solving time performance is dependent on heuristics, such as the choice of the next variable to constrain ('branching'). Recently, machine learning (ML) has emerged as a promising paradigm for branching. However, prior works have struggled to apply reinforcement learning (RL), citing sparse rewards, difficult exploration, and partial observability as significant challenges. Instead, leading ML methodologies resort to approximating high quality handcrafted heuristics with imitation learning (IL), which precludes the discovery of novel policies and requires expensive data labelling. In this work, we propose retro branching; a simple yet effective approach to RL for branching. By retrospectively deconstructing the search tree into multiple paths each contained within a sub-tree, we enable the agent to learn from shorter trajectories with more predictable next states. In experiments on four combinatorial tasks, our approach enables learning-to-branch without any expert guidance or pre-training. We outperform the current state-of-the-art RL branching algorithm by 3-5x and come within 20% of the best IL method's performance on MILPs with 500 constraints and 1000 variables, with ablations verifying that our retrospectively constructed trajectories are essential to achieving these results.", + "primary_area": "constraint satisfaction and optimization", + "author": "Christopher W. F. Parsonson; Alexandre Laterre; Thomas D. 
Barrett", + "authorids": "", + "aff": "UCL; InstaDeep; InstaDeep", + "bibtex": "@article{Parsonson_Laterre_Barrett_2023, title={Reinforcement Learning for Branch-and-Bound Optimisation Using Retrospective Trajectories}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25521}, DOI={10.1609/aaai.v37i4.25521}, abstractNote={Combinatorial optimisation problems framed as mixed integer linear programmes (MILPs) are ubiquitous across a range of real-world applications. The canonical branch-and-bound algorithm seeks to exactly solve MILPs by constructing a search tree of increasingly constrained sub-problems. In practice, its solving time performance is dependent on heuristics, such as the choice of the next variable to constrain (\u2019branching\u2019). Recently, machine learning (ML) has emerged as a promising paradigm for branching. However, prior works have struggled to apply reinforcement learning (RL), citing sparse rewards, difficult exploration, and partial observability as significant challenges. Instead, leading ML methodologies resort to approximating high quality handcrafted heuristics with imitation learning (IL), which precludes the discovery of novel policies and requires expensive data labelling. In this work, we propose retro branching; a simple yet effective approach to RL for branching. By retrospectively deconstructing the search tree into multiple paths each contained within a sub-tree, we enable the agent to learn from shorter trajectories with more predictable next states. In experiments on four combinatorial tasks, our approach enables learning-to-branch without any expert guidance or pre-training. 
We outperform the current state-of-the-art RL branching algorithm by 3-5x and come within 20% of the best IL method\u2019s performance on MILPs with 500 constraints and 1000 variables, with ablations verifying that our retrospectively constructed trajectories are essential to achieving these results.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Parsonson, Christopher W. F. and Laterre, Alexandre and Barrett, Thomas D.}, year={2023}, month={Jun.}, pages={4061-4069} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25521/25293", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25521", + "pdf_size": 829271, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9996548086988345284&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com; ; ", + "email": "gmail.com; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "University College London;InstaDeep", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucl.ac.uk;https://www.instadeep.com", + "aff_unique_abbr": "UCL;InstaDeep", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25379", + "title": "Reject Decoding via Language-Vision Models for Text-to-Image Synthesis", + "track": "main", + "status": "Technical", + "abstract": "Transformer-based text-to-image synthesis generates images from abstractive textual conditions and achieves prompt results. Since transformer-based models predict visual tokens step by step in testing, where the early error is hard to be corrected and would be propagated. To alleviate this issue, the common practice is drawing multi-paths from the transformer-based models and re-ranking the multi-images decoded from multi-paths to find the best one and filter out others. 
Therefore, the computing procedure of excluding images may be inefficient. To improve the effectiveness and efficiency of decoding, we exploit a reject decoding algorithm with tiny multi-modal models to enlarge the searching space and exclude the useless paths as early as possible. Specifically, we build tiny multi-modal models to evaluate the similarities between the partial paths and the caption at multi scales. Then, we propose a reject decoding algorithm to exclude some lowest quality partial paths at the inner steps. Thus, under the same computing load as the original decoding, we could search across more multi-paths to improve the decoding efficiency and synthesizing quality. The experiments conducted on the MS-COCO dataset and large-scale datasets show that the proposed reject decoding algorithm can exclude the useless paths and enlarge the searching paths to improve the synthesizing quality by consuming less time.", + "primary_area": "computer vision iii", + "author": "Fuxiang Wu; Liu Liu; Fusheng Hao; Fengxiang He; Lei Wang; Jun Cheng", + "authorids": "", + "aff": "Guangdong Provincial Key Laboratory of Robotics and Intelligent System, Shenzhen Institute of Advanced Technology, CAS, China+The Chinese University of Hong Kong, Hong Kong, China; School of Computer Science, Faculty of Engineering, The University of Sydney, Australia; Guangdong Provincial Key Laboratory of Robotics and Intelligent System, Shenzhen Institute of Advanced Technology, CAS, China+The Chinese University of Hong Kong, Hong Kong, China; JD Explore Academy, JD.com Inc., Beijing, China; Guangdong Provincial Key Laboratory of Robotics and Intelligent System, Shenzhen Institute of Advanced Technology, CAS, China+The Chinese University of Hong Kong, Hong Kong, China; Guangdong Provincial Key Laboratory of Robotics and Intelligent System, Shenzhen Institute of Advanced Technology, CAS, China+The Chinese University of Hong Kong, Hong Kong, China", + "bibtex": 
"@article{Wu_Liu_Hao_He_Wang_Cheng_2023, title={Reject Decoding via Language-Vision Models for Text-to-Image Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25379}, DOI={10.1609/aaai.v37i3.25379}, abstractNote={Transformer-based text-to-image synthesis generates images from abstractive textual conditions and achieves prompt results. Since transformer-based models predict visual tokens step by step in testing, where the early error is hard to be corrected and would be propagated. To alleviate this issue, the common practice is drawing multi-paths from the transformer-based models and re-ranking the multi-images decoded from multi-paths to find the best one and filter out others. Therefore, the computing procedure of excluding images may be inefficient. To improve the effectiveness and efficiency of decoding, we exploit a reject decoding algorithm with tiny multi-modal models to enlarge the searching space and exclude the useless paths as early as possible. Specifically, we build tiny multi-modal models to evaluate the similarities between the partial paths and the caption at multi scales. Then, we propose a reject decoding algorithm to exclude some lowest quality partial paths at the inner steps. Thus, under the same computing load as the original decoding, we could search across more multi-paths to improve the decoding efficiency and synthesizing quality. 
The experiments conducted on the MS-COCO dataset and large-scale datasets show that the proposed reject decoding algorithm can exclude the useless paths and enlarge the searching paths to improve the synthesizing quality by consuming less time.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Fuxiang and Liu, Liu and Hao, Fusheng and He, Fengxiang and Wang, Lei and Cheng, Jun}, year={2023}, month={Jun.}, pages={2785-2794} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25379/25151", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25379", + "pdf_size": 7528878, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:LfMO9gToQPMJ:scholar.google.com/&scioq=Reject+Decoding+via+Language-Vision+Models+for+Text-to-Image+Synthesis&hl=en&as_sdt=0,44", + "gs_version_total": 2, + "aff_domain": "siat.ac.cn;sydney.edu.au;siat.ac.cn;gmail.com;siat.ac.cn;siat.ac.cn", + "email": "siat.ac.cn;sydney.edu.au;siat.ac.cn;gmail.com;siat.ac.cn;siat.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;0+1;3;0+1;0+1", + "aff_unique_norm": "Shenzhen Institute of Advanced Technology;The Chinese University of Hong Kong;The University of Sydney;JD.com Inc.", + "aff_unique_dep": "Provincial Key Laboratory of Robotics and Intelligent System;;School of Computer Science;JD Explore Academy", + "aff_unique_url": "http://www.siat.ac.cn;https://www.cuhk.edu.hk;https://www.sydney.edu.au;https://www.jd.com", + "aff_unique_abbr": "SIAT;CUHK;USYD;JD.com", + "aff_campus_unique_index": "0+1;0+1;3;0+1;0+1", + "aff_campus_unique": "Shenzhen;Hong Kong;;Beijing", + "aff_country_unique_index": "0+0;1;0+0;0;0+0;0+0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26578", + "title": "Relation-Aware Language-Graph Transformer for Question Answering", + "track": "main", + "status": "Technical", + "abstract": "Question Answering (QA) is a task that 
entails reasoning over natural language contexts, and many relevant works augment language models (LMs) with graph neural networks (GNNs) to encode the Knowledge Graph (KG) information. However, most existing GNN-based modules for QA do not take advantage of rich relational information of KGs and depend on limited information interaction between the LM and the KG. To address these issues, we propose Question Answering Transformer (QAT), which is designed to jointly reason over language and graphs with respect to entity relations in a unified manner. Specifically, QAT constructs Meta-Path tokens, which learn relation-centric embeddings based on diverse structural and semantic relations. Then, our Relation-Aware Self-Attention module comprehensively integrates different modalities via the Cross-Modal Relative Position Bias, which guides information exchange between relevant entities of different modalities. We validate the effectiveness of QAT on commonsense question answering datasets like CommonsenseQA and OpenBookQA, and on a medical question answering dataset, MedQA-USMLE. On all the datasets, our method achieves state-of-the-art performance. 
Our code is available at http://github.com/mlvlab/QAT.", + "primary_area": "speech natural language processing", + "author": "Jinyoung Park; Hyeong Kyu Choi; Juyeon Ko; Hyeonjin Park; Ji-Hoon Kim; Jisu Jeong; Kyungmin Kim; Hyunwoo Kim", + "authorids": "", + "aff": "Korea University; Korea University; Korea University; NA VER; NA VER+NA VER Cloud+NA VER AI Lab; NA VER+NA VER Cloud+NA VER AI Lab; NA VER+NA VER Cloud+NA VER AI Lab; Korea University", + "bibtex": "@article{Park_Choi_Ko_Park_Kim_Jeong_Kim_Kim_2023, title={Relation-Aware Language-Graph Transformer for Question Answering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26578}, DOI={10.1609/aaai.v37i11.26578}, abstractNote={Question Answering (QA) is a task that entails reasoning over natural language contexts, and many relevant works augment language models (LMs) with graph neural networks (GNNs) to encode the Knowledge Graph (KG) information. However, most existing GNN-based modules for QA do not take advantage of rich relational information of KGs and depend on limited information interaction between the LM and the KG. To address these issues, we propose Question Answering Transformer (QAT), which is designed to jointly reason over language and graphs with respect to entity relations in a unified manner. Specifically, QAT constructs Meta-Path tokens, which learn relation-centric embeddings based on diverse structural and semantic relations. Then, our Relation-Aware Self-Attention module comprehensively integrates different modalities via the Cross-Modal Relative Position Bias, which guides information exchange between relevant entities of different modalities. We validate the effectiveness of QAT on commonsense question answering datasets like CommonsenseQA and OpenBookQA, and on a medical question answering dataset, MedQA-USMLE. On all the datasets, our method achieves state-of-the-art performance. 
Our code is available at http://github.com/mlvlab/QAT.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Park, Jinyoung and Choi, Hyeong Kyu and Ko, Juyeon and Park, Hyeonjin and Kim, Ji-Hoon and Jeong, Jisu and Kim, Kyungmin and Kim, Hyunwoo}, year={2023}, month={Jun.}, pages={13457-13464} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26578/26350", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26578", + "pdf_size": 1292597, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9181169117706157827&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;navercorp.com;navercorp.com;navercorp.com;navercorp.com;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;navercorp.com;navercorp.com;navercorp.com;navercorp.com;korea.ac.kr", + "github": "http://github.com/mlvlab/QAT", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;2+3;2+3;2+3;0", + "aff_unique_norm": "Korea University;;NAVER Cloud;NAVER Corporation", + "aff_unique_dep": ";;;AI Lab", + "aff_unique_url": "https://www.korea.ac.kr;;https://www.naver.com;https://www.naver.com", + "aff_unique_abbr": "KU;;NAVER;NAVER", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0+0;0+0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "article-25790", + "title": "Relational Program Synthesis with Numerical Reasoning", + "track": "main", + "status": "Technical", + "abstract": "Learning programs with numerical values is fundamental to many AI applications, including bio-informatics and drug design. However, current program synthesis approaches struggle to learn programs with numerical values.\nAn especially difficult problem is learning continuous values from multiple examples, such as intervals. 
To overcome this limitation, we introduce an inductive logic programming approach which combines relational learning with numerical reasoning. Our approach, which we call NumSynth, uses satisfiability modulo theories solvers to efficiently learn programs with numerical values. Our approach can identify numerical values in linear arithmetic fragments, such as real difference logic, and from infinite domains, such as real numbers or integers. Our experiments on four diverse domains, including game playing and program synthesis, show that our approach can (i) learn programs with numerical values from linear arithmetical reasoning, and (ii) outperform existing approaches in terms of predictive accuracies and learning times.", + "primary_area": "knowledge representation and reasoning", + "author": "C\u00e9line Hocquette; Andrew Cropper", + "authorids": "", + "aff": "University of Oxford; University of Oxford", + "bibtex": "@article{Hocquette_Cropper_2023, title={Relational Program Synthesis with Numerical Reasoning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25790}, DOI={10.1609/aaai.v37i5.25790}, abstractNote={Learning programs with numerical values is fundamental to many AI applications, including bio-informatics and drug design. However, current program synthesis approaches struggle to learn programs with numerical values.\nAn especially difficult problem is learning continuous values from multiple examples, such as intervals. To overcome this limitation, we introduce an inductive logic programming approach which combines relational learning with numerical reasoning. Our approach, which we call NumSynth, uses satisfiability modulo theories solvers to efficiently learn programs with numerical values. Our approach can identify numerical values in linear arithmetic fragments, such as real difference logic, and from infinite domains, such as real numbers or integers. 
Our experiments on four diverse domains, including game playing and program synthesis, show that our approach can (i) learn programs with numerical values from linear arithmetical reasoning, and (ii) outperform existing approaches in terms of predictive accuracies and learning times.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hocquette, C\u00e9line and Cropper, Andrew}, year={2023}, month={Jun.}, pages={6425-6433} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25790/25562", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25790", + "pdf_size": 1058249, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3342598358504902800&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 10, + "aff_domain": "cs.ox.ac.uk;cs.ox.ac.uk", + "email": "cs.ox.ac.uk;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26064", + "title": "Reliable Robustness Evaluation via Automatically Constructed Attack Ensembles", + "track": "main", + "status": "Technical", + "abstract": "Attack Ensemble (AE), which combines multiple attacks together, provides a reliable way to evaluate adversarial robustness. In practice, AEs are often constructed and tuned by human experts, which however tends to be sub-optimal and time-consuming. In this work, we present AutoAE, a conceptually simple approach for automatically constructing AEs. In brief, AutoAE repeatedly adds the attack and its iteration steps to the ensemble that maximizes ensemble improvement per additional iteration consumed. 
We show theoretically that AutoAE yields AEs provably within a constant factor of the optimal for a given defense. We then use AutoAE to construct two AEs for l\u221e and l2 attacks, and apply them without any tuning or adaptation to 45 top adversarial defenses on the RobustBench leaderboard. In all except one cases we achieve equal or better (often the latter) robustness evaluation than existing AEs, and notably, in 29 cases we achieve better robustness evaluation than the best known one. Such performance of AutoAE shows itself as a reliable evaluation protocol for adversarial robustness, which further indicates the huge potential of automatic AE construction. Code is available at https://github.com/LeegerPENG/AutoAE.", + "primary_area": "machine learning ii", + "author": "Shengcai Liu; Fu Peng; Ke Tang", + "authorids": "", + "aff": "Research Institute of Trustworthy Autonomous Systems, Southern University of Science and Technology, Shenzhen 518055, China + Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen 518055, China; Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen 518055, China; Research Institute of Trustworthy Autonomous Systems, Southern University of Science and Technology, Shenzhen 518055, China + Department of Computer Science and Engineering, Southern University of Science and Technology, Shenzhen 518055, China", + "bibtex": "@article{Liu_Peng_Tang_2023, title={Reliable Robustness Evaluation via Automatically Constructed Attack Ensembles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26064}, DOI={10.1609/aaai.v37i7.26064}, abstractNote={Attack Ensemble (AE), which combines multiple attacks together, provides a reliable way to evaluate adversarial robustness. In practice, AEs are often constructed and tuned by human experts, which however tends to be sub-optimal and time-consuming. 
In this work, we present AutoAE, a conceptually simple approach for automatically constructing AEs. In brief, AutoAE repeatedly adds the attack and its iteration steps to the ensemble that maximizes ensemble improvement per additional iteration consumed. We show theoretically that AutoAE yields AEs provably within a constant factor of the optimal for a given defense. We then use AutoAE to construct two AEs for l\u221e and l2 attacks, and apply them without any tuning or adaptation to 45 top adversarial defenses on the RobustBench leaderboard. In all except one cases we achieve equal or better (often the latter) robustness evaluation than existing AEs, and notably, in 29 cases we achieve better robustness evaluation than the best known one. Such performance of AutoAE shows itself as a reliable evaluation protocol for adversarial robustness, which further indicates the huge potential of automatic AE construction. Code is available at https://github.com/LeegerPENG/AutoAE.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Shengcai and Peng, Fu and Tang, Ke}, year={2023}, month={Jun.}, pages={8852-8860} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26064/25836", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26064", + "pdf_size": 294561, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7454262679362876757&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sustech.edu.cn;mail.sustech.edu.cn;sustech.edu.cn", + "email": "sustech.edu.cn;mail.sustech.edu.cn;sustech.edu.cn", + "github": "https://github.com/LeegerPENG/AutoAE", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0;0+0", + "aff_unique_norm": "Southern University of Science and Technology", + "aff_unique_dep": "Research Institute of Trustworthy Autonomous Systems", + "aff_unique_url": "https://www.sustech.edu.cn", + "aff_unique_abbr": "SUSTech", + 
"aff_campus_unique_index": "0+0;0;0+0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26511", + "title": "RenewNAT: Renewing Potential Translation for Non-autoregressive Transformer", + "track": "main", + "status": "Technical", + "abstract": "Non-autoregressive neural machine translation (NAT) models are proposed to accelerate the inference process while maintaining relatively high performance. However, existing NAT models are difficult to achieve the desired efficiency-quality trade-off. For one thing, fully NAT models with efficient inference perform inferior to their autoregressive counterparts. For another, iterative NAT models can, though, achieve comparable performance while diminishing the advantage of speed. In this paper, we propose RenewNAT, a flexible framework with high efficiency and effectiveness, to incorporate the merits of fully and iterative NAT models. RenewNAT first generates the potential translation results and then renews them in a single pass. It can achieve significant performance improvements at the same expense as traditional NAT models (without introducing additional model parameters and decoding latency). 
Experimental results on various translation benchmarks (e.g., 4 WMT) show that our framework consistently improves the performance of strong fully NAT methods (e.g., GLAT and DSLP) without additional speed overhead.", + "primary_area": "speech natural language processing", + "author": "Pei Guo; Yisheng Xiao; Juntao Li; Min Zhang", + "authorids": "", + "aff": "Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University; Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University; Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University; Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University", + "bibtex": "@article{Guo_Xiao_Li_Zhang_2023, title={RenewNAT: Renewing Potential Translation for Non-autoregressive Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26511}, DOI={10.1609/aaai.v37i11.26511}, abstractNote={Non-autoregressive neural machine translation (NAT) models are proposed to accelerate the inference process while maintaining relatively high performance. However, existing NAT models are difficult to achieve the desired efficiency-quality trade-off. For one thing, fully NAT models with efficient inference perform inferior to their autoregressive counterparts. For another, iterative NAT models can, though, achieve comparable performance while diminishing the advantage of speed. In this paper, we propose RenewNAT, a flexible framework with high efficiency and effectiveness, to incorporate the merits of fully and iterative NAT models. RenewNAT first generates the potential translation results and then renews them in a single pass. It can achieve significant performance improvements at the same expense as traditional NAT models (without introducing additional model parameters and decoding latency). 
Experimental results on various translation benchmarks (e.g., 4 WMT) show that our framework consistently improves the performance of strong fully NAT methods (e.g., GLAT and DSLP) without additional speed overhead.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Pei and Xiao, Yisheng and Li, Juntao and Zhang, Min}, year={2023}, month={Jun.}, pages={12854-12862} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26511/26283", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26511", + "pdf_size": 189879, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16146062754354728928&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.suda.edu.cn;stu.suda.edu.cn;suda.edu.cn;suda.edu.cn", + "email": "stu.suda.edu.cn;stu.suda.edu.cn;suda.edu.cn;suda.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Soochow University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "https://www.soochow.edu.cn", + "aff_unique_abbr": "Soochow U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25642", + "title": "Repair Is Nearly Generation: Multilingual Program Repair with LLMs", + "track": "main", + "status": "Technical", + "abstract": "Most programmers make mistakes when writing code. Some of these mistakes are small and require few edits to the original program \u2013 a class of errors recently termed last mile mistakes. These errors break the flow for experienced developers and can stump novice programmers. Existing automated repair techniques targeting this class of errors are language-specific and do not easily carry over to new languages. 
Transferring symbolic approaches requires substantial engineering and neural approaches require data and retraining. We introduce RING, a multilingual repair engine powered by a large language model trained on code (LLMC) such as Codex. Such a multilingual engine enables a flipped model for programming assistance, one where the programmer writes code and the AI assistance suggests fixes, compared to traditional code suggestion technology. Taking inspiration from the way programmers manually fix bugs, we show that a prompt-based strategy that conceptualizes repair as localization, transformation, and candidate ranking, can successfully repair programs in multiple languages with minimal effort. We present the first results for such a multilingual repair engine by evaluating on 6 different languages and comparing performance to language-specific repair engines. We show that RING can outperform language-specific repair engines for three of these languages.", + "primary_area": "domain s of application", + "author": "Harshit Joshi; Jos\u00e9 Cambronero Sanchez; Sumit Gulwani; Vu Le; Gust Verbruggen; Ivan Radi\u010dek", + "authorids": "", + "aff": "Microsoft, India; Microsoft, USA; Microsoft, USA; Microsoft, USA; Microsoft, Croatia; Microsoft, Belgium", + "bibtex": "@article{Joshi_Cambronero Sanchez_Gulwani_Le_Verbruggen_Radi\u010dek_2023, title={Repair Is Nearly Generation: Multilingual Program Repair with LLMs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25642}, DOI={10.1609/aaai.v37i4.25642}, abstractNote={Most programmers make mistakes when writing code. Some of these mistakes are small and require few edits to the original program \u2013 a class of errors recently termed last mile mistakes. These errors break the flow for experienced developers and can stump novice programmers. Existing automated repair techniques targeting this class of errors are language-specific and do not easily carry over to new languages. 
Transferring symbolic approaches requires substantial engineering and neural approaches require data and retraining. We introduce RING, a multilingual repair engine powered by a large language model trained on code (LLMC) such as Codex. Such a multilingual engine enables a flipped model for programming assistance, one where the programmer writes code and the AI assistance suggests fixes, compared to traditional code suggestion technology. Taking inspiration from the way programmers manually fix bugs, we show that a prompt-based strategy that conceptualizes repair as localization, transformation, and candidate ranking, can successfully repair programs in multiple languages with minimal effort. We present the first results for such a multilingual repair engine by evaluating on 6 different languages and comparing performance to language-specific repair engines. We show that RING can outperform language-specific repair engines for three of these languages.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Joshi, Harshit and Cambronero Sanchez, Jos\u00e9 and Gulwani, Sumit and Le, Vu and Verbruggen, Gust and Radi\u010dek, Ivan}, year={2023}, month={Jun.}, pages={5131-5140} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25642/25414", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25642", + "pdf_size": 261931, + "gs_citation": 155, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=326265779757123959&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Microsoft Corporation;Microsoft", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.microsoft.com/en-in;https://www.microsoft.com", + "aff_unique_abbr": "Microsoft;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;2;3", + "aff_country_unique": "India;United States;Croatia;Belgium" + }, + { + "id": "article-26587", + "title": "Rephrasing the Reference for Non-autoregressive Machine Translation", + "track": "main", + "status": "Technical", + "abstract": "Non-autoregressive neural machine translation (NAT) models suffer from the multi-modality problem that there may exist multiple possible translations of a source sentence, so the reference sentence may be inappropriate for the training when the NAT output is closer to other translations. In response to this problem, we introduce a rephraser to provide a better training target for NAT by rephrasing the reference sentence according to the NAT output. As we train NAT based on the rephraser output rather than the reference sentence, the rephraser output should fit well with the NAT output and not deviate too far from the reference, which can be quantified as reward functions and optimized by reinforcement learning. Experiments on major WMT benchmarks and NAT baselines show that our approach consistently improves the translation quality of NAT. 
Specifically, our best variant achieves comparable performance to the autoregressive Transformer, while being 14.7 times more efficient in inference.", + "primary_area": "speech natural language processing", + "author": "Chenze Shao; Jinchao Zhang; Jie Zhou; Yang Feng", + "authorids": "", + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences", + "bibtex": "@article{Shao_Zhang_Zhou_Feng_2023, title={Rephrasing the Reference for Non-autoregressive Machine Translation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26587}, DOI={10.1609/aaai.v37i11.26587}, abstractNote={Non-autoregressive neural machine translation (NAT) models suffer from the multi-modality problem that there may exist multiple possible translations of a source sentence, so the reference sentence may be inappropriate for the training when the NAT output is closer to other translations. In response to this problem, we introduce a rephraser to provide a better training target for NAT by rephrasing the reference sentence according to the NAT output. As we train NAT based on the rephraser output rather than the reference sentence, the rephraser output should fit well with the NAT output and not deviate too far from the reference, which can be quantified as reward functions and optimized by reinforcement learning. Experiments on major WMT benchmarks and NAT baselines show that our approach consistently improves the translation quality of NAT. 
Specifically, our best variant achieves comparable performance to the autoregressive Transformer, while being 14.7 times more efficient in inference.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shao, Chenze and Zhang, Jinchao and Zhou, Jie and Feng, Yang}, year={2023}, month={Jun.}, pages={13538-13546} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26587/26359", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26587", + "pdf_size": 918820, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1567682519851347585&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "ict.ac.cn;tencent.com;tencent.com;ict.ac.cn", + "email": "ict.ac.cn;tencent.com;tencent.com;ict.ac.cn", + "github": "https://github.com/ictnlp/Rephraser-NAT", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Tencent Inc", + "aff_unique_dep": "Institute of Computing Technology;;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.tencent.com", + "aff_unique_abbr": "CAS;UCAS;Tencent", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26160", + "title": "Representation Learning by Detecting Incorrect Location Embeddings", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we introduce a novel self-supervised learning (SSL) loss for image representation learning. There is a growing belief that generalization in deep neural networks is linked to their ability to discriminate object shapes. Since object shape is related to the location of its parts, we propose to detect those that have been artificially misplaced. 
We represent object parts with image tokens and train a ViT to detect which token has been combined with an incorrect positional embedding. We then introduce sparsity in the inputs to make the model more robust to occlusions and to speed up the training. We call our method DILEMMA, which stands for Detection of Incorrect Location EMbeddings with MAsked inputs. We apply DILEMMA to MoCoV3, DINO and SimCLR and show an improvement in their performance of respectively 4.41%, 3.97%, and 0.5% under the same training time and with a linear probing transfer on ImageNet-1K. We also show full fine-tuning improvements of MAE combined with our method on ImageNet-100. We evaluate our method via fine-tuning on common SSL benchmarks. Moreover, we show that when downstream tasks are strongly reliant on shape (such as in the YOGA-82 pose dataset), our pre-trained features yield a significant gain over prior work.", + "primary_area": "machine learning iii", + "author": "Sepehr Sameni; Simon Jenni; Paolo Favaro", + "authorids": "", + "aff": "Computer Vision Group, University of Bern, Switzerland; Adobe Research; Computer Vision Group, University of Bern, Switzerland", + "bibtex": "@article{Sameni_Jenni_Favaro_2023, title={Representation Learning by Detecting Incorrect Location Embeddings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26160}, DOI={10.1609/aaai.v37i8.26160}, abstractNote={In this paper, we introduce a novel self-supervised learning (SSL) loss for image representation learning. There is a growing belief that generalization in deep neural networks is linked to their ability to discriminate object shapes. Since object shape is related to the location of its parts, we propose to detect those that have been artificially misplaced. We represent object parts with image tokens and train a ViT to detect which token has been combined with an incorrect positional embedding. 
We then introduce sparsity in the inputs to make the model more robust to occlusions and to speed up the training. We call our method DILEMMA, which stands for Detection of Incorrect Location EMbeddings with MAsked inputs. We apply DILEMMA to MoCoV3, DINO and SimCLR and show an improvement in their performance of respectively 4.41%, 3.97%, and 0.5% under the same training time and with a linear probing transfer on ImageNet-1K. We also show full fine-tuning improvements of MAE combined with our method on ImageNet-100. We evaluate our method via fine-tuning on common SSL benchmarks. Moreover, we show that when downstream tasks are strongly reliant on shape (such as in the YOGA-82 pose dataset), our pre-trained features yield a significant gain over prior work.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sameni, Sepehr and Jenni, Simon and Favaro, Paolo}, year={2023}, month={Jun.}, pages={9704-9713} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26160/25932", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26160", + "pdf_size": 1327703, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9400017203183540086&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "unibe.ch;adobe.com;unibe.ch", + "email": "unibe.ch;adobe.com;unibe.ch", + "github": "https://github.com/Separius/DILEMMA", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Bern;Adobe", + "aff_unique_dep": "Computer Vision Group;Adobe Research", + "aff_unique_url": "https://www.unibe.ch;https://research.adobe.com", + "aff_unique_abbr": ";Adobe", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "article-25702", + "title": "Representation with Incomplete Votes", + "track": "main", + "status": "Technical", + 
"abstract": "Platforms for online civic participation rely heavily on methods for condensing thousands of comments into a relevant handful, based on whether participants agree or disagree with them. These methods should guarantee fair representation of the participants, as their outcomes may affect the health of the conversation and inform impactful downstream decisions. To that end, we draw on the literature on approval-based committee elections. Our setting is novel in that the approval votes are incomplete since participants will typically not vote on all comments. We prove that this complication renders non-adaptive algorithms impractical in terms of the amount of information they must gather. Therefore, we develop an adaptive algorithm that uses information more efficiently by presenting incoming participants with statements that appear promising based on votes by previous participants. We prove that this method satisfies commonly used notions of fair representation, even when participants only vote on a small fraction of comments. Finally, an empirical evaluation using real data shows that the proposed algorithm provides representative outcomes in practice.", + "primary_area": "game theory and economic paradigms", + "author": "Daniel Halpern; Gregory Kehne; Ariel D. Procaccia; Jamie Tucker-Foltz; Manuel W\u00fcthrich", + "authorids": "", + "aff": "Harvard University; Harvard University; Harvard University; Harvard University; Harvard University", + "bibtex": "@article{Halpern_Kehne_Procaccia_Tucker-Foltz_W\u00fcthrich_2023, title={Representation with Incomplete Votes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25702}, DOI={10.1609/aaai.v37i5.25702}, abstractNote={Platforms for online civic participation rely heavily on methods for condensing thousands of comments into a relevant handful, based on whether participants agree or disagree with them. 
These methods should guarantee fair representation of the participants, as their outcomes may affect the health of the conversation and inform impactful downstream decisions. To that end, we draw on the literature on approval-based committee elections. Our setting is novel in that the approval votes are incomplete since participants will typically not vote on all comments. We prove that this complication renders non-adaptive algorithms impractical in terms of the amount of information they must gather. Therefore, we develop an adaptive algorithm that uses information more efficiently by presenting incoming participants with statements that appear promising based on votes by previous participants. We prove that this method satisfies commonly used notions of fair representation, even when participants only vote on a small fraction of comments. Finally, an empirical evaluation using real data shows that the proposed algorithm provides representative outcomes in practice.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Halpern, Daniel and Kehne, Gregory and Procaccia, Ariel D. 
and Tucker-Foltz, Jamie and W\u00fcthrich, Manuel}, year={2023}, month={Jun.}, pages={5657-5664} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25702/25474", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25702", + "pdf_size": 245308, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17299820482120540981&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Harvard University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.harvard.edu", + "aff_unique_abbr": "Harvard", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26824", + "title": "Reshaping State-Space Search: From Dominance to Contrastive Analysis", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "State-space search is paramount for intelligent decision making when long-term thinking is needed. We introduce dominance and contrastive analysis methods, which enable reasoning about the relative advantages among different courses of action. This re-shapes how agents reason and leads to new families of state-space search algorithms.", + "primary_area": "", + "author": "Alvaro Torralba", + "authorids": "", + "aff": "Aalborg University, Aalborg, Denmark", + "bibtex": "@article{Torralba_2024, title={Reshaping State-Space Search: From Dominance to Contrastive Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26824}, DOI={10.1609/aaai.v37i13.26824}, abstractNote={State-space search is paramount for intelligent decision making when long-term thinking is needed. We introduce dominance and contrastive analysis methods, which enable reasoning about the relative advantages among different courses of action. 
This re-shapes how agents reason and leads to new families of state-space search algorithms.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Torralba, Alvaro}, year={2024}, month={Jul.}, pages={15457-15457} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26824/26596", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26824", + "pdf_size": 45076, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:mSnw644qlcgJ:scholar.google.com/&scioq=Reshaping+State-Space+Search:+From+Dominance+to+Contrastive+Analysis&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "", + "email": "", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Aalborg University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.aau.dk", + "aff_unique_abbr": "AAU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Aalborg", + "aff_country_unique_index": "0", + "aff_country_unique": "Denmark" + }, + { + "id": "article-26261", + "title": "Resilient Binary Neural Network", + "track": "main", + "status": "Technical", + "abstract": "Binary neural networks (BNNs) have received ever-increasing popularity for their great capability of reducing storage burden as well as quickening inference time. However, there is a severe performance drop compared with real-valued networks, due to its intrinsic frequent weight oscillation during training. In this paper, we introduce a Resilient Binary Neural Network (ReBNN) to mitigate the frequent oscillation for better BNNs' training. We identify that the weight oscillation mainly stems from the non-parametric scaling factor. To address this issue, we propose to parameterize the scaling factor and introduce a weighted reconstruction loss to build an adaptive training objective. 
For the first time, we show that the weight oscillation is controlled by the balanced parameter attached to the reconstruction loss, which provides a theoretical foundation to parameterize it in back propagation. Based on this, we learn our ReBNN by calculating the balanced parameter based on its maximum magnitude, which can effectively mitigate the weight oscillation with a resilient training process. Extensive experiments are conducted upon various network models, such as ResNet and Faster-RCNN for computer vision, as well as BERT for natural language processing. The results demonstrate the overwhelming performance of our ReBNN over prior arts. For example, our ReBNN achieves 66.9% Top-1 accuracy with ResNet-18 backbone on the ImageNet dataset, surpassing existing state-of-the-arts by a significant margin. Our code is open-sourced at https://github.com/SteveTsui/ReBNN.", + "primary_area": "machine learning iv", + "author": "Sheng Xu; Yanjing Li; Teli Ma; Mingbao Lin; Hao Dong; Baochang Zhang; Peng Gao; Jinhu Lu", + "authorids": "", + "aff": "Beihang University; Beihang University; Shanghai AI Laboratory; Tencent; Peking University; Beihang University+Zhongguancun Laboratory; Shanghai AI Laboratory; Beihang University+Zhongguancun Laboratory", + "bibtex": "@article{Xu_Li_Ma_Lin_Dong_Zhang_Gao_Lu_2023, title={Resilient Binary Neural Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26261}, DOI={10.1609/aaai.v37i9.26261}, abstractNote={Binary neural networks (BNNs) have received ever-increasing popularity for their great capability of reducing storage burden as well as quickening inference time. However, there is a severe performance drop compared with {real-valued} networks, due to its intrinsic frequent weight oscillation during training. In this paper, we introduce a Resilient Binary Neural Network (ReBNN) to mitigate the frequent oscillation for better BNNs\u2019 training. 
We identify that the weight oscillation mainly stems from the non-parametric scaling factor. To address this issue, we propose to parameterize the scaling factor and introduce a weighted reconstruction loss to build an adaptive training objective. For the first time, we show that the weight oscillation is controlled by the balanced parameter attached to the reconstruction loss, which provides a theoretical foundation to parameterize it in back propagation. Based on this, we learn our ReBNN by calculating the balanced parameter based on its maximum magnitude, which can effectively mitigate the weight oscillation with a resilient training process. Extensive experiments are conducted upon various network models, such as ResNet and Faster-RCNN for computer vision, as well as BERT for natural language processing. The results demonstrate the overwhelming performance of our ReBNN over prior arts. For example, our ReBNN achieves 66.9% Top-1 accuracy with ResNet-18 backbone on the ImageNet dataset, surpassing existing state-of-the-arts by a significant margin. 
Our code is open-sourced at https://github.com/SteveTsui/ReBNN.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Sheng and Li, Yanjing and Ma, Teli and Lin, Mingbao and Dong, Hao and Zhang, Baochang and Gao, Peng and Lu, Jinhu}, year={2023}, month={Jun.}, pages={10620-10628} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26261/26033", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26261", + "pdf_size": 1009136, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10586490940332628975&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;pjlab.org.cn;outlook.com;pku.edu.cn;buaa.edu.cn;pjlab.org.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;pjlab.org.cn;outlook.com;pku.edu.cn;buaa.edu.cn;pjlab.org.cn;buaa.edu.cn", + "github": "https://github.com/SteveTsui/ReBNN", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;2;3;0+4;1;0+4", + "aff_unique_norm": "Beihang University;Shanghai AI Laboratory;Tencent Holdings Limited;Peking University;Zhongguancun Laboratory", + "aff_unique_dep": ";;;;", + "aff_unique_url": "http://www.buaa.edu.cn/;https://www.shanghai-ai-lab.com;https://www.tencent.com;http://www.pku.edu.cn;", + "aff_unique_abbr": "BUAA;SAIL;Tencent;Peking U;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25170", + "title": "Resolving Task Confusion in Dynamic Expansion Architectures for Class Incremental Learning", + "track": "main", + "status": "Technical", + "abstract": "The dynamic expansion architecture is becoming popular in class incremental learning, mainly due to its advantages in alleviating catastrophic forgetting. 
However, task confusion is not well assessed within this framework, e.g., the discrepancy between classes of different tasks is not well learned (i.e., inter-task confusion, ITC), and certain priority is still given to the latest class batch (i.e., old-new confusion, ONC). We empirically validate the side effects of the two types of confusion. Meanwhile, a novel solution called Task Correlated Incremental Learning (TCIL) is proposed to encourage discriminative and fair feature utilization across tasks. TCIL performs a multi-level knowledge distillation to propagate knowledge learned from old tasks to the new one. It establishes information flow paths at both feature and logit levels, enabling the learning to be aware of old classes. Besides, attention mechanism and classifier re-scoring are applied to generate more fair classification scores. We conduct extensive experiments on CIFAR100 and ImageNet100 datasets. The results demonstrate that TCIL consistently achieves state-of-the-art accuracy. It mitigates both ITC and ONC, while showing advantages in battle with catastrophic forgetting even no rehearsal memory is reserved. 
Source code: https://github.com/YellowPancake/TCIL.", + "primary_area": "computer vision i", + "author": "Bingchen Huang; Zhineng Chen; Peng Zhou; Jiayin Chen; Zuxuan Wu", + "authorids": "", + "aff": "Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University+Shanghai Collaborative Innovation Center on Intelligent Visual Computing; Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University+Shanghai Collaborative Innovation Center on Intelligent Visual Computing; University of Maryland, College Park, MD, USA; Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University+Shanghai Collaborative Innovation Center on Intelligent Visual Computing; Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University+Shanghai Collaborative Innovation Center on Intelligent Visual Computing", + "bibtex": "@article{Huang_Chen_Zhou_Chen_Wu_2023, title={Resolving Task Confusion in Dynamic Expansion Architectures for Class Incremental Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25170}, DOI={10.1609/aaai.v37i1.25170}, abstractNote={The dynamic expansion architecture is becoming popular in class incremental learning, mainly due to its advantages in alleviating catastrophic forgetting. However, task confusion is not well assessed within this framework, e.g., the discrepancy between classes of different tasks is not well learned (i.e., inter-task confusion, ITC), and certain priority is still given to the latest class batch (i.e., old-new confusion, ONC). We empirically validate the side effects of the two types of confusion. Meanwhile, a novel solution called Task Correlated Incremental Learning (TCIL) is proposed to encourage discriminative and fair feature utilization across tasks. 
TCIL performs a multi-level knowledge distil- lation to propagate knowledge learned from old tasks to the new one. It establishes information flow paths at both fea- ture and logit levels, enabling the learning to be aware of old classes. Besides, attention mechanism and classifier re- scoring are applied to generate more fair classification scores. We conduct extensive experiments on CIFAR100 and Ima- geNet100 datasets. The results demonstrate that TCIL con- sistently achieves state-of-the-art accuracy. It mitigates both ITC and ONC, while showing advantages in battle with catas- trophic forgetting even no rehearsal memory is reserved. Source code: https://github.com/YellowPancake/TCIL.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Bingchen and Chen, Zhineng and Zhou, Peng and Chen, Jiayin and Wu, Zuxuan}, year={2023}, month={Jun.}, pages={908-916} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25170/24942", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25170", + "pdf_size": 1582556, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4951855782945641408&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "m.fudan.edu.cn;fudan.edu.cn;terpmail.umd.edu;m.fudan.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn;terpmail.umd.edu;m.fudan.edu.cn;fudan.edu.cn", + "github": "https://github.com/YellowPancake/TCIL", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;0+1;0+1", + "aff_unique_norm": "Fudan University;Shanghai Collaborative Innovation Center on Intelligent Visual Computing;University of Maryland", + "aff_unique_dep": "School of Computer Science;Intelligent Visual Computing;", + "aff_unique_url": "https://www.fudan.edu.cn;;https://www.umd.edu", + "aff_unique_abbr": "Fudan;;UMD", + "aff_campus_unique_index": "0;0;2;0;0", + "aff_campus_unique": "Shanghai;;College Park", + "aff_country_unique_index": 
"0+0;0+0;1;0+0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26380", + "title": "Resource Sharing through Multi-Round Matchings", + "track": "main", + "status": "Technical", + "abstract": "Applications such as employees sharing office spaces over a workweek\ncan be modeled as problems where agents are matched to resources\nover multiple rounds. Agents' requirements limit the set of compatible\nresources and the rounds in which they want to be matched. Viewing such an\napplication as a multi-round matching problem on a bipartite compatibility\ngraph between agents and resources, we show that a solution \n(i.e., a set of matchings, with one matching per round) can be found\nefficiently if one exists. To cope with situations where a solution does not exist, we consider two extensions. In\nthe first extension, a benefit function is defined for each agent and the\nobjective is to find a multi-round matching to maximize the total benefit. For a\ngeneral class of benefit functions satisfying certain properties (including\ndiminishing returns), we show that this multi-round matching problem is\nefficiently solvable. This class includes utilitarian and Rawlsian welfare\nfunctions. \nFor another benefit function, we show that the maximization\nproblem is NP-hard. \nIn the second extension, the objective is to generate advice to\neach agent (i.e., a subset of requirements to be relaxed) subject to a\nbudget constraint so that the agent can be matched.\nWe show that this budget-constrained advice generation problem is NP-hard.\nFor this problem, we develop an integer linear programming formulation as well\nas a heuristic based on local search.\n We experimentally evaluate our algorithms on\nsynthetic networks and apply them to two real-world situations: shared\noffice spaces and matching courses to classrooms.", + "primary_area": "multiagent systems", + "author": "Yohai Trabelsi; Abhijin Adiga; Sarit Kraus; S. S. Ravi; Daniel J. 
Rosenkrantz", + "authorids": "", + "aff": "Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Biocomplexity Institute and Initiative, Univ. of Virginia, Charlottesville, V A, USA; Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Biocomplexity Institute and Initiative, Univ. of Virginia, Charlottesville, V A, USA + Dept. of Computer Science, University at Albany \u2013 SUNY , Albany, NY , USA; Biocomplexity Institute and Initiative, Univ. of Virginia, Charlottesville, V A, USA + Dept. of Computer Science, University at Albany \u2013 SUNY , Albany, NY , USA", + "bibtex": "@article{Trabelsi_Adiga_Kraus_Ravi_Rosenkrantz_2023, title={Resource Sharing through Multi-Round Matchings}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26380}, DOI={10.1609/aaai.v37i10.26380}, abstractNote={Applications such as employees sharing office spaces over a workweek\ncan be modeled as problems where agents are matched to resources\nover multiple rounds. Agents\u2019 requirements limit the set of compatible\nresources and the rounds in which they want to be matched. Viewing such an\napplication as a multi-round matching problem on a bipartite compatibility\ngraph between agents and resources, we show that a solution (i.e., a set of matchings, with one matching per round) can be found\nefficiently if one exists. To cope with situations where a solution does not exist, we consider two extensions. In\nthe first extension, a benefit function is defined for each agent and the\nobjective is to find a multi-round matching to maximize the total benefit. For a\ngeneral class of benefit functions satisfying certain properties (including\ndiminishing returns), we show that this multi-round matching problem is\nefficiently solvable. This class includes utilitarian and Rawlsian welfare\nfunctions. For another benefit function, we show that the maximization\nproblem is NP-hard. 
In the second extension, the objective is to generate advice to\neach agent (i.e., a subset of requirements to be relaxed) subject to a\nbudget constraint so that the agent can be matched.\nWe show that this budget-constrained advice generation problem is NP-hard.\nFor this problem, we develop an integer linear programming formulation as well\nas a heuristic based on local search. We experimentally evaluate our algorithms on\nsynthetic networks and apply them to two real-world situations: shared\noffice spaces and matching courses to classrooms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Trabelsi, Yohai and Adiga, Abhijin and Kraus, Sarit and Ravi, S. S. and Rosenkrantz, Daniel J.}, year={2023}, month={Jun.}, pages={11681-11690} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26380/26152", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26380", + "pdf_size": 349293, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14263185836071402269&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;virginia.edu;cs.biu.ac.il;gmail.com;gmail.com", + "email": "gmail.com;virginia.edu;cs.biu.ac.il;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1+2;1+2", + "aff_unique_norm": "Bar-Ilan University;University of Virginia;University at Albany \u2013 SUNY", + "aff_unique_dep": "Department of Computer Science;Biocomplexity Institute and Initiative;Dept. 
of Computer Science", + "aff_unique_url": "https://www.biu.ac.il;https://www.virginia.edu;https://www.albany.edu", + "aff_unique_abbr": "BIU;UVA;UAlbany", + "aff_campus_unique_index": "0;1;0;1+2;1+2", + "aff_campus_unique": "Ramat Gan;Charlottesville;Albany", + "aff_country_unique_index": "0;1;0;1+1;1+1", + "aff_country_unique": "Israel;United States" + }, + { + "id": "article-26885", + "title": "Responsible Robotics: A Socio-Ethical Addition to Robotics Courses", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "We are witnessing a rapid increase in real-world autonomous robotic deployments in environments ranging from indoor homes and commercial establishments to large-scale urban areas, with applications ranging from domestic assistance to urban last-mile delivery. The developers of these robots inevitably have to make impactful design decisions to ensure commercially viability, but such decisions have serious real-world consequences. Unfortunately it is not uncommon for such projects to face intense bouts of social backlash, which can be attributed to a wide variety of causes, ranging from inappropriate technical design choices to transgressions of social norms and lack of community engagement.\n\nTo better prepare students for the rigors of developing and deploying real-world robotics systems, we developed a Responsible Robotics teaching module, intended to be included in upper-division and graduate level robotics courses. Our module is structured as a role playing exercise which aims to equip students with a framework for navigating the conflicting goals of human actors which govern robots in the field. We report on instructor reflections and anonymous survey responses from offering our responsible robotics module in both a graduate-level, and an upper-division undergraduate robotics course at UT Austin. 
The responses indicate that students gained a deeper understanding of the socio-technical factors of real-world robotics deployments than they might have using self-study methods, and the students proactively suggested that such modules should be more broadly included in CS courses.", + "primary_area": "", + "author": "Joshua Vekhter; Joydeep Biswas", + "authorids": "", + "aff": "The University of Texas at Austin; The University of Texas at Austin", + "bibtex": "@article{Vekhter_Biswas_2024, title={Responsible Robotics: A Socio-Ethical Addition to Robotics Courses}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26885}, DOI={10.1609/aaai.v37i13.26885}, abstractNote={We are witnessing a rapid increase in real-world autonomous robotic deployments in environments ranging from indoor homes and commercial establishments to large-scale urban areas, with applications ranging from domestic assistance to urban last-mile delivery. The developers of these robots inevitably have to make impactful design decisions to ensure commercially viability, but such decisions have serious real-world consequences. Unfortunately it is not uncommon for such projects to face intense bouts of social backlash, which can be attributed to a wide variety of causes, ranging from inappropriate technical design choices to transgressions of social norms and lack of community engagement. To better prepare students for the rigors of developing and deploying real-world robotics systems, we developed a Responsible Robotics teaching module, intended to be included in upper-division and graduate level robotics courses. Our module is structured as a role playing exercise which aims to equip students with a framework for navigating the conflicting goals of human actors which govern robots in the field. 
We report on instructor reflections and anonymous survey responses from offering our responsible robotics module in both a graduate-level, and an upper-division undergraduate robotics course at UT Austin. The responses indicate that students gained a deeper understanding of the socio-technical factors of real-world robotics deployments than they might have using self-study methods, and the students proactively suggested that such modules should be more broadly included in CS courses.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vekhter, Joshua and Biswas, Joydeep}, year={2024}, month={Jul.}, pages={15877-15885} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26885/26657", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26885", + "pdf_size": 180826, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13107544377116535045&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.utexas.edu;cs.utexas.edu", + "email": "cs.utexas.edu;cs.utexas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Texas at Austin", + "aff_unique_dep": "", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26038", + "title": "Restructuring Graph for Higher Homophily via Adaptive Spectral Clustering", + "track": "main", + "status": "Technical", + "abstract": "While a growing body of literature has been studying new Graph Neural Networks (GNNs) that work on both homophilic and heterophilic graphs, little has been done on adapting classical GNNs to less-homophilic graphs. 
Although the ability to handle less-homophilic graphs is restricted, classical GNNs still stand out in several nice properties such as efficiency, simplicity, and explainability. In this work, we propose a novel graph restructuring method that can be integrated into any type of GNNs, including classical GNNs, to leverage the benefits of existing GNNs while alleviating their limitations. Our contribution is threefold: a) learning the weight of pseudo-eigenvectors for an adaptive spectral clustering that aligns well with known node labels, b) proposing a new density-aware homophilic metric that is robust to label imbalance, and c) reconstructing the adjacency matrix based on the result of adaptive spectral clustering to maximize the homophilic scores. The experimental results show that our graph restructuring method can significantly boost the performance of six classical GNNs by an average of 25% on less-homophilic graphs. The boosted performance is comparable to state-of-the-art methods.", + "primary_area": "machine learning ii", + "author": "Shouheng Li; Dongwoo Kim; Qing Wang", + "authorids": "", + "aff": "School of Computing, Australian National University, Canberra, Australia + Data61, CSIRO, Canberra, Australia; CSE & GSAI, POSTECH, Pohang, South Korea; School of Computing, Australian National University, Canberra, Australia", + "bibtex": "@article{Li_Kim_Wang_2023, title={Restructuring Graph for Higher Homophily via Adaptive Spectral Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26038}, DOI={10.1609/aaai.v37i7.26038}, abstractNote={While a growing body of literature has been studying new Graph Neural Networks (GNNs) that work on both homophilic and heterophilic graphs, little has been done on adapting classical GNNs to less-homophilic graphs. Although the ability to handle less-homophilic graphs is restricted, classical GNNs still stand out in several nice properties such as efficiency, simplicity, and explainability. 
In this work, we propose a novel graph restructuring method that can be integrated into any type of GNNs, including classical GNNs, to leverage the benefits of existing GNNs while alleviating their limitations. Our contribution is threefold: a) learning the weight of pseudo-eigenvectors for an adaptive spectral clustering that aligns well with known node labels, b) proposing a new density-aware homophilic metric that is robust to label imbalance, and c) reconstructing the adjacency matrix based on the result of adaptive spectral clustering to maximize the homophilic scores. The experimental results show that our graph restructuring method can significantly boost the performance of six classical GNNs by an average of 25% on less-homophilic graphs. The boosted performance is comparable to state-of-the-art methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shouheng and Kim, Dongwoo and Wang, Qing}, year={2023}, month={Jun.}, pages={8622-8630} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26038/25810", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26038", + "pdf_size": 1382970, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2453174545420974404&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "anu.edu.au;postech.ac.kr;anu.edu.au", + "email": "anu.edu.au;postech.ac.kr;anu.edu.au", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0", + "aff_unique_norm": "Australian National University;CSIRO;POSTECH", + "aff_unique_dep": "School of Computing;Data61;CSE & GSAI", + "aff_unique_url": "https://www.anu.edu.au;https://www.csiro.au;https://www.postech.ac.kr", + "aff_unique_abbr": "ANU;CSIRO;POSTECH", + "aff_campus_unique_index": "0+0;1;0", + "aff_campus_unique": "Canberra;Pohang", + "aff_country_unique_index": "0+0;1;0", + "aff_country_unique": "Australia;South Korea" + }, + { + "id": 
"article-26325", + "title": "Rethinking Alignment and Uniformity in Unsupervised Image Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised image segmentation aims to match low-level visual features with semantic-level representations without outer supervision. In this paper, we address the critical properties from the view of feature alignments and feature uniformity for UISS models. We also make a comparison between UISS and image-wise representation learning. Based on the analysis, we argue that the existing MI-based methods in UISS suffer from representation collapse. By this, we proposed a robust network called Semantic Attention Network(SAN), in which a new module Semantic Attention(SEAT) is proposed to generate pixel-wise and semantic features dynamically. Experimental results on multiple semantic segmentation benchmarks show that our unsupervised segmentation framework specializes in catching semantic representations, which outperforms all the unpretrained and even several pretrained methods.", + "primary_area": "machine learning iv", + "author": "Daoan Zhang; Chenming Li; Haoquan Li; Wenjian Huang; Lingyun Huang; Jianguo Zhang", + "authorids": "", + "aff": "Southern University of Science and Technology+Ping An Technology (Shenzhen) Co., Ltd.; Southern University of Science and Technology; Southern University of Science and Technology; Southern University of Science and Technology; Ping An Technology (Shenzhen) Co., Ltd.; Southern University of Science and Technology+Peng Cheng Laboratory", + "bibtex": "@article{Zhang_Li_Li_Huang_Huang_Zhang_2023, title={Rethinking Alignment and Uniformity in Unsupervised Image Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26325}, DOI={10.1609/aaai.v37i9.26325}, abstractNote={Unsupervised image segmentation aims to match low-level visual features with semantic-level representations without outer supervision. 
In this paper, we address the critical properties from the view of feature alignments and feature uniformity for UISS models. We also make a comparison between UISS and image-wise representation learning. Based on the analysis, we argue that the existing MI-based methods in UISS suffer from representation collapse. By this, we proposed a robust network called Semantic Attention Network(SAN), in which a new module Semantic Attention(SEAT) is proposed to generate pixel-wise and semantic features dynamically. Experimental results on multiple semantic segmentation benchmarks show that our unsupervised segmentation framework specializes in catching semantic representations, which outperforms all the unpretrained and even several pretrained methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Daoan and Li, Chenming and Li, Haoquan and Huang, Wenjian and Huang, Lingyun and Zhang, Jianguo}, year={2023}, month={Jun.}, pages={11192-11200} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26325/26097", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26325", + "pdf_size": 2423069, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5441133424484061697&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "mail.sustech.edu.cn;mail.sustech.edu.cn;mail.sustech.edu.cn;sustech.edu.cn;foxmail.com;sustech.edu.cn", + "email": "mail.sustech.edu.cn;mail.sustech.edu.cn;mail.sustech.edu.cn;sustech.edu.cn;foxmail.com;sustech.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;0;1;0+2", + "aff_unique_norm": "Southern University of Science and Technology;Ping An Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sustech.edu.cn;https://www.pingan.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "SUSTech;Ping An Tech;PCL", + "aff_campus_unique_index": "1;1;", + "aff_campus_unique": 
";Shenzhen", + "aff_country_unique_index": "0+0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25332", + "title": "Rethinking Data Augmentation for Single-Source Domain Generalization in Medical Image Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Single-source domain generalization (SDG) in medical image segmentation is a challenging yet essential task as domain shifts are quite common among clinical image datasets. Previous attempts most conduct global-only/random augmentation. Their augmented samples are usually insufficient in diversity and informativeness, thus failing to cover the possible target domain distribution. In this paper, we rethink the data augmentation strategy for SDG in medical image segmentation. Motivated by the class-level representation invariance and style mutability of medical images, we hypothesize that unseen target data can be sampled from a linear combination of C (the class number) random variables, where each variable follows a location-scale distribution at the class level. Accordingly, data augmented can be readily made by sampling the random variables through a general form. On the empirical front, we implement such strategy with constrained Bezier transformation on both global and local (i.e. class-level) regions, which can largely increase the augmentation diversity. A Saliency-balancing Fusion mechanism is further proposed to enrich the informativeness by engaging the gradient information, guiding augmentation with proper orientation and magnitude. As an important contribution, we prove theoretically that our proposed augmentation can lead to an upper bound of the generalization risk on the unseen target domain, thus confirming our hypothesis. Combining the two strategies, our Saliency-balancing Location-scale Augmentation (SLAug) exceeds the state-of-the-art works by a large margin in two challenging SDG tasks. 
Code is available at https://github.com/Kaiseem/SLAug.", + "primary_area": "computer vision ii", + "author": "Zixian Su; Kai Yao; Xi Yang; Kaizhu Huang; Qiufeng Wang; Jie Sun", + "authorids": "", + "aff": "University of Liverpool, Liverpool, the United Kingdom + School of Advanced Technology, Xi\u2019an Jiaotong-Liverpool University (XJTLU), Suzhou, China; University of Liverpool, Liverpool, the United Kingdom + School of Advanced Technology, Xi\u2019an Jiaotong-Liverpool University (XJTLU), Suzhou, China; School of Advanced Technology, Xi\u2019an Jiaotong-Liverpool University (XJTLU), Suzhou, China; Data Science Research Center, Duke Kunshan University, Kunshan, China; School of Advanced Technology, Xi\u2019an Jiaotong-Liverpool University (XJTLU), Suzhou, China; School of Advanced Technology, Xi\u2019an Jiaotong-Liverpool University (XJTLU), Suzhou, China", + "bibtex": "@article{Su_Yao_Yang_Huang_Wang_Sun_2023, title={Rethinking Data Augmentation for Single-Source Domain Generalization in Medical Image Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25332}, DOI={10.1609/aaai.v37i2.25332}, abstractNote={Single-source domain generalization (SDG) in medical image segmentation is a challenging yet essential task as domain shifts are quite common among clinical image datasets. Previous attempts most conduct global-only/random augmentation. Their augmented samples are usually insufficient in diversity and informativeness, thus failing to cover the possible target domain distribution. In this paper, we rethink the data augmentation strategy for SDG in medical image segmentation. Motivated by the class-level representation invariance and style mutability of medical images, we hypothesize that unseen target data can be sampled from a linear combination of C (the class number) random variables, where each variable follows a location-scale distribution at the class level. 
Accordingly, data augmented can be readily made by sampling the random variables through a general form. On the empirical front, we implement such strategy with constrained Bezier transformation on both global and local (i.e. class-level) regions, which can largely increase the augmentation diversity. A Saliency-balancing Fusion mechanism is further proposed to enrich the informativeness by engaging the gradient information, guiding augmentation with proper orientation and magnitude. As an important contribution, we prove theoretically that our proposed augmentation can lead to an upper bound of the generalization risk on the unseen target domain, thus confirming our hypothesis. Combining the two strategies, our Saliency-balancing Location-scale Augmentation (SLAug) exceeds the state-of-the-art works by a large margin in two challenging SDG tasks. Code is available at https://github.com/Kaiseem/SLAug.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Su, Zixian and Yao, Kai and Yang, Xi and Huang, Kaizhu and Wang, Qiufeng and Sun, Jie}, year={2023}, month={Jun.}, pages={2366-2374} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25332/25104", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25332", + "pdf_size": 3886558, + "gs_citation": 82, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12644160789628167982&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "dukekunshan.edu.cn;xjtlu.edu.cn; ; ; ;", + "email": "dukekunshan.edu.cn;xjtlu.edu.cn; ; ; ;", + "github": "https://github.com/Kaiseem/SLAug", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;1;2;1;1", + "aff_unique_norm": "University of Liverpool;Xi'an Jiaotong-Liverpool University;Duke Kunshan University", + "aff_unique_dep": ";School of Advanced Technology;Data Science Research Center", + "aff_unique_url": 
"https://www.liverpool.ac.uk;https://www.xjtlu.edu.cn;https://www.dukekunshan.edu.cn", + "aff_unique_abbr": "Liv Uni;XJTLU;DKU", + "aff_campus_unique_index": "0+1;0+1;1;2;1;1", + "aff_campus_unique": "Liverpool;Suzhou;Kunshan", + "aff_country_unique_index": "0+1;0+1;1;1;1;1", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "article-26136", + "title": "Rethinking Data-Free Quantization as a Zero-Sum Game", + "track": "main", + "status": "Technical", + "abstract": "Data-free quantization (DFQ) recovers the performance of quantized network (Q) without accessing the real data, but generates the fake sample via a generator (G) by learning from full-precision network (P) instead. However, such sample generation process is totally independence of Q, specialized as failing to consider the adaptability of the generated samples, i.e., beneficial or adversarial, over the learning process of Q, resulting into non-ignorable performance loss. Building on this, several crucial questions --- how to measure and exploit the sample adaptability to Q under varied bit-width scenarios? how to generate the samples with desirable adaptability to benefit the quantized network? --- impel us to revisit DFQ. In this paper, we answer the above questions from a game-theory perspective to specialize DFQ as a zero-sum game between two players --- a generator and a quantized network, and further propose an Adaptability-aware Sample Generation (AdaSG) method. Technically, AdaSG reformulates DFQ as a dynamic maximization-vs-minimization game process anchored on the sample adaptability. The maximization process aims to generate the sample with desirable adaptability, such sample adaptability is further reduced by the minimization process after calibrating Q for performance recovery. The Balance Gap is defined to guide the stationarity of the game process to maximally benefit Q. The theoretical analysis and empirical studies verify the superiority of AdaSG over the state-of-the-arts. 
Our code is available at https://github.com/hfutqian/AdaSG.", + "primary_area": "machine learning iii", + "author": "Biao Qian; Yang Wang; Richang Hong; Meng Wang", + "authorids": "", + "aff": "Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, School of Computer Science and Information Engineering, Hefei University of Technology, China; Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, School of Computer Science and Information Engineering, Hefei University of Technology, China; Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, School of Computer Science and Information Engineering, Hefei University of Technology, China; Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, School of Computer Science and Information Engineering, Hefei University of Technology, China", + "bibtex": "@article{Qian_Wang_Hong_Wang_2023, title={Rethinking Data-Free Quantization as a Zero-Sum Game}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26136}, DOI={10.1609/aaai.v37i8.26136}, abstractNote={Data-free quantization (DFQ) recovers the performance of quantized network (Q) without accessing the real data, but generates the fake sample via a generator (G) by learning from full-precision network (P) instead. However, such sample generation process is totally independence of Q, specialized as failing to consider the adaptability of the generated samples, i.e., beneficial or adversarial, over the learning process of Q, resulting into non-ignorable performance loss. Building on this, several crucial questions --- how to measure and exploit the sample adaptability to Q under varied bit-width scenarios? how to generate the samples with desirable adaptability to benefit the quantized network? --- impel us to revisit DFQ. 
In this paper, we answer the above questions from a game-theory perspective to specialize DFQ as a zero-sum game between two players --- a generator and a quantized network, and further propose an Adaptability-aware Sample Generation (AdaSG) method. Technically, AdaSG reformulates DFQ as a dynamic maximization-vs-minimization game process anchored on the sample adaptability. The maximization process aims to generate the sample with desirable adaptability, such sample adaptability is further reduced by the minimization process after calibrating Q for performance recovery. The Balance Gap is defined to guide the stationarity of the game process to maximally benefit Q. The theoretical analysis and empirical studies verify the superiority of AdaSG over the state-of-the-arts. Our code is available at https://github.com/hfutqian/AdaSG.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qian, Biao and Wang, Yang and Hong, Richang and Wang, Meng}, year={2023}, month={Jun.}, pages={9489-9497} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26136/25908", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26136", + "pdf_size": 8495929, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2303016044642025969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "hfut.edu.cn;hfut.edu.cn;hfut.edu.cn;gmail.com", + "email": "hfut.edu.cn;hfut.edu.cn;hfut.edu.cn;gmail.com", + "github": "https://github.com/hfutqian/AdaSG", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Hefei University of Technology", + "aff_unique_dep": "School of Computer Science and Information Engineering", + "aff_unique_url": "http://www.hfut.edu.cn", + "aff_unique_abbr": "HFUT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Hefei", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-25413", + "title": "Rethinking Disparity: A Depth Range Free Multi-View Stereo Based on Disparity", + "track": "main", + "status": "Technical", + "abstract": "Existing learning-based multi-view stereo (MVS) methods rely on the depth range to build the 3D cost volume and may fail when the range is too large or unreliable. To address this problem, we propose a disparity-based MVS method based on the epipolar disparity flow (E-flow), called DispMVS, which infers the depth information from the pixel movement between two views. The core of DispMVS is to construct a 2D cost volume on the image plane along the epipolar line between each pair (between the reference image and several source images) for pixel matching and fuse uncountable depths triangulated from each pair by multi-view geometry to ensure multi-view consistency. To be robust, DispMVS starts from a randomly initialized depth map and iteratively refines the depth map with the help of the coarse-to-fine strategy. Experiments on DTUMVS and Tanks\\&Temple datasets show that DispMVS is not sensitive to the depth range and achieves state-of-the-art results with lower GPU memory.", + "primary_area": "computer vision iii", + "author": "Qingsong Yan; Qiang Wang; Kaiyong Zhao; Bo Li; Xiaowen Chu; Fei Deng", + "authorids": "", + "aff": "Wuhan University; Harbin Institute of Technology (Shenzhen); XGRIDS; The Hong Kong University of Science and Technology; The Hong Kong University of Science and Technology (Guangzhou); Wuhan University", + "bibtex": "@article{Yan_Wang_Zhao_Li_Chu_Deng_2023, title={Rethinking Disparity: A Depth Range Free Multi-View Stereo Based on Disparity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25413}, DOI={10.1609/aaai.v37i3.25413}, abstractNote={Existing learning-based multi-view stereo (MVS) methods rely on the depth range to build the 3D cost volume and may fail when the range is too large or unreliable. 
To address this problem, we propose a disparity-based MVS method based on the epipolar disparity flow (E-flow), called DispMVS, which infers the depth information from the pixel movement between two views. The core of DispMVS is to construct a 2D cost volume on the image plane along the epipolar line between each pair (between the reference image and several source images) for pixel matching and fuse uncountable depths triangulated from each pair by multi-view geometry to ensure multi-view consistency. To be robust, DispMVS starts from a randomly initialized depth map and iteratively refines the depth map with the help of the coarse-to-fine strategy. Experiments on DTUMVS and Tanks\\&Temple datasets show that DispMVS is not sensitive to the depth range and achieves state-of-the-art results with lower GPU memory.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Qingsong and Wang, Qiang and Zhao, Kaiyong and Li, Bo and Chu, Xiaowen and Deng, Fei}, year={2023}, month={Jun.}, pages={3091-3099} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25413/25185", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25413", + "pdf_size": 3323799, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7209929263321043956&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "whu.edu.cn;hit.edu.cn;xgrids.com;cse.ust.hk;ust.hk;sgg.whu.edu.cn", + "email": "whu.edu.cn;hit.edu.cn;xgrids.com;cse.ust.hk;ust.hk;sgg.whu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;0", + "aff_unique_norm": "Wuhan University;Harbin Institute of Technology;XGRIDS;Hong Kong University of Science and Technology;The Hong Kong University of Science and Technology", + "aff_unique_dep": ";;;;", + "aff_unique_url": "http://www.whu.edu.cn/;http://en.hhit.edu.cn/;;https://www.ust.hk;https://www.ust.hk", + "aff_unique_abbr": "WHU;HIT;;HKUST;HKUST", + 
"aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Shenzhen;Guangzhou", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-25089", + "title": "Rethinking Interpretation: Input-Agnostic Saliency Mapping of Deep Visual Classifiers", + "track": "main", + "status": "Technical", + "abstract": "Saliency methods provide post-hoc model interpretation by attributing input features to the model outputs. Current methods mainly achieve this using a single input sample, thereby failing to answer input-independent inquiries about the model. We also show that input-specific saliency mapping is intrinsically susceptible to misleading feature attribution. Current attempts to use `general' input features for model interpretation assume access to a dataset containing those features, which biases the interpretation. Addressing the gap, we introduce a new perspective of input-agnostic saliency mapping that computationally estimates the high-level features attributed by the model to its outputs. These features are geometrically correlated, and are computed by accumulating model's gradient information with respect to an unrestricted data distribution. To compute these features, we nudge independent data points over the model loss surface towards the local minima associated by a human-understandable concept, e.g., class label for classifiers. With a systematic projection, scaling and refinement process, this information is transformed into an interpretable visualization without compromising its model-fidelity. The visualization serves as a stand-alone qualitative interpretation. 
With an extensive evaluation, we not only demonstrate successful visualizations for a variety of concepts for large-scale models, but also showcase an interesting utility of this new form of saliency mapping by identifying backdoor signatures in compromised classifiers.", + "primary_area": "computer vision i", + "author": "Naveed Akhtar; Mohammad Amir Asim Khan Jalwana", + "authorids": "", + "aff": "The University of Western Australia; The University of Western Australia", + "bibtex": "@article{Akhtar_Jalwana_2023, title={Rethinking Interpretation: Input-Agnostic Saliency Mapping of Deep Visual Classifiers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25089}, DOI={10.1609/aaai.v37i1.25089}, abstractNote={Saliency methods provide post-hoc model interpretation by attributing input features to the model outputs. Current methods mainly achieve this using a single input sample, thereby failing to answer input-independent inquiries about the model. We also show that input-specific saliency mapping is intrinsically susceptible to misleading feature attribution. Current attempts to use `general\u2019 input features for model interpretation assume access to a dataset containing those features, which biases the interpretation. Addressing the gap, we introduce a new perspective of input-agnostic saliency mapping that computationally estimates the high-level features attributed by the model to its outputs. These features are geometrically correlated, and are computed by accumulating model\u2019s gradient information with respect to an unrestricted data distribution. To compute these features, we nudge independent data points over the model loss surface towards the local minima associated by a human-understandable concept, e.g., class label for classifiers. With a systematic projection, scaling and refinement process, this information is transformed into an interpretable visualization without compromising its model-fidelity. 
The visualization serves as a stand-alone qualitative interpretation. With an extensive evaluation, we not only demonstrate successful visualizations for a variety of concepts for large-scale models, but also showcase an interesting utility of this new form of saliency mapping by identifying backdoor signatures in compromised classifiers.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Akhtar, Naveed and Jalwana, Mohammad Amir Asim Khan}, year={2023}, month={Jun.}, pages={178-186} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25089/24861", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25089", + "pdf_size": 3024763, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7526382613342224185&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff_domain": "uwa.edu.au;gmail.com", + "email": "uwa.edu.au;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Western Australia", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uwa.edu.au", + "aff_unique_abbr": "UWA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26751", + "title": "Rethinking Label Refurbishment: Model Robustness under Label Noise", + "track": "aaai special track", + "status": "Technical", + "abstract": "A family of methods that generate soft labels by mixing the hard labels with a certain distribution, namely label refurbishment, are widely used to train deep neural networks. However, some of these methods are still poorly understood in the presence of label noise. In this paper, we revisit four label refurbishment methods and reveal the strong connection between them. We find that they affect the neural network models in different manners. 
Two of them smooth the estimated posterior for regularization effects, and the other two force the model to produce high-confidence predictions. We conduct extensive experiments to evaluate related methods and observe that both effects improve the model generalization under label noise. Furthermore, we theoretically show that both effects lead to generalization guarantees on the clean distribution despite being trained with noisy labels.", + "primary_area": "safe and robust ai", + "author": "Yangdi Lu; Zhiwei Xu; Wenbo He", + "authorids": "", + "aff": "McMaster University, Department of Computing and Software, Canada; McMaster University, Department of Computing and Software, Canada; McMaster University, Department of Computing and Software, Canada", + "bibtex": "@article{Lu_Xu_He_2023, title={Rethinking Label Refurbishment: Model Robustness under Label Noise}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26751}, DOI={10.1609/aaai.v37i12.26751}, abstractNote={A family of methods that generate soft labels by mixing the hard labels with a certain distribution, namely label refurbishment, are widely used to train deep neural networks. However, some of these methods are still poorly understood in the presence of label noise. In this paper, we revisit four label refurbishment methods and reveal the strong connection between them. We find that they affect the neural network models in different manners. Two of them smooth the estimated posterior for regularization effects, and the other two force the model to produce high-confidence predictions. We conduct extensive experiments to evaluate related methods and observe that both effects improve the model generalization under label noise. 
Furthermore, we theoretically show that both effects lead to generalization guarantees on the clean distribution despite being trained with noisy labels.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Yangdi and Xu, Zhiwei and He, Wenbo}, year={2023}, month={Jun.}, pages={15000-15008} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26751/26523", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26751", + "pdf_size": 9716779, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7496144474622795447&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "mcmaster.ca;mcmaster.ca;mcmaster.ca", + "email": "mcmaster.ca;mcmaster.ca;mcmaster.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "McMaster University", + "aff_unique_dep": "Department of Computing and Software", + "aff_unique_url": "https://www.mcmaster.ca", + "aff_unique_abbr": "McMaster", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25438", + "title": "Rethinking Rotation Invariance with Point Cloud Registration", + "track": "main", + "status": "Technical", + "abstract": "Recent investigations on rotation invariance for 3D point clouds have been devoted to devising rotation-invariant feature descriptors or learning canonical spaces where objects are semantically aligned. Examinations of learning frameworks for invariance have seldom been looked into. In this work, we review rotation invariance (RI) in terms of point cloud registration (PCR) and propose an effective framework for rotation invariance learning via three sequential stages, namely rotation-invariant shape encoding, aligned feature integration, and deep feature registration. 
We first encode shape descriptors constructed with respect to reference frames defined over different scales, e.g., local patches and global topology, to generate rotation-invariant latent shape codes. Within the integration stage, we propose an Aligned Integration Transformer (AIT) to produce a discriminative feature representation by integrating point-wise self- and cross-relations established within the shape codes. Meanwhile, we adopt rigid transformations between reference frames to align the shape codes for feature consistency across different scales. Finally, the deep integrated feature is registered to both rotation-invariant shape codes to maximize their feature similarities, such that rotation invariance of the integrated feature is preserved and shared semantic information is implicitly extracted from shape codes. Experimental results on 3D shape classification, part segmentation, and retrieval tasks prove the feasibility of our framework. Our project page is released at: https://rotation3d.github.io/.", + "primary_area": "computer vision iii", + "author": "Jianhui Yu; Chaoyi Zhang; Weidong Cai", + "authorids": "", + "aff": "School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia; School of Computer Science, University of Sydney, Australia", + "bibtex": "@article{Yu_Zhang_Cai_2023, title={Rethinking Rotation Invariance with Point Cloud Registration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25438}, DOI={10.1609/aaai.v37i3.25438}, abstractNote={Recent investigations on rotation invariance for 3D point clouds have been devoted to devising rotation-invariant feature descriptors or learning canonical spaces where objects are semantically aligned. Examinations of learning frameworks for invariance have seldom been looked into. 
In this work, we review rotation invariance (RI) in terms of point cloud registration (PCR) and propose an effective framework for rotation invariance learning via three sequential stages, namely rotation-invariant shape encoding, aligned feature integration, and deep feature registration. We first encode shape descriptors constructed with respect to reference frames defined over different scales, e.g., local patches and global topology, to generate rotation-invariant latent shape codes. Within the integration stage, we propose an Aligned Integration Transformer (AIT) to produce a discriminative feature representation by integrating point-wise self- and cross-relations established within the shape codes. Meanwhile, we adopt rigid transformations between reference frames to align the shape codes for feature consistency across different scales. Finally, the deep integrated feature is registered to both rotation-invariant shape codes to maximize their feature similarities, such that rotation invariance of the integrated feature is preserved and shared semantic information is implicitly extracted from shape codes. Experimental results on 3D shape classification, part segmentation, and retrieval tasks prove the feasibility of our framework. 
Our project page is released at: https://rotation3d.github.io/.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Jianhui and Zhang, Chaoyi and Cai, Weidong}, year={2023}, month={Jun.}, pages={3313-3321} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25438/25210", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25438", + "pdf_size": 2916443, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6404281631493425836&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sydney.edu.au;sydney.edu.au;sydney.edu.au", + "email": "sydney.edu.au;sydney.edu.au;sydney.edu.au", + "github": "", + "project": "https://rotation3d.github.io/", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Sydney", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.sydney.edu.au", + "aff_unique_abbr": "USYD", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Sydney", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26788", + "title": "Rethinking Safe Control in the Presence of Self-Seeking Humans", + "track": "aaai special track", + "status": "Technical", + "abstract": "Safe control methods are often designed to behave safely even in worst-case human uncertainties. Such design can cause more aggressive human behaviors that exploit its conservatism and result in greater risk for everyone. However, this issue has not been systematically investigated previously. This paper uses an interaction-based payoff structure from evolutionary game theory to model humans\u2019 short-sighted, self-seeking behaviors. The model captures how prior human-machine interaction experience causes behavioral and strategic changes in humans in the long term. 
We then show that deterministic worst-case safe control techniques and equilibrium-based stochastic methods can have worse safety and performance trade-offs than a basic method that mediates human strategic changes. This finding suggests an urgent need to fundamentally rethink the safe control framework used in human-technology interaction in pursuit of greater safety for all.", + "primary_area": "safe and robust ai", + "author": "Zixuan Zhang; Maitham AL-Sunni; Haoming Jing; Hirokazu Shirado; Yorie Nakahira", + "authorids": "", + "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", + "bibtex": "@article{Zhang_AL-Sunni_Jing_Shirado_Nakahira_2023, title={Rethinking Safe Control in the Presence of Self-Seeking Humans}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26788}, DOI={10.1609/aaai.v37i12.26788}, abstractNote={Safe control methods are often designed to behave safely even in worst-case human uncertainties. Such design can cause more aggressive human behaviors that exploit its conservatism and result in greater risk for everyone. However, this issue has not been systematically investigated previously. This paper uses an interaction-based payoff structure from evolutionary game theory to model humans\u2019 short-sighted, self-seeking behaviors. The model captures how prior human-machine interaction experience causes behavioral and strategic changes in humans in the long term. We then show that deterministic worst-case safe control techniques and equilibrium-based stochastic methods can have worse safety and performance trade-offs than a basic method that mediates human strategic changes. 
This finding suggests an urgent need to fundamentally rethink the safe control framework used in human-technology interaction in pursuit of greater safety for all.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zixuan and AL-Sunni, Maitham and Jing, Haoming and Shirado, Hirokazu and Nakahira, Yorie}, year={2023}, month={Jun.}, pages={15331-15339} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26788/26560", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26788", + "pdf_size": 2566251, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5175633463998358516&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;cmu.edu;cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;cmu.edu;cmu.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27728", + "title": "Retraction Note to: McOmet: Multimodal Fusion Transformer for Physical Audiovisual Commonsense Reasoning", + "track": "errata", + "status": "Technical", + "abstract": "", + "primary_area": "", + "author": "Daoming Zong; Shiliang Sun", + "authorids": "", + "aff": ";", + "bibtex": "@article{Zong_Sun_2024, title={Retraction Note to: McOmet: Multimodal Fusion Transformer for Physical Audiovisual Commonsense Reasoning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27728}, DOI={10.1609/aaai.v37i13.27728}, abstractNote={<p>This Retraction Note refers to: <a href="https://doi.org/10.1609/aaai.v37i5.25813">RETRACTED: McOmet: Multimodal Fusion Transformer for Physical 
Audiovisual Commonsense Reasoning.</a></p>\n<p>The referenced article, published in Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence (AAAI 2023), has been retracted by agreement between the authors and the journal, as described in the PDF file for this Retraction Note.</p>\n<p>\u00a0</p>}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zong, Daoming and Sun, Shiliang}, year={2024}, month={Mar.}, pages={16497} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27728/32777", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27728", + "pdf_size": 213308, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:bxQkCEk5y3wJ:scholar.google.com/&scioq=Retraction+Note+to:+McOmet:+Multimodal+Fusion+Transformer+for+Physical+Audiovisual+Commonsense+Reasoning&hl=en&as_sdt=0,5", + "gs_version_total": 0, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "article-25664", + "title": "Retrosynthesis Prediction with Local Template Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Retrosynthesis, which predicts the reactants of a given target molecule, is an essential task for drug discovery. In recent years, the machine learing based retrosynthesis methods have achieved promising results. In this work, we introduce RetroKNN, a local reaction template retrieval method to further boost the performance of template-based systems with non-parametric retrieval. We first build an atom-template store and a bond-template store that contains the local templates in the training data, then retrieve from these templates with a k-nearest-neighbor (KNN) search during inference. The retrieved templates are combined with neural network predictions as the final output. 
Furthermore, we propose a lightweight adapter to adjust the weights when combing neural network and KNN predictions conditioned on the hidden representation and the retrieved templates. We conduct comprehensive experiments on two widely used benchmarks, the USPTO-50K and USPTO-MIT. Especially for the top-1 accuracy, we improved 7.1% on the USPTO-50K dataset and 12.0% on the USPTO-MIT dataset.These results demonstrate the effectiveness of our method.", + "primary_area": "domain s of application", + "author": "Shufang Xie; Rui Yan; Junliang Guo; Yingce Xia; Lijun Wu; Tao Qin", + "authorids": "", + "aff": "Beijing Key Laboratory of Big Data Management and Analysis Methods, Gaoling School of Artificial Intelligence (GSAI), Renmin University of China; Beijing Key Laboratory of Big Data Management and Analysis Methods, Gaoling School of Artificial Intelligence (GSAI), Renmin University of China; Microsoft Reserarch Aisa; Microsoft Reserarch AI4Science; Microsoft Reserarch AI4Science; Microsoft Reserarch AI4Science", + "bibtex": "@article{Xie_Yan_Guo_Xia_Wu_Qin_2023, title={Retrosynthesis Prediction with Local Template Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25664}, DOI={10.1609/aaai.v37i4.25664}, abstractNote={Retrosynthesis, which predicts the reactants of a given target molecule, is an essential task for drug discovery. In recent years, the machine learing based retrosynthesis methods have achieved promising results. In this work, we introduce RetroKNN, a local reaction template retrieval method to further boost the performance of template-based systems with non-parametric retrieval. We first build an atom-template store and a bond-template store that contains the local templates in the training data, then retrieve from these templates with a k-nearest-neighbor (KNN) search during inference. The retrieved templates are combined with neural network predictions as the final output. 
Furthermore, we propose a lightweight adapter to adjust the weights when combing neural network and KNN predictions conditioned on the hidden representation and the retrieved templates. We conduct comprehensive experiments on two widely used benchmarks, the USPTO-50K and USPTO-MIT. Especially for the top-1 accuracy, we improved 7.1% on the USPTO-50K dataset and 12.0% on the USPTO-MIT dataset.These results demonstrate the effectiveness of our method.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Shufang and Yan, Rui and Guo, Junliang and Xia, Yingce and Wu, Lijun and Qin, Tao}, year={2023}, month={Jun.}, pages={5330-5338} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25664/25436", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25664", + "pdf_size": 1523905, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12507838464784642234&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "ruc.edu.cn;ruc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;2;2", + "aff_unique_norm": "Renmin University of China;Microsoft Research Asia;Microsoft Research", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;Microsoft Research;AI4Science", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.microsoft.com/en-us/research/group/microsoft-research-asia;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "RUC;MSRA;Microsoft Research", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26533", + "title": "Reviewing Labels: Label Graph Network with Top-k Prediction Set for Relation Extraction", + "track": 
"main", + "status": "Technical", + "abstract": "The typical way for relation extraction is fine-tuning large pre-trained language models on task-specific datasets, then selecting the label with the highest probability of the output\ndistribution as the final prediction. However, the usage of the Top-k prediction set for a given sample is commonly overlooked. In this paper, we first reveal that the Top-k prediction\nset of a given sample contains useful information for predicting the correct label. To effectively utilizes the Top-k prediction set, we propose Label Graph Network with Top-k Prediction Set, termed as KLG. Specifically, for a given sample, we build a label graph to review candidate labels in the Top-k prediction set and learn the connections between them. We also design a dynamic k selection mechanism to learn more powerful and discriminative relation representation. Our experiments show that KLG achieves the best performances on three relation extraction datasets. Moreover, we observe thatKLG is more effective in dealing with long-tailed classes.", + "primary_area": "speech natural language processing", + "author": "Bo Li; Wei Ye; Jinglei Zhang; Shikun Zhang", + "authorids": "", + "aff": "National Engineering Research Center for Software Engineering, Peking University + School of Software and Microelectronics, Peking University; National Engineering Research Center for Software Engineering, Peking University; National Engineering Research Center for Software Engineering, Peking University + School of Software and Microelectronics, Peking University; National Engineering Research Center for Software Engineering, Peking University", + "bibtex": "@article{Li_Ye_Zhang_Zhang_2023, title={Reviewing Labels: Label Graph Network with Top-k Prediction Set for Relation Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26533}, DOI={10.1609/aaai.v37i11.26533}, abstractNote={The typical way for relation extraction is fine-tuning large 
pre-trained language models on task-specific datasets, then selecting the label with the highest probability of the output\ndistribution as the final prediction. However, the usage of the Top-k prediction set for a given sample is commonly overlooked. In this paper, we first reveal that the Top-k prediction\nset of a given sample contains useful information for predicting the correct label. To effectively utilizes the Top-k prediction set, we propose Label Graph Network with Top-k Prediction Set, termed as KLG. Specifically, for a given sample, we build a label graph to review candidate labels in the Top-k prediction set and learn the connections between them. We also design a dynamic k selection mechanism to learn more powerful and discriminative relation representation. Our experiments show that KLG achieves the best performances on three relation extraction datasets. Moreover, we observe thatKLG is more effective in dealing with long-tailed classes.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Bo and Ye, Wei and Zhang, Jinglei and Zhang, Shikun}, year={2023}, month={Jun.}, pages={13051-13058} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26533/26305", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26533", + "pdf_size": 372117, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9893144930920698058&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0;0+0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "National Engineering Research Center for Software Engineering", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25386", + "title": "Revisiting Classifier: Transferring Vision-Language Models for Video Recognition", + "track": "main", + "status": "Technical", + "abstract": "Transferring knowledge from task-agnostic pre-trained deep models for downstream tasks is an important topic in computer vision research. Along with the growth of computational capacity, we now have open-source vision-language pre-trained models in large scales of the model architecture and amount of data. In this study, we focus on transferring knowledge for video classification tasks. Conventional methods randomly initialize the linear classifier head for vision classification, but they leave the usage of the text encoder for downstream visual recognition tasks undiscovered. In this paper, we revise the role of the linear classifier and replace the classifier with the different knowledge from pre-trained model. We utilize the well-pretrained language model to generate good semantic target for efficient transferring learning. The empirical study shows that our method improves both the performance and the training speed of video classification, with a negligible change in the model. Our simple yet effective tuning paradigm achieves state-of-the-art performance and efficient training on various video recognition scenarios, i.e., zero-shot, few-shot, general recognition. In particular, our paradigm achieves the state-of-the-art accuracy of 87.8% on Kinetics-400, and also surpasses previous methods by 20~50% absolute top-1 accuracy under zero-shot, few-shot settings on five video datasets. 
Code and models are available at https://github.com/whwu95/Text4Vis.", + "primary_area": "computer vision iii", + "author": "Wenhao Wu; Zhun Sun; Wanli Ouyang", + "authorids": "", + "aff": "The University of Sydney, NSW, Australia; Baidu Inc., Beijing, China; Shanghai Artificial Intelligence Laboratory, Shanghai, China", + "bibtex": "@article{Wu_Sun_Ouyang_2023, title={Revisiting Classifier: Transferring Vision-Language Models for Video Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25386}, DOI={10.1609/aaai.v37i3.25386}, abstractNote={Transferring knowledge from task-agnostic pre-trained deep models for downstream tasks is an important topic in computer vision research. Along with the growth of computational capacity, we now have open-source vision-language pre-trained models in large scales of the model architecture and amount of data. In this study, we focus on transferring knowledge for video classification tasks. Conventional methods randomly initialize the linear classifier head for vision classification, but they leave the usage of the text encoder for downstream visual recognition tasks undiscovered. In this paper, we revise the role of the linear classifier and replace the classifier with the different knowledge from pre-trained model. We utilize the well-pretrained language model to generate good semantic target for efficient transferring learning. The empirical study shows that our method improves both the performance and the training speed of video classification, with a negligible change in the model. Our simple yet effective tuning paradigm achieves state-of-the-art performance and efficient training on various video recognition scenarios, i.e., zero-shot, few-shot, general recognition. In particular, our paradigm achieves the state-of-the-art accuracy of 87.8% on Kinetics-400, and also surpasses previous methods by 20~50% absolute top-1 accuracy under zero-shot, few-shot settings on five video datasets. 
Code and models are available at https://github.com/whwu95/Text4Vis.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Wenhao and Sun, Zhun and Ouyang, Wanli}, year={2023}, month={Jun.}, pages={2847-2855} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25386/25158", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25386", + "pdf_size": 442522, + "gs_citation": 121, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2635733220205723891&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;baidu.com;sydney.edu.au", + "email": "gmail.com;baidu.com;sydney.edu.au", + "github": "https://github.com/whwu95/Text4Vis", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "The University of Sydney;Baidu Inc.;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sydney.edu.au;https://www.baidu.com;", + "aff_unique_abbr": "USYD;Baidu;", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Beijing;Shanghai", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-26597", + "title": "Revisiting Denoising Diffusion Probabilistic Models for Speech Enhancement: Condition Collapse, Efficiency and Refinement", + "track": "main", + "status": "Technical", + "abstract": "Recent literature has shown that denoising diffusion probabilistic models (DDPMs) can be used to synthesize high-fidelity samples with a competitive (or sometimes better) quality than previous state-of-the-art approaches. However, few attempts have been made to apply DDPM for the speech enhancement task. The reported performance of the existing works is relatively poor and significantly inferior to other generative methods. In this work, we first reveal the difficulties in applying existing diffusion models to the field of speech enhancement. 
Then we introduce DR-DiffuSE, a simple and effective framework for speech enhancement using conditional diffusion models. We present three strategies (two in diffusion training and one in reverse sampling) to tackle the condition collapse and guarantee the sufficient use of condition information. For efficiency, we introduce the fast sampling technique to reduce the sampling process into several steps and exploit a refinement network to calibrate the defective speech. Our proposed method achieves the state-of-the-art performance to the GAN-based model and shows a significant improvement over existing DDPM-based algorithms.", + "primary_area": "speech natural language processing", + "author": "Wenxin Tai; Fan Zhou; Goce Trajcevski; Ting Zhong", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China; Iowa State University; University of Electronic Science and Technology of China", + "bibtex": "@article{Tai_Zhou_Trajcevski_Zhong_2023, title={Revisiting Denoising Diffusion Probabilistic Models for Speech Enhancement: Condition Collapse, Efficiency and Refinement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26597}, DOI={10.1609/aaai.v37i11.26597}, abstractNote={Recent literature has shown that denoising diffusion probabilistic models (DDPMs) can be used to synthesize high-fidelity samples with a competitive (or sometimes better) quality than previous state-of-the-art approaches. However, few attempts have been made to apply DDPM for the speech enhancement task. The reported performance of the existing works is relatively poor and significantly inferior to other generative methods. In this work, we first reveal the difficulties in applying existing diffusion models to the field of speech enhancement. Then we introduce DR-DiffuSE, a simple and effective framework for speech enhancement using conditional diffusion models. 
We present three strategies (two in diffusion training and one in reverse sampling) to tackle the condition collapse and guarantee the sufficient use of condition information. For efficiency, we introduce the fast sampling technique to reduce the sampling process into several steps and exploit a refinement network to calibrate the defective speech. Our proposed method achieves the state-of-the-art performance to the GAN-based model and shows a significant improvement over existing DDPM-based algorithms.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tai, Wenxin and Zhou, Fan and Trajcevski, Goce and Zhong, Ting}, year={2023}, month={Jun.}, pages={13627-13635} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26597/26369", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26597", + "pdf_size": 393602, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13138350473572538678&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;uestc.edu.cn;iastate.edu;uestc.edu.cn", + "email": "gmail.com;uestc.edu.cn;iastate.edu;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Electronic Science and Technology of China;Iowa State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.iastate.edu", + "aff_unique_abbr": "UESTC;ISU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26774", + "title": "Revisiting Item Promotion in GNN-Based Collaborative Filtering: A Masked Targeted Topological Attack Perspective", + "track": "aaai special track", + "status": "Technical", + "abstract": "Graph neural networks (GNN) based collaborative filtering (CF) has attracted increasing attention in e-commerce and 
financial marketing platforms. However, there still lack efforts to evaluate the robustness of such CF systems in deployment. Fundamentally different from existing attacks, this work revisits the item promotion task and reformulates it from a targeted topological attack perspective for the first time. Specifically, we first develop a targeted attack formulation to maximally increase a target item's popularity. We then leverage gradient-based optimizations to find a solution. However, we observe the gradient estimates often appear noisy due to the discrete nature of a graph, which leads to a degradation of attack ability. To resolve noisy gradient effects, we then propose a masked attack objective that can remarkably enhance the topological attack ability. Furthermore, we design a computationally efficient approach to the proposed attack, thus making it feasible to evaluate large-large CF systems. Experiments on two real-world datasets show the effectiveness of our attack in analyzing the robustness of GNN-based CF more practically.", + "primary_area": "safe and robust ai", + "author": "Yongwei Wang; Yong Liu; Zhiqi Shen", + "authorids": "", + "aff": "Joint NTU-WeBank Research Centre on Fintech, Nanyang Technological University; Joint NTU-UBC Research Centre of Excellence in Active Living for the Elderly, Nanyang Technological University; School of Computer Science and Engineering, Nanyang Technological University", + "bibtex": "@article{Wang_Liu_Shen_2023, title={Revisiting Item Promotion in GNN-Based Collaborative Filtering: A Masked Targeted Topological Attack Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26774}, DOI={10.1609/aaai.v37i12.26774}, abstractNote={Graph neural networks (GNN) based collaborative filtering (CF) has attracted increasing attention in e-commerce and financial marketing platforms. However, there still lack efforts to evaluate the robustness of such CF systems in deployment. 
Fundamentally different from existing attacks, this work revisits the item promotion task and reformulates it from a targeted topological attack perspective for the first time. Specifically, we first develop a targeted attack formulation to maximally increase a target item\u2019s popularity. We then leverage gradient-based optimizations to find a solution. However, we observe the gradient estimates often appear noisy due to the discrete nature of a graph, which leads to a degradation of attack ability. To resolve noisy gradient effects, we then propose a masked attack objective that can remarkably enhance the topological attack ability. Furthermore, we design a computationally efficient approach to the proposed attack, thus making it feasible to evaluate large-large CF systems. Experiments on two real-world datasets show the effectiveness of our attack in analyzing the robustness of GNN-based CF more practically.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yongwei and Liu, Yong and Shen, Zhiqi}, year={2023}, month={Jun.}, pages={15206-15214} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26774/26546", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26774", + "pdf_size": 276161, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14488366162665124293&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "email": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanyang Technological University", + "aff_unique_dep": "Joint NTU-WeBank Research Centre on Fintech", + "aff_unique_url": "https://www.ntu.edu.sg", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25367", + 
"title": "Revisiting Unsupervised Local Descriptor Learning", + "track": "main", + "status": "Technical", + "abstract": "Constructing accurate training tuples is crucial for unsupervised local descriptor learning, yet challenging due to the absence of patch labels. The state-of-the-art approach constructs tuples with heuristic rules, which struggle to precisely depict real-world patch transformations, in spite of enabling fast model convergence. A possible solution to alleviate the problem is the clustering-based approach, which can capture realistic patch variations and learn more accurate class decision boundaries, but suffers from slow model convergence. This paper presents HybridDesc, an unsupervised approach that learns powerful local descriptor models with fast convergence speed by combining the rule-based and clustering-based approaches to construct training tuples. In addition, HybridDesc also contributes two concrete enhancing mechanisms: (1) a Differentiable Hyperparameter Search (DHS) strategy to find the optimal hyperparameter setting of the rule-based approach so as to provide accurate prior for the clustering-based approach, (2) an On-Demand Clustering (ODC) method to reduce the clustering overhead of the clustering-based approach without eroding its advantage. 
Extensive experimental results show that HybridDesc can efficiently learn local descriptors that surpass existing unsupervised local descriptors and even rival competitive supervised ones.", + "primary_area": "computer vision iii", + "author": "Wufan Wang; Lei Zhang; Hua Huang", + "authorids": "", + "aff": "School of Computer Science and Technology, Beijing Institute of Technology; School of Computer Science and Technology, Beijing Institute of Technology; School of Artificial Intelligence, Beijing Normal University", + "bibtex": "@article{Wang_Zhang_Huang_2023, title={Revisiting Unsupervised Local Descriptor Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25367}, DOI={10.1609/aaai.v37i3.25367}, abstractNote={Constructing accurate training tuples is crucial for unsupervised local descriptor learning, yet challenging due to the absence of patch labels. The state-of-the-art approach constructs tuples with heuristic rules, which struggle to precisely depict real-world patch transformations, in spite of enabling fast model convergence. A possible solution to alleviate the problem is the clustering-based approach, which can capture realistic patch variations and learn more accurate class decision boundaries, but suffers from slow model convergence. This paper presents HybridDesc, an unsupervised approach that learns powerful local descriptor models with fast convergence speed by combining the rule-based and clustering-based approaches to construct training tuples. In addition, HybridDesc also contributes two concrete enhancing mechanisms: (1) a Differentiable Hyperparameter Search (DHS) strategy to find the optimal hyperparameter setting of the rule-based approach so as to provide accurate prior for the clustering-based approach, (2) an On-Demand Clustering (ODC) method to reduce the clustering overhead of the clustering-based approach without eroding its advantage. 
Extensive experimental results show that HybridDesc can efficiently learn local descriptors that surpass existing unsupervised local descriptors and even rival competitive supervised ones.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Wufan and Zhang, Lei and Huang, Hua}, year={2023}, month={Jun.}, pages={2680-2688} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25367/25139", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25367", + "pdf_size": 443296, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17557910957504718743&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bit.edu.cn;bit.edu.cn;bnu.edu.cn", + "email": "bit.edu.cn;bit.edu.cn;bnu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Beijing Institute of Technology;Beijing Normal University", + "aff_unique_dep": "School of Computer Science and Technology;School of Artificial Intelligence", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.bnu.edu.cn", + "aff_unique_abbr": "BIT;BNU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26748", + "title": "Revisiting the Importance of Amplifying Bias for Debiasing", + "track": "aaai special track", + "status": "Technical", + "abstract": "In image classification, debiasing aims to train a classifier to be less susceptible to dataset bias, the strong correlation between peripheral attributes of data samples and a target class. For example, even if the frog class in the dataset mainly consists of frog images with a swamp background (i.e., bias aligned samples), a debiased classifier should be able to correctly classify a frog at a beach (i.e., bias conflicting samples). 
Recent debiasing approaches commonly use two components for debiasing, a biased model fB and a debiased model fD. fB is trained to focus on bias aligned samples (i.e., overfitted to the bias) while fD is mainly trained with bias conflicting samples by concentrating on samples which fB fails to learn, leading fD to be less susceptible to the dataset bias. While the state of the art debiasing techniques have aimed to better train fD, we focus on training fB, an overlooked component until now. Our empirical analysis reveals that removing the bias conflicting samples from the training set for fB is important for improving the debiasing performance of fD. This is due to the fact that the bias conflicting samples work as noisy samples for amplifying the bias for fB since those samples do not include the bias attribute. To this end, we propose a simple yet effective data sample selection method which removes the bias conflicting samples to construct a bias amplified dataset for training fB. Our data sample selection method can be directly applied to existing reweighting based debiasing approaches, obtaining consistent performance boost and achieving the state of the art performance on both synthetic and real-world datasets.", + "primary_area": "safe and robust ai", + "author": "Jungsoo Lee; Jeonghoon Park; Daeyoung Kim; Juyoung Lee; Edward Choi; Jaegul Choo", + "authorids": "", + "aff": "KAIST+Kakao Enterprise, South Korea; KAIST+Kakao Enterprise, South Korea; KAIST+Kakao Enterprise, South Korea; Kakao Enterprise, South Korea; KAIST; KAIST", + "bibtex": "@article{Lee_Park_Kim_Lee_Choi_Choo_2023, title={Revisiting the Importance of Amplifying Bias for Debiasing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26748}, DOI={10.1609/aaai.v37i12.26748}, abstractNote={In image classification, debiasing aims to train a classifier to be less susceptible to dataset bias, the strong correlation between peripheral attributes of data samples and a target class. 
For example, even if the frog class in the dataset mainly consists of frog images with a swamp background (i.e., bias aligned samples), a debiased classifier should be able to correctly classify a frog at a beach (i.e., bias conflicting samples). Recent debiasing approaches commonly use two components for debiasing, a biased model fB and a debiased model fD. fB is trained to focus on bias aligned samples (i.e., overfitted to the bias) while fD is mainly trained with bias conflicting samples by concentrating on samples which fB fails to learn, leading fD to be less susceptible to the dataset bias. While the state of the art debiasing techniques have aimed to better train fD, we focus on training fB, an overlooked component until now. Our empirical analysis reveals that removing the bias conflicting samples from the training set for fB is important for improving the debiasing performance of fD. This is due to the fact that the bias conflicting samples work as noisy samples for amplifying the bias for fB since those samples do not include the bias attribute. To this end, we propose a simple yet effective data sample selection method which removes the bias conflicting samples to construct a bias amplified dataset for training fB. 
Our data sample selection method can be directly applied to existing reweighting based debiasing approaches, obtaining consistent performance boost and achieving the state of the art performance on both synthetic and real-world datasets.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Jungsoo and Park, Jeonghoon and Kim, Daeyoung and Lee, Juyoung and Choi, Edward and Choo, Jaegul}, year={2023}, month={Jun.}, pages={14974-14981} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26748/26520", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26748", + "pdf_size": 10098696, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15487099349862210843&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kakaoenterprise.com;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kakaoenterprise.com;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;1;0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Kakao Enterprise", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.kaist.ac.kr;https://enterprise.kakao.com", + "aff_unique_abbr": "KAIST;Kakao Enterprise", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25403", + "title": "Revisiting the Spatial and Temporal Modeling for Few-Shot Action Recognition", + "track": "main", + "status": "Technical", + "abstract": "Spatial and temporal modeling is one of the most core aspects of few-shot action recognition. Most previous works mainly focus on long-term temporal relation modeling based on high-level spatial representations, without considering the crucial low-level spatial features and short-term temporal relations. 
Actually, the former feature could bring rich local semantic information, and the latter feature could represent motion characteristics of adjacent frames, respectively. In this paper, we propose SloshNet, a new framework that revisits the spatial and temporal modeling for few-shot action recognition in a finer manner. First, to exploit the low-level spatial features, we design a feature fusion architecture search module to automatically search for the best combination of the low-level and high-level spatial features. Next, inspired by the recent transformer, we introduce a long-term temporal modeling module to model the global temporal relations based on the extracted spatial appearance features. Meanwhile, we design another short-term temporal modeling module to encode the motion characteristics between adjacent frame representations. After that, the final predictions can be obtained by feeding the embedded rich spatial-temporal features to a common frame-level class prototype matcher. We extensively validate the proposed SloshNet on four few-shot action recognition datasets, including Something-Something V2, Kinetics, UCF101, and HMDB51. It achieves favorable results against state-of-the-art methods in all datasets.", + "primary_area": "computer vision iii", + "author": "Jiazheng Xing; Mengmeng Wang; Yong Liu; Boyu Mu", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University", + "bibtex": "@article{Xing_Wang_Liu_Mu_2023, title={Revisiting the Spatial and Temporal Modeling for Few-Shot Action Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25403}, DOI={10.1609/aaai.v37i3.25403}, abstractNote={Spatial and temporal modeling is one of the most core aspects of few-shot action recognition. 
Most previous works mainly focus on long-term temporal relation modeling based on high-level spatial representations, without considering the crucial low-level spatial features and short-term temporal relations. Actually, the former feature could bring rich local semantic information, and the latter feature could represent motion characteristics of adjacent frames, respectively. In this paper, we propose SloshNet, a new framework that revisits the spatial and temporal modeling for few-shot action recognition in a finer manner. First, to exploit the low-level spatial features, we design a feature fusion architecture search module to automatically search for the best combination of the low-level and high-level spatial features. Next, inspired by the recent transformer, we introduce a long-term temporal modeling module to model the global temporal relations based on the extracted spatial appearance features. Meanwhile, we design another short-term temporal modeling module to encode the motion characteristics between adjacent frame representations. After that, the final predictions can be obtained by feeding the embedded rich spatial-temporal features to a common frame-level class prototype matcher. We extensively validate the proposed SloshNet on four few-shot action recognition datasets, including Something-Something V2, Kinetics, UCF101, and HMDB51. 
It achieves favorable results against state-of-the-art methods in all datasets.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xing, Jiazheng and Wang, Mengmeng and Liu, Yong and Mu, Boyu}, year={2023}, month={Jun.}, pages={3001-3009} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25403/25175", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25403", + "pdf_size": 12737266, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3773040825012219983&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;iipc.zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;iipc.zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26866", + "title": "Reward Design for an Online Reinforcement Learning Algorithm Supporting Oral Self-Care", + "track": "iaai technical track", + "status": "Technical", + "abstract": "While dental disease is largely preventable, professional advice on optimal oral hygiene practices is often forgotten or abandoned by patients. Therefore patients may benefit from timely and personalized encouragement to engage in oral self-care behaviors. In this paper, we develop an online reinforcement learning (RL) algorithm for use in optimizing the delivery of mobile-based prompts to encourage oral hygiene behaviors. 
One of the main challenges in developing such an algorithm is ensuring that the algorithm considers the impact of current actions on the effectiveness of future actions (i.e., delayed effects), especially when the algorithm has been designed to run stably and autonomously in a constrained, real-world setting characterized by highly noisy, sparse data. We address this challenge by designing a quality reward that maximizes the desired health outcome (i.e., high-quality brushing) while minimizing user burden. We also highlight a procedure for optimizing the hyperparameters of the reward by building a simulation environment test bed and evaluating candidates using the test bed. The RL algorithm discussed in this paper will be deployed in Oralytics. To the best of our knowledge, Oralytics is the first mobile health study utilizing an RL algorithm designed to prevent dental disease by optimizing the delivery of motivational messages supporting oral self-care behaviors.", + "primary_area": "emerging applications of ai", + "author": "Anna L. Trella; Kelly W. Zhang; Inbal Nahum-Shani; Vivek Shetty; Finale Doshi-Velez; Susan A. Murphy", + "authorids": "", + "aff": "Department of Computer Science, Harvard University; Department of Computer Science, Harvard University; Institute for Social Research, University of Michigan; Schools of Dentistry & Engineering, University of California, Los Angeles; Department of Computer Science, Harvard University; Department of Computer Science, Harvard University", + "bibtex": "@article{Trella_Zhang_Nahum-Shani_Shetty_Doshi-Velez_Murphy_2024, title={Reward Design for an Online Reinforcement Learning Algorithm Supporting Oral Self-Care}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26866}, DOI={10.1609/aaai.v37i13.26866}, abstractNote={While dental disease is largely preventable, professional advice on optimal oral hygiene practices is often forgotten or abandoned by patients. 
Therefore patients may benefit from timely and personalized encouragement to engage in oral self-care behaviors. In this paper, we develop an online reinforcement learning (RL) algorithm for use in optimizing the delivery of mobile-based prompts to encourage oral hygiene behaviors. One of the main challenges in developing such an algorithm is ensuring that the algorithm considers the impact of current actions on the effectiveness of future actions (i.e., delayed effects), especially when the algorithm has been designed to run stably and autonomously in a constrained, real-world setting characterized by highly noisy, sparse data. We address this challenge by designing a quality reward that maximizes the desired health outcome (i.e., high-quality brushing) while minimizing user burden. We also highlight a procedure for optimizing the hyperparameters of the reward by building a simulation environment test bed and evaluating candidates using the test bed. The RL algorithm discussed in this paper will be deployed in Oralytics. To the best of our knowledge, Oralytics is the first mobile health study utilizing an RL algorithm designed to prevent dental disease by optimizing the delivery of motivational messages supporting oral self-care behaviors.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Trella, Anna L. and Zhang, Kelly W. 
and Nahum-Shani, Inbal and Shetty, Vivek and Doshi-Velez, Finale and Murphy, Susan A.}, year={2024}, month={Jul.}, pages={15724-15730} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26866/26638", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26866", + "pdf_size": 187226, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14361321430502738868&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 8, + "aff_domain": "g.harvard.edu;seas.harvard.edu;umich.edu;ucla.edu;seas.harvard.edu;fas.harvard.edu", + "email": "g.harvard.edu;seas.harvard.edu;umich.edu;ucla.edu;seas.harvard.edu;fas.harvard.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;0;0", + "aff_unique_norm": "Harvard University;University of Michigan;University of California, Los Angeles", + "aff_unique_dep": "Department of Computer Science;Institute for Social Research;Schools of Dentistry & Engineering", + "aff_unique_url": "https://www.harvard.edu;https://www.umich.edu;https://www.ucla.edu", + "aff_unique_abbr": "Harvard;UM;UCLA", + "aff_campus_unique_index": "0;0;1;2;0;0", + "aff_campus_unique": "Cambridge;Ann Arbor;Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26240", + "title": "Reward Poisoning Attacks on Offline Multi-Agent Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "In offline multi-agent reinforcement learning (MARL), agents estimate policies from a given dataset. We study reward-poisoning attacks in this setting where an exogenous attacker modifies the rewards in the dataset before the agents see the dataset. The attacker wants to guide each agent into a nefarious target policy while minimizing the Lp norm of the reward modification. 
Unlike attacks on single-agent RL, we show that the attacker can install the target policy as a Markov Perfect Dominant Strategy Equilibrium (MPDSE), which rational agents are guaranteed to follow. This attack can be significantly cheaper than separate single-agent attacks. We show that the attack works on various MARL agents including uncertainty-aware learners, and we exhibit linear programs to efficiently solve the attack problem. We also study the relationship between the structure of the datasets and the minimal attack cost. Our work paves the way for studying defense in offline MARL.", + "primary_area": "machine learning iv", + "author": "Young Wu; Jeremy McMahan; Xiaojin Zhu; Qiaomin Xie", + "authorids": "", + "aff": "University of Wisconsin-Madison; University of Wisconsin-Madison; University of Wisconsin-Madison; University of Wisconsin-Madison", + "bibtex": "@article{Wu_McMahan_Zhu_Xie_2023, title={Reward Poisoning Attacks on Offline Multi-Agent Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26240}, DOI={10.1609/aaai.v37i9.26240}, abstractNote={In offline multi-agent reinforcement learning (MARL), agents estimate policies from a given dataset. We study reward-poisoning attacks in this setting where an exogenous attacker modifies the rewards in the dataset before the agents see the dataset. The attacker wants to guide each agent into a nefarious target policy while minimizing the Lp norm of the reward modification. Unlike attacks on single-agent RL, we show that the attacker can install the target policy as a Markov Perfect Dominant Strategy Equilibrium (MPDSE), which rational agents are guaranteed to follow. This attack can be significantly cheaper than separate single-agent attacks. We show that the attack works on various MARL agents including uncertainty-aware learners, and we exhibit linear programs to efficiently solve the attack problem. 
We also study the relationship between the structure of the datasets and the minimal attack cost. Our work paves the way for studying defense in offline MARL.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Young and McMahan, Jeremy and Zhu, Xiaojin and Xie, Qiaomin}, year={2023}, month={Jun.}, pages={10426-10434} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26240/26012", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26240", + "pdf_size": 157018, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2548973426401145747&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "cs.wisc.edu;wisc.edu;cs.wisc.edu;wisc.edu", + "email": "cs.wisc.edu;wisc.edu;cs.wisc.edu;wisc.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Wisconsin-Madison", + "aff_unique_dep": "", + "aff_unique_url": "https://www.wisc.edu", + "aff_unique_abbr": "UW-Madison", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Madison", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26367", + "title": "Reward-Based Negotiating Agent Strategies", + "track": "main", + "status": "Technical", + "abstract": "This study proposed a novel reward-based negotiating agent strategy using an issue-based represented deep policy network. We compared the negotiation strategies with reinforcement learning (RL) by the tournaments toward heuristics-based champion agents in multi-issue negotiation. A bilateral multi-issue negotiation in which the two agents exchange offers in turn was considered. Existing RL architectures for a negotiation strategy incorporate rich utility function that provides concrete information even though the rewards of RL are considered as generalized signals in practice. 
Additionally, in existing reinforcement learning architectures for negotiation strategies, both the issue-based representations of the negotiation problems and the policy network to improve the scalability of negotiation domains are yet to be considered. This study proposed a novel reward-based negotiation strategy through deep RL by considering an issue-based represented deep policy network for multi-issue negotiation. Comparative studies analyzed the significant properties of negotiation strategies with RL. The results revealed that the policy-based learning agents with issue-based representations achieved comparable or higher utility than the state-of-the-art baselines with RL and heuristics, especially in the large-sized domains. Additionally, negotiation strategies with RL based on the policy network can achieve agreements by effectively using each step.", + "primary_area": "multiagent systems", + "author": "Ryota Higa; Katsuhide Fujita; Toki Takahashi; Takumu Shimizu; Shinji Nakadai", + "authorids": "", + "aff": "NEC Corporation + National Institute of Advanced Industrial Science and Technology(AIST) + Tokyo University of Agriculture and Technology; National Institute of Advanced Industrial Science and Technology(AIST) + Tokyo University of Agriculture and Technology; National Institute of Advanced Industrial Science and Technology(AIST) + Tokyo University of Agriculture and Technology; National Institute of Advanced Industrial Science and Technology(AIST) + Tokyo University of Agriculture and Technology; NEC Corporation + National Institute of Advanced Industrial Science and Technology(AIST)", + "bibtex": "@article{Higa_Fujita_Takahashi_Shimizu_Nakadai_2023, title={Reward-Based Negotiating Agent Strategies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26367}, DOI={10.1609/aaai.v37i10.26367}, abstractNote={This study proposed a novel reward-based negotiating agent strategy using an issue-based represented deep policy network. 
We compared the negotiation strategies with reinforcement learning (RL) by the tournaments toward heuristics-based champion agents in multi-issue negotiation. A bilateral multi-issue negotiation in which the two agents exchange offers in turn was considered. Existing RL architectures for a negotiation strategy incorporate rich utility function that provides concrete information even though the rewards of RL are considered as generalized signals in practice. Additionally, in existing reinforcement learning architectures for negotiation strategies, both the issue-based representations of the negotiation problems and the policy network to improve the scalability of negotiation domains are yet to be considered. This study proposed a novel reward-based negotiation strategy through deep RL by considering an issue-based represented deep policy network for multi-issue negotiation. Comparative studies analyzed the significant properties of negotiation strategies with RL. The results revealed that the policy-based learning agents with issue-based representations achieved comparable or higher utility than the state-of-the-art baselines with RL and heuristics, especially in the large-sized domains. 
Additionally, negotiation strategies with RL based on the policy network can achieve agreements by effectively using each step.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Higa, Ryota and Fujita, Katsuhide and Takahashi, Toki and Shimizu, Takumu and Nakadai, Shinji}, year={2023}, month={Jun.}, pages={11569-11577} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26367/26139", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26367", + "pdf_size": 4883445, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2536667244943818348&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "nec.com;cc.tuat.ac.jp;katfuji.lab.tuat.ac.jp;katfuji.lab.tuat.ac.jp;nec.com", + "email": "nec.com;cc.tuat.ac.jp;katfuji.lab.tuat.ac.jp;katfuji.lab.tuat.ac.jp;nec.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+2;1+2;1+2;1+2;0+1", + "aff_unique_norm": "NEC Corporation;National Institute of Advanced Industrial Science and Technology;Tokyo University of Agriculture and Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nec.com;https://www.aist.go.jp;https://www.tuat.ac.jp", + "aff_unique_abbr": "NEC;AIST;TUAT", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "article-25961", + "title": "Reward-Biased Maximum Likelihood Estimation for Neural Contextual Bandits: A Distributional Learning Perspective", + "track": "main", + "status": "Technical", + "abstract": "Reward-biased maximum likelihood estimation (RBMLE) is a classic principle in the adaptive control literature for tackling explore-exploit trade-offs. 
This paper studies the neural contextual bandit problem from a distributional perspective and proposes NeuralRBMLE, which leverages the likelihood of surrogate parametric distributions to learn the unknown reward distributions and thereafter adapts the RBMLE principle to achieve efficient exploration by properly adding a reward-bias term. NeuralRBMLE leverages the representation power of neural networks and directly encodes exploratory behavior in the parameter space, without constructing confidence intervals of the estimated rewards. We propose two variants of NeuralRBMLE algorithms: The first variant directly obtains the RBMLE estimator by gradient ascent, and the second variant simplifies RBMLE to a simple index policy through an approximation. We show that both algorithms achieve order-optimality. Through extensive experiments, we demonstrate that the NeuralRBMLE algorithms achieve comparable or better empirical regrets than the state-of-the-art methods on real-world datasets with non-linear reward functions.", + "primary_area": "machine learning ii", + "author": "Yu-Heng Hung; Ping-Chun Hsieh", + "authorids": "", + "aff": "Department of Computer Science, National Yang Ming Chiao Tung University, Hsinchu, Taiwan; Department of Computer Science, National Yang Ming Chiao Tung University, Hsinchu, Taiwan", + "bibtex": "@article{Hung_Hsieh_2023, title={Reward-Biased Maximum Likelihood Estimation for Neural Contextual Bandits: A Distributional Learning Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25961}, DOI={10.1609/aaai.v37i7.25961}, abstractNote={Reward-biased maximum likelihood estimation (RBMLE) is a classic principle in the adaptive control literature for tackling explore-exploit trade-offs. 
This paper studies the neural contextual bandit problem from a distributional perspective and proposes NeuralRBMLE, which leverages the likelihood of surrogate parametric distributions to learn the unknown reward distributions and thereafter adapts the RBMLE principle to achieve efficient exploration by properly adding a reward-bias term. NeuralRBMLE leverages the representation power of neural networks and directly encodes exploratory behavior in the parameter space, without constructing confidence intervals of the estimated rewards. We propose two variants of NeuralRBMLE algorithms: The first variant directly obtains the RBMLE estimator by gradient ascent, and the second variant simplifies RBMLE to a simple index policy through an approximation. We show that both algorithms achieve order-optimality. Through extensive experiments, we demonstrate that the NeuralRBMLE algorithms achieve comparable or better empirical regrets than the state-of-the-art methods on real-world datasets with non-linear reward functions.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hung, Yu-Heng and Hsieh, Ping-Chun}, year={2023}, month={Jun.}, pages={7944-7952} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25961/25733", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25961", + "pdf_size": 185195, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7004757677511375734&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff_domain": "nycu.edu.tw;nycu.edu.tw", + "email": "nycu.edu.tw;nycu.edu.tw", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "National Yang Ming Chiao Tung University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.nctu.edu.tw", + "aff_unique_abbr": "NYCU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Hsinchu", + "aff_country_unique_index": 
"0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-26478", + "title": "Rich Event Modeling for Script Event Prediction", + "track": "main", + "status": "Technical", + "abstract": "Script is a kind of structured knowledge extracted from texts, which contains a sequence of events. Based on such knowledge, script event prediction aims to predict the subsequent event. To do so, two aspects should be considered for events, namely, event description (i.e., what the events should contain) and event encoding (i.e., how they should be encoded). Most existing methods describe an event by a verb together with a few core arguments (i.e., subject, object, and indirect object), which are not precise enough. In addition, existing event encoders are limited to a fixed number of arguments, which are not flexible enough to deal with extra information. Thus, in this paper, we propose the Rich Event Prediction (REP) framework for script event prediction. Fundamentally, it is based on the proposed rich event description, which enriches the existing ones with three kinds of important information, namely, the senses of verbs, extra semantic roles, and types of participants. REP contains an event extractor to extract such information from texts. Based on the extracted rich information, a predictor then selects the most probable subsequent event. The core component of the predictor is a transformer-based event encoder that integrates the above information flexibly. 
Experimental results on the widely used Gigaword Corpus show the effectiveness of the proposed framework.", + "primary_area": "speech natural language processing", + "author": "Long Bai; Saiping Guan; Zixuan Li; Jiafeng Guo; Xiaolong Jin; Xueqi Cheng", + "authorids": "", + "aff": "CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences (CAS) + School of Computer Science and Technology, University of Chinese Academy of Sciences; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences (CAS) + School of Computer Science and Technology, University of Chinese Academy of Sciences; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences (CAS) + School of Computer Science and Technology, University of Chinese Academy of Sciences; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences (CAS) + School of Computer Science and Technology, University of Chinese Academy of Sciences; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences (CAS) + School of Computer Science and Technology, University of Chinese Academy of Sciences; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences (CAS) + School of Computer Science and Technology, University of Chinese Academy of Sciences", + "bibtex": "@article{Bai_Guan_Li_Guo_Jin_Cheng_2023, title={Rich Event Modeling for Script Event Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26478}, DOI={10.1609/aaai.v37i11.26478}, abstractNote={Script is a kind of structured knowledge extracted from texts, which contains a sequence of events. Based on such knowledge, script event prediction aims to predict the subsequent event. 
To do so, two aspects should be considered for events, namely, event description (i.e., what the events should contain) and event encoding (i.e., how they should be encoded). Most existing methods describe an event by a verb together with a few core arguments (i.e., subject, object, and indirect object), which are not precise enough. In addition, existing event encoders are limited to a fixed number of arguments, which are not flexible enough to deal with extra information. Thus, in this paper, we propose the Rich Event Prediction (REP) framework for script event prediction. Fundamentally, it is based on the proposed rich event description, which enriches the existing ones with three kinds of important information, namely, the senses of verbs, extra semantic roles, and types of participants. REP contains an event extractor to extract such information from texts. Based on the extracted rich information, a predictor then selects the most probable subsequent event. The core component of the predictor is a transformer-based event encoder that integrates the above information flexibly. 
Experimental results on the widely used Gigaword Corpus show the effectiveness of the proposed framework.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bai, Long and Guan, Saiping and Li, Zixuan and Guo, Jiafeng and Jin, Xiaolong and Cheng, Xueqi}, year={2023}, month={Jun.}, pages={12553-12561} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26478/26250", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26478", + "pdf_size": 554396, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11934047459752693063&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;School of Computer Science and Technology", + "aff_unique_url": "http://www.cas.ac.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": ";;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25867", + "title": "Riemannian Local Mechanism for SPD Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "The Symmetric Positive Definite (SPD) matrices have received wide attention for data representation in many scientific areas. Although there are many different attempts to develop effective deep architectures for data processing on the Riemannian manifold of SPD matrices, very few solutions explicitly mine the local geometrical information in deep SPD feature representations. 
Given the great success of local mechanisms in Euclidean methods, we argue that it is of utmost importance to ensure the preservation of local geometric information in the SPD networks. We first analyse the convolution operator commonly used for capturing local information in Euclidean deep networks from the perspective of a higher level of abstraction afforded by category theory. Based on this analysis, we define the local information in the SPD manifold and design a multi-scale submanifold block for mining local geometry. Experiments involving multiple visual tasks validate the effectiveness of our approach.", + "primary_area": "machine learning i", + "author": "Ziheng Chen; Tianyang Xu; Xiao-Jun Wu; Rui Wang; Zhiwu Huang; Josef Kittler", + "authorids": "", + "aff": "School of Artificial Intelligence and Computer Science, Jiangnan University; School of Artificial Intelligence and Computer Science, Jiangnan University; School of Artificial Intelligence and Computer Science, Jiangnan University; School of Artificial Intelligence and Computer Science, Jiangnan University; School of Computing and Information Systems, Singapore Management University; Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey", + "bibtex": "@article{Chen_Xu_Wu_Wang_Huang_Kittler_2023, title={Riemannian Local Mechanism for SPD Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25867}, DOI={10.1609/aaai.v37i6.25867}, abstractNote={The Symmetric Positive Definite (SPD) matrices have received wide attention for data representation in many scientific areas. Although there are many different attempts to develop effective deep architectures for data processing on the Riemannian manifold of SPD matrices, very few solutions explicitly mine the local geometrical information in deep SPD feature representations. 
Given the great success of local mechanisms in Euclidean methods, we argue that it is of utmost importance to ensure the preservation of local geometric information in the SPD networks. We first analyse the convolution operator commonly used for capturing local information in Euclidean deep networks from the perspective of a higher level of abstraction afforded by category theory. Based on this analysis, we define the local information in the SPD manifold and design a multi-scale submanifold block for mining local geometry. Experiments involving multiple visual tasks validate the effectiveness of our approach.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Ziheng and Xu, Tianyang and Wu, Xiao-Jun and Wang, Rui and Huang, Zhiwu and Kittler, Josef}, year={2023}, month={Jun.}, pages={7104-7112} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25867/25639", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25867", + "pdf_size": 368579, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2061777630772052607&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "163.com;jiangnan.edu.cn;jiangnan.edu.cn;jiangnan.edu.cn;smu.edu.sg;surrey.ac.uk", + "email": "163.com;jiangnan.edu.cn;jiangnan.edu.cn;jiangnan.edu.cn;smu.edu.sg;surrey.ac.uk", + "github": "https://github.com/GitZH-Chen/MSNet.git", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;2", + "aff_unique_norm": "Jiangnan University;Singapore Management University;University of Surrey", + "aff_unique_dep": "School of Artificial Intelligence and Computer Science;School of Computing and Information Systems;Centre for Vision, Speech and Signal Processing (CVSSP)", + "aff_unique_url": "https://www.jiangnan.edu.cn/;https://www.smu.edu.sg;https://www.surrey.ac.uk", + "aff_unique_abbr": "JNU;SMU;Surrey", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;1;2", + "aff_country_unique": "China;Singapore;United Kingdom" + }, + { + "id": "article-26888", + "title": "Ripple: Concept-Based Interpretation for Raw Time Series Models in Education", + "track": "eaai symposium ai for education", + "status": "Technical", + "abstract": "Time series is the most prevalent form of input data for educational prediction tasks. The vast majority of research using time series data focuses on hand-crafted features, designed by experts for predictive performance and interpretability. However, extracting these features is labor-intensive for humans and computers. In this paper, we propose an approach that utilizes irregular multivariate time series modeling with graph neural networks to achieve comparable or better accuracy with raw time series clickstreams in comparison to hand-crafted features. Furthermore, we extend concept activation vectors for interpretability in raw time series models. We analyze these advances in the education domain, addressing the task of early student performance prediction for downstream targeted interventions and instructional support. 
Our experimental analysis on 23 MOOCs with millions of combined interactions over six behavioral dimensions show that models designed with our approach can (i) beat state-of-the-art educational time series baselines with no feature extraction and (ii) provide interpretable insights for personalized interventions.\nSource code: https://github.com/epfl-ml4ed/ripple/.", + "primary_area": "", + "author": "Mohammad Asadi; Vinitra Swamy; Jibril Frej; Julien Vignoud; Mirko Marras; Tanja K\u00e4ser", + "authorids": "", + "aff": "EPFL; EPFL; EPFL; EPFL; University of Cagliari; EPFL", + "bibtex": "@article{Asadi_Swamy_Frej_Vignoud_Marras_K\u00e4ser_2024, title={Ripple: Concept-Based Interpretation for Raw Time Series Models in Education}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26888}, DOI={10.1609/aaai.v37i13.26888}, abstractNote={Time series is the most prevalent form of input data for educational prediction tasks. The vast majority of research using time series data focuses on hand-crafted features, designed by experts for predictive performance and interpretability. However, extracting these features is labor-intensive for humans and computers. In this paper, we propose an approach that utilizes irregular multivariate time series modeling with graph neural networks to achieve comparable or better accuracy with raw time series clickstreams in comparison to hand-crafted features. Furthermore, we extend concept activation vectors for interpretability in raw time series models. We analyze these advances in the education domain, addressing the task of early student performance prediction for downstream targeted interventions and instructional support. 
Our experimental analysis on 23 MOOCs with millions of combined interactions over six behavioral dimensions show that models designed with our approach can (i) beat state-of-the-art educational time series baselines with no feature extraction and (ii) provide interpretable insights for personalized interventions.\nSource code: https://github.com/epfl-ml4ed/ripple/.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Asadi, Mohammad and Swamy, Vinitra and Frej, Jibril and Vignoud, Julien and Marras, Mirko and K\u00e4ser, Tanja}, year={2024}, month={Jul.}, pages={15903-15911} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26888/26660", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26888", + "pdf_size": 1599767, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11449230775316912804&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;acm.org;epfl.ch", + "email": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;acm.org;epfl.ch", + "github": "https://github.com/epfl-ml4ed/ripple/", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne;University of Cagliari", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.epfl.ch;https://www.unica.it", + "aff_unique_abbr": "EPFL;UNICA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "Switzerland;Italy" + }, + { + "id": "article-26998", + "title": "Risk-Aware Decentralized Safe Control via Dynamic Responsibility Allocation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this work, we present a novel risk-aware decentralized Control Barrier Function (CBF)-based controller for multi-agent systems. 
The proposed decentralized controller is composed based on pairwise agent responsibility shares (a percentage), calculated from the risk evaluation of each individual agent faces in a multi-agent interaction environment. With our proposed CBF-inspired risk evaluation framework, the responsibility portions between pairwise agents are dynamically updated based on the relative risk they face. Our method allows agents with lower risk to enjoy a higher level of freedom in terms of a wider action space, and the agents exposed to higher risk are constrained more tightly on action spaces, and are therefore forced to proceed with caution.", + "primary_area": "", + "author": "Yiwei Lyu; Wenhao Luo; John M. Dolan", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, Carnegie Mellon University, USA; Department of Computer Science, University of North Carolina at Charlotte, USA; Robotics Institute, Carnegie Mellon University, USA", + "bibtex": "@article{Lyu_Luo_Dolan_2024, title={Risk-Aware Decentralized Safe Control via Dynamic Responsibility Allocation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26998}, DOI={10.1609/aaai.v37i13.26998}, abstractNote={In this work, we present a novel risk-aware decentralized Control Barrier Function (CBF)-based controller for multi-agent systems. The proposed decentralized controller is composed based on pairwise agent responsibility shares (a percentage), calculated from the risk evaluation of each individual agent faces in a multi-agent interaction environment. With our proposed CBF-inspired risk evaluation framework, the responsibility portions between pairwise agents are dynamically updated based on the relative risk they face. 
Our method allows agents with lower risk to enjoy a higher level of freedom in terms of a wider action space, and the agents exposed to higher risk are constrained more tightly on action spaces, and are therefore forced to proceed with caution.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lyu, Yiwei and Luo, Wenhao and Dolan, John M.}, year={2024}, month={Jul.}, pages={16276-16277} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26998/26770", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26998", + "pdf_size": 1400258, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:en2aJOX5E3oJ:scholar.google.com/&scioq=Risk-Aware+Decentralized+Safe+Control+via+Dynamic+Responsibility+Allocation+(Student+Abstract)&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "andrew.cmu.edu;uncc.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;uncc.edu;andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Carnegie Mellon University;University of North Carolina at Charlotte", + "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Computer Science", + "aff_unique_url": "https://www.cmu.edu;https://www.uncc.edu", + "aff_unique_abbr": "CMU;UNCC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Charlotte", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26775", + "title": "Robust Average-Reward Markov Decision Processes", + "track": "aaai special track", + "status": "Technical", + "abstract": "In robust Markov decision processes (MDPs), the uncertainty in the transition kernel is addressed by finding a policy that optimizes the worst-case performance over an uncertainty set of MDPs. While much of the literature has focused on discounted MDPs, robust average-reward MDPs remain largely unexplored. 
In this paper, we focus on robust average-reward MDPs, where the goal is to find a policy that optimizes the worst-case average reward over an uncertainty set. We first take an approach that approximates average-reward MDPs using discounted MDPs. We prove that the robust discounted value function converges to the robust average-reward as the discount factor goes to 1, and moreover when it is large, any optimal policy of the robust discounted MDP is also an optimal policy of the robust average-reward. We further design a robust dynamic programming approach, and theoretically characterize its convergence to the optimum. Then, we investigate robust average-reward MDPs directly without using discounted MDPs as an intermediate step. We derive the robust Bellman equation for robust average-reward MDPs, prove that the optimal policy can be derived from its solution, and further design a robust relative value iteration algorithm that provably finds its solution, or equivalently, the optimal robust policy.", + "primary_area": "safe and robust ai", + "author": "Yue Wang; Alvaro Velasquez; George Atia; Ashley Prater-Bennette; Shaofeng Zou", + "authorids": "", + "aff": "University at Buffalo, The State University of New York; University of Colorado Boulder; University of Central Florida; Air Force Research Laboratory; University at Buffalo, The State University of New York", + "bibtex": "@article{Wang_Velasquez_Atia_Prater-Bennette_Zou_2023, title={Robust Average-Reward Markov Decision Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26775}, DOI={10.1609/aaai.v37i12.26775}, abstractNote={In robust Markov decision processes (MDPs), the uncertainty in the transition kernel is addressed by finding a policy that optimizes the worst-case performance over an uncertainty set of MDPs. While much of the literature has focused on discounted MDPs, robust average-reward MDPs remain largely unexplored. 
In this paper, we focus on robust average-reward MDPs, where the goal is to find a policy that optimizes the worst-case average reward over an uncertainty set. We first take an approach that approximates average-reward MDPs using discounted MDPs. We prove that the robust discounted value function converges to the robust average-reward as the discount factor goes to 1, and moreover when it is large, any optimal policy of the robust discounted MDP is also an optimal policy of the robust average-reward. We further design a robust dynamic programming approach, and theoretically characterize its convergence to the optimum. Then, we investigate robust average-reward MDPs directly without using discounted MDPs as an intermediate step. We derive the robust Bellman equation for robust average-reward MDPs, prove that the optimal policy can be derived from its solution, and further design a robust relative value iteration algorithm that provably finds its solution, or equivalently, the optimal robust policy.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yue and Velasquez, Alvaro and Atia, George and Prater-Bennette, Ashley and Zou, Shaofeng}, year={2023}, month={Jun.}, pages={15215-15223} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26775/26547", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26775", + "pdf_size": 363944, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8892236739425789675&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "buffalo.com;colorado.edu;ucf.edu;us.af.mil;buffalo.edu", + "email": "buffalo.com;colorado.edu;ucf.edu;us.af.mil;buffalo.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "University at Buffalo;University of Colorado;University of Central Florida;Air Force Research Laboratory", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.buffalo.edu;https://www.colorado.edu;https://www.ucf.edu;https://www.afrl.af.mil/", + "aff_unique_abbr": "UB;CU Boulder;UCF;AFRL", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Buffalo;Boulder;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25925", + "title": "Robust Causal Graph Representation Learning against Confounding Effects", + "track": "main", + "status": "Technical", + "abstract": "The prevailing graph neural network models have achieved significant progress in graph representation learning. However, in this paper, we uncover an ever-overlooked phenomenon: the pre-trained graph representation learning model tested with full graphs underperforms the model tested with well-pruned graphs. This observation reveals that there exist confounders in graphs, which may interfere with the model learning semantic information, and current graph representation learning methods have not eliminated their influence. To tackle this issue, we propose Robust Causal Graph Representation Learning (RCGRL) to learn robust graph representations against confounding effects. RCGRL introduces an active approach to generate instrumental variables under unconditional moment restrictions, which empowers the graph representation learning model to eliminate confounders, thereby capturing discriminative information that is causally related to downstream predictions. We offer theorems and proofs to guarantee the theoretical effectiveness of the proposed approach. Empirically, we conduct extensive experiments on a synthetic dataset and multiple benchmark datasets. Experimental results demonstrate the effectiveness and generalization ability of RCGRL. 
Our codes are available at https://github.com/hang53/RCGRL.", + "primary_area": "machine learning i", + "author": "Hang Gao; Jiangmeng Li; Wenwen Qiang; Lingyu Si; Bing Xu; Changwen Zheng; Fuchun Sun", + "authorids": "", + "aff": "Science and Technology on Integrated Information System Laboratory, Institute of Software Chinese Academy of Sciences+University of Chinese Academy of Sciences; Science and Technology on Integrated Information System Laboratory, Institute of Software Chinese Academy of Sciences+University of Chinese Academy of Sciences; Science and Technology on Integrated Information System Laboratory, Institute of Software Chinese Academy of Sciences+University of Chinese Academy of Sciences; Science and Technology on Integrated Information System Laboratory, Institute of Software Chinese Academy of Sciences+University of Chinese Academy of Sciences; China Communications Technology Information Group Co., Ltd.; Science and Technology on Integrated Information System Laboratory, Institute of Software Chinese Academy of Sciences; Tsinghua University", + "bibtex": "@article{Gao_Li_Qiang_Si_Xu_Zheng_Sun_2023, title={Robust Causal Graph Representation Learning against Confounding Effects}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25925}, DOI={10.1609/aaai.v37i6.25925}, abstractNote={The prevailing graph neural network models have achieved significant progress in graph representation learning. However, in this paper, we uncover an ever-overlooked phenomenon: the pre-trained graph representation learning model tested with full graphs underperforms the model tested with well-pruned graphs. This observation reveals that there exist confounders in graphs, which may interfere with the model learning semantic information, and current graph representation learning methods have not eliminated their influence. 
To tackle this issue, we propose Robust Causal Graph Representation Learning (RCGRL) to learn robust graph representations against confounding effects. RCGRL introduces an active approach to generate instrumental variables under unconditional moment restrictions, which empowers the graph representation learning model to eliminate confounders, thereby capturing discriminative information that is causally related to downstream predictions. We offer theorems and proofs to guarantee the theoretical effectiveness of the proposed approach. Empirically, we conduct extensive experiments on a synthetic dataset and multiple benchmark datasets. Experimental results demonstrate the effectiveness and generalization ability of RCGRL. Our codes are available at https://github.com/hang53/RCGRL.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Hang and Li, Jiangmeng and Qiang, Wenwen and Si, Lingyu and Xu, Bing and Zheng, Changwen and Sun, Fuchun}, year={2023}, month={Jun.}, pages={7624-7632} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25925/25697", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25925", + "pdf_size": 850203, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14906528850810111284&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;ccccltd.cn;mail.tsinghua.edu.cn", + "email": "iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;ccccltd.cn;mail.tsinghua.edu.cn", + "github": "https://github.com/hang53/RCGRL", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;0;3", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;China Communications Technology Information Group;Tsinghua University", + "aff_unique_dep": "Institute of Software;;;", + "aff_unique_url": 
"http://www.ios.ac.cn;http://www.ucas.ac.cn;;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "CAS;UCAS;CCTIG;THU", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25974", + "title": "Robust Domain Adaptation for Machine Reading Comprehension", + "track": "main", + "status": "Technical", + "abstract": "Most domain adaptation methods for machine reading comprehension (MRC) use a pre-trained question-answer (QA) construction model to generate pseudo QA pairs for MRC transfer. Such a process will inevitably introduce mismatched pairs (i.e., Noisy Correspondence) due to i) the unavailable QA pairs in target documents, and ii) the domain shift during applying the QA construction model to the target domain. Undoubtedly, the noisy correspondence will degenerate the performance of MRC, which however is neglected by existing works. To solve such an untouched problem, we propose to construct QA pairs by additionally using the dialogue related to the documents, as well as a new domain adaptation method for MRC. Specifically, we propose Robust Domain Adaptation for Machine Reading Comprehension (RMRC) method which consists of an answer extractor (AE), a question selector (QS), and an MRC model. Specifically, RMRC filters out the irrelevant answers by estimating the correlation to the document via the AE, and extracts the questions by fusing the candidate questions in multiple rounds of dialogue chats via the QS. With the extracted QA pairs, MRC is fine-tuned and provides the feedback to optimize the QS through a novel reinforced self-training method. Thanks to the optimization of the QS, our method will greatly alleviate the noisy correspondence problem caused by the domain shift. 
To the best of our knowledge, this could be the first study to reveal the influence of noisy correspondence in domain adaptation MRC models and show a feasible solution to achieve the robustness against the mismatched pairs. Extensive experiments on three datasets demonstrate the effectiveness of our method.", + "primary_area": "machine learning ii", + "author": "Liang Jiang; Zhenyu Huang; Jia Liu; Zujie Wen; Xi Peng", + "authorids": "", + "aff": "Ant Group; College of Computer Science, Sichuan University; Ant Group; Ant Group; College of Computer Science, Sichuan University", + "bibtex": "@article{Jiang_Huang_Liu_Wen_Peng_2023, title={Robust Domain Adaptation for Machine Reading Comprehension}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25974}, DOI={10.1609/aaai.v37i7.25974}, abstractNote={Most domain adaptation methods for machine reading comprehension (MRC) use a pre-trained question-answer (QA) construction model to generate pseudo QA pairs for MRC transfer. Such a process will inevitably introduce mismatched pairs (i.e., Noisy Correspondence) due to i) the unavailable QA pairs in target documents, and ii) the domain shift during applying the QA construction model to the target domain. Undoubtedly, the noisy correspondence will degenerate the performance of MRC, which however is neglected by existing works. To solve such an untouched problem, we propose to construct QA pairs by additionally using the dialogue related to the documents, as well as a new domain adaptation method for MRC. Specifically, we propose Robust Domain Adaptation for Machine Reading Comprehension (RMRC) method which consists of an answer extractor (AE), a question selector (QS), and an MRC model. Specifically, RMRC filters out the irrelevant answers by estimating the correlation to the document via the AE, and extracts the questions by fusing the candidate questions in multiple rounds of dialogue chats via the QS. 
With the extracted QA pairs, MRC is fine-tuned and provides the feedback to optimize the QS through a novel reinforced self-training method. Thanks to the optimization of the QS, our method will greatly alleviate the noisy correspondence problem caused by the domain shift. To the best of our knowledge, this could be the first study to reveal the influence of noisy correspondence in domain adaptation MRC models and show a feasible solution to achieve the robustness against the mismatched pairs. Extensive experiments on three datasets demonstrate the effectiveness of our method.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Liang and Huang, Zhenyu and Liu, Jia and Wen, Zujie and Peng, Xi}, year={2023}, month={Jun.}, pages={8060-8069} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25974/25746", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25974", + "pdf_size": 1029938, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2303176094423586631&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "antgroup.com;gmail.com;antgroup.com;antgroup.com;gmail.com", + "email": "antgroup.com;gmail.com;antgroup.com;antgroup.com;gmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;1", + "aff_unique_norm": "Ant Group;Sichuan University", + "aff_unique_dep": ";College of Computer Science", + "aff_unique_url": "https://www.antgroup.com;https://www.scu.edu.cn", + "aff_unique_abbr": "Ant Group;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25492", + "title": "Robust Feature Rectification of Pretrained Vision Models for Object Recognition", + "track": "main", + "status": "Technical", + "abstract": "Pretrained vision models for object recognition often suffer a dramatic performance drop with 
degradations unseen during training. In this work, we propose a RObust FEature Rectification module (ROFER) to improve the performance of pretrained models against degradations. Specifically, ROFER first estimates the type and intensity of the degradation that corrupts the image features. Then, it leverages a Fully Convolutional Network (FCN) to rectify the features from the degradation by pulling them back to clear features. ROFER is a general-purpose module that can address various degradations simultaneously, including blur, noise, and low contrast. Besides, it can be plugged into pretrained models seamlessly to rectify the degraded features without retraining the whole model. Furthermore, ROFER can be easily extended to address composite degradations by adopting a beam search algorithm to find the composition order. Evaluations on CIFAR-10 and Tiny-ImageNet demonstrate that the accuracy of ROFER is 5% higher than that of SOTA methods on different degradations. With respect to composite degradations, ROFER improves the accuracy of a pretrained CNN by 10% and 6% on CIFAR-10 and Tiny-ImageNet respectively.", + "primary_area": "computer vision iii", + "author": "Shengchao Zhou; Gaofeng Meng; Zhaoxiang Zhang; Richard Yi Da Xu; Shiming Xiang", + "authorids": "", + "aff": "NLPR, Institute of Automation, Chinese Academy of Sciences + School of Arti\ufb01cial Intelligence, University of Chinese Academy of Sciences + CAIR, HK Institute of Science and Innovation, Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Arti\ufb01cial Intelligence, University of Chinese Academy of Sciences + CAIR, HK Institute of Science and Innovation, Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Arti\ufb01cial Intelligence, University of Chinese Academy of Sciences + CAIR, HK Institute of Science and Innovation, Chinese Academy of Sciences; FSC1209, Kowloon Tong Campus, Hong Kong Baptist 
University; NLPR, Institute of Automation, Chinese Academy of Sciences + School of Arti\ufb01cial Intelligence, University of Chinese Academy of Sciences", + "bibtex": "@article{Zhou_Meng_Zhang_Xu_Xiang_2023, title={Robust Feature Rectification of Pretrained Vision Models for Object Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25492}, DOI={10.1609/aaai.v37i3.25492}, abstractNote={Pretrained vision models for object recognition often suffer a dramatic performance drop with degradations unseen during training. In this work, we propose a RObust FEature Rectification module (ROFER) to improve the performance of pretrained models against degradations. Specifically, ROFER first estimates the type and intensity of the degradation that corrupts the image features. Then, it leverages a Fully Convolutional Network (FCN) to rectify the features from the degradation by pulling them back to clear features. ROFER is a general-purpose module that can address various degradations simultaneously, including blur, noise, and low contrast. Besides, it can be plugged into pretrained models seamlessly to rectify the degraded features without retraining the whole model. Furthermore, ROFER can be easily extended to address composite degradations by adopting a beam search algorithm to find the composition order. Evaluations on CIFAR-10 and Tiny-ImageNet demonstrate that the accuracy of ROFER is 5% higher than that of SOTA methods on different degradations. 
With respect to composite degradations, ROFER improves the accuracy of a pretrained CNN by 10% and 6% on CIFAR-10 and Tiny-ImageNet respectively.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Shengchao and Meng, Gaofeng and Zhang, Zhaoxiang and Xu, Richard Yi Da and Xiang, Shiming}, year={2023}, month={Jun.}, pages={3796-3804} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25492/25264", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25492", + "pdf_size": 1082745, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:LJNISNwT-_sJ:scholar.google.com/&scioq=Robust+Feature+Rectification+of+Pretrained+Vision+Models+for+Object+Recognition&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "ia.ac.cn;ia.ac.cn;ia.ac.cn;hkbu.edu.hk;nlpr.ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;ia.ac.cn;hkbu.edu.hk;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+0;0+1+0;0+1+0;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Hong Kong Baptist University", + "aff_unique_dep": "Institute of Automation;School of Arti\ufb01cial Intelligence;", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn;https://www.hkbu.edu.hk", + "aff_unique_abbr": "CAS;UCAS;HKBU", + "aff_campus_unique_index": "1;1;1;2;", + "aff_campus_unique": ";Hong Kong;Kowloon Tong", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26776", + "title": "Robust Graph Meta-Learning via Manifold Calibration with Proxy Subgraphs", + "track": "aaai special track", + "status": "Technical", + "abstract": "Graph meta-learning has become a preferable paradigm for graph-based node classification with long-tail distribution, owing to its capability of capturing the intrinsic manifold of support and query nodes. 
Despite the remarkable success, graph meta-learning suffers from severe performance degradation when training on graph data with structural noise. In this work, we observe that the structural noise may impair the smoothness of the intrinsic manifold supporting the support and query nodes, leading to the poor transferable priori of the meta-learner. To address the issue, we propose a new approach for graph meta-learning that is robust against structural noise, called Proxy subgraph-based Manifold Calibration method (Pro-MC). Concretely, a subgraph generator is designed to generate proxy subgraphs that can calibrate the smoothness of the manifold. The proxy subgraph compromises two types of subgraphs with two biases, thus preventing the manifold from being rugged and straightforward. By doing so, our proposed meta-learner can obtain generalizable and transferable prior knowledge. In addition, we provide a theoretical analysis to illustrate the effectiveness of Pro-MC. Experimental results have demonstrated that our approach can achieve state-of-the-art performance under various structural noises.", + "primary_area": "safe and robust ai", + "author": "Zhenzhong Wang; Lulu Cao; Wanyu Lin; Min Jiang; Kay Chen Tan", + "authorids": "", + "aff": "Department of Computing, The Hong Kong Polytechnic University; School of Informatics, Xiamen University; Department of Computing, The Hong Kong Polytechnic University; School of Informatics, Xiamen University; Department of Computing, The Hong Kong Polytechnic University", + "bibtex": "@article{Wang_Cao_Lin_Jiang_Tan_2023, title={Robust Graph Meta-Learning via Manifold Calibration with Proxy Subgraphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26776}, DOI={10.1609/aaai.v37i12.26776}, abstractNote={Graph meta-learning has become a preferable paradigm for graph-based node classification with long-tail distribution, owing to its capability of capturing the intrinsic manifold of support and query nodes. 
Despite the remarkable success, graph meta-learning suffers from severe performance degradation when training on graph data with structural noise. In this work, we observe that the structural noise may impair the smoothness of the intrinsic manifold supporting the support and query nodes, leading to the poor transferable priori of the meta-learner. To address the issue, we propose a new approach for graph meta-learning that is robust against structural noise, called Proxy subgraph-based Manifold Calibration method (Pro-MC). Concretely, a subgraph generator is designed to generate proxy subgraphs that can calibrate the smoothness of the manifold. The proxy subgraph compromises two types of subgraphs with two biases, thus preventing the manifold from being rugged and straightforward. By doing so, our proposed meta-learner can obtain generalizable and transferable prior knowledge. In addition, we provide a theoretical analysis to illustrate the effectiveness of Pro-MC. Experimental results have demonstrated that our approach can achieve state-of-the-art performance under various structural noises.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zhenzhong and Cao, Lulu and Lin, Wanyu and Jiang, Min and Tan, Kay Chen}, year={2023}, month={Jun.}, pages={15224-15232} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26776/26548", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26776", + "pdf_size": 371775, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5413946091069088999&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "connect.polyu.hk;stu.xmu.edu.cn;polyu.hk;xmu.edu.cn;polyu.hk", + "email": "connect.polyu.hk;stu.xmu.edu.cn;polyu.hk;xmu.edu.cn;polyu.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;0", + "aff_unique_norm": "The Hong Kong Polytechnic University;Xiamen University", + 
"aff_unique_dep": "Department of Computing;School of Informatics", + "aff_unique_url": "https://www.polyu.edu.hk;https://www.xmu.edu.cn", + "aff_unique_abbr": "PolyU;XMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hong Kong;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25291", + "title": "Robust Image Denoising of No-Flash Images Guided by Consistent Flash Images", + "track": "main", + "status": "Technical", + "abstract": "Images taken in low light conditions typically contain distracting noise, and eliminating such noise is a crucial computer vision problem. Additional photos captured with a camera flash can guide an image denoiser to preserve edges since the flash images often contain fine details with reduced noise. Nonetheless, a denoiser can be misled by inconsistent flash images, which have image structures (e.g., edges) that do not exist in no-flash images. Unfortunately, this disparity frequently occurs as the flash/no-flash pairs are taken in different light conditions. We propose a learning-based technique that robustly fuses the image pairs while considering their inconsistency. Our framework infers consistent flash image patches locally, which have similar image structures with the ground truth, and denoises no-flash images using the inferred ones via a combination model. We demonstrate that our technique can produce more robust results than state-of-the-art methods, given various flash/no-flash pairs with inconsistent image structures. 
The source code is available at https://github.com/CGLab-GIST/RIDFnF.", + "primary_area": "computer vision ii", + "author": "Geunwoo Oh; Jonghee Back; Jae-Pil Heo; Bochang Moon", + "authorids": "", + "aff": "Gwangju Institute of Science and Technology, South Korea; Gwangju Institute of Science and Technology, South Korea; Sungkyunkwan University, South Korea; Gwangju Institute of Science and Technology, South Korea", + "bibtex": "@article{Oh_Back_Heo_Moon_2023, title={Robust Image Denoising of No-Flash Images Guided by Consistent Flash Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25291}, DOI={10.1609/aaai.v37i2.25291}, abstractNote={Images taken in low light conditions typically contain distracting noise, and eliminating such noise is a crucial computer vision problem. Additional photos captured with a camera flash can guide an image denoiser to preserve edges since the flash images often contain fine details with reduced noise. Nonetheless, a denoiser can be misled by inconsistent flash images, which have image structures (e.g., edges) that do not exist in no-flash images. Unfortunately, this disparity frequently occurs as the flash/no-flash pairs are taken in different light conditions. We propose a learning-based technique that robustly fuses the image pairs while considering their inconsistency. Our framework infers consistent flash image patches locally, which have similar image structures with the ground truth, and denoises no-flash images using the inferred ones via a combination model. We demonstrate that our technique can produce more robust results than state-of-the-art methods, given various flash/no-flash pairs with inconsistent image structures. 
The source code is available at https://github.com/CGLab-GIST/RIDFnF.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Oh, Geunwoo and Back, Jonghee and Heo, Jae-Pil and Moon, Bochang}, year={2023}, month={Jun.}, pages={1993-2001} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25291/25063", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25291", + "pdf_size": 16263494, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16223896260033929275&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "gm.gist.ac.kr;gm.gist.ac.kr;skku.edu;gist.ac.kr", + "email": "gm.gist.ac.kr;gm.gist.ac.kr;skku.edu;gist.ac.kr", + "github": "https://github.com/CGLab-GIST/RIDFnF", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Gwangju Institute of Science and Technology;Sungkyunkwan University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.gist.ac.kr;https://www.skku.edu", + "aff_unique_abbr": "GIST;SKKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26746", + "title": "Robust Image Steganography: Hiding Messages in Frequency Coefficients", + "track": "aaai special track", + "status": "Technical", + "abstract": "Steganography is a technique that hides secret messages into a public multimedia object without raising suspicion from third parties. However, most existing works cannot provide good robustness against lossy JPEG compression while maintaining a relatively large embedding capacity. This paper presents an end-to-end robust steganography system based on the invertible neural network (INN). 
Instead of hiding in the spatial domain, our method directly hides secret messages into the discrete cosine transform (DCT) coefficients of the cover image, which significantly improves the robustness and anti-steganalysis security. A mutual information loss is first proposed to constrain the flow of information in INN. Besides, a two-way fusion module (TWFM) is implemented, utilizing spatial and DCT domain features as auxiliary information to facilitate message extraction. These two designs aid in recovering secret messages from the DCT coefficients losslessly. Experimental results demonstrate that our method yields significantly lower error rates than other existing hiding methods. For example, our method achieves reliable extraction with 0 error rate for 1 bit per pixel (bpp) embedding payload; and under the JPEG compression with quality factor QF=10, the error rate of our method is about 22% lower than the state-of-the-art robust image hiding methods, which demonstrates remarkable robustness against JPEG compression.", + "primary_area": "safe and robust ai", + "author": "Yuhang Lan; Fei Shang; Jianhua Yang; Xiangui Kang; Enping Li", + "authorids": "", + "aff": "Guangdong Key Laboratory of Information Security Technology, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China; Guangdong Key Laboratory of Information Security Technology, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China; Guangdong Polytechnic Normal University, Guangzhou, China; Guangdong Key Laboratory of Information Security Technology, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China; Computer Science Department, Bridgewater State University, Massachusetts, USA", + "bibtex": "@article{Lan_Shang_Yang_Kang_Li_2023, title={Robust Image Steganography: Hiding Messages in Frequency Coefficients}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26746}, 
DOI={10.1609/aaai.v37i12.26746}, abstractNote={Steganography is a technique that hides secret messages into a public multimedia object without raising suspicion from third parties. However, most existing works cannot provide good robustness against lossy JPEG compression while maintaining a relatively large embedding capacity. This paper presents an end-to-end robust steganography system based on the invertible neural network (INN). Instead of hiding in the spatial domain, our method directly hides secret messages into the discrete cosine transform (DCT) coefficients of the cover image, which significantly improves the robustness and anti-steganalysis security. A mutual information loss is first proposed to constrain the flow of information in INN. Besides, a two-way fusion module (TWFM) is implemented, utilizing spatial and DCT domain features as auxiliary information to facilitate message extraction. These two designs aid in recovering secret messages from the DCT coefficients losslessly. Experimental results demonstrate that our method yields significantly lower error rates than other existing hiding methods. 
For example, our method achieves reliable extraction with 0 error rate for 1 bit per pixel (bpp) embedding payload; and under the JPEG compression with quality factor QF=10, the error rate of our method is about 22% lower than the state-of-the-art robust image hiding methods, which demonstrates remarkable robustness against JPEG compression.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lan, Yuhang and Shang, Fei and Yang, Jianhua and Kang, Xiangui and Li, Enping}, year={2023}, month={Jun.}, pages={14955-14963} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26746/26518", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26746", + "pdf_size": 2673025, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7914379428678581901&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;gpnu.edu.cn;mail.sysu.edu.cn;bridgew.edu", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;gpnu.edu.cn;mail.sysu.edu.cn;bridgew.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Sun Yat-Sen University;Guangdong Polytechnic Normal University;Bridgewater State University", + "aff_unique_dep": "School of Computer Science and Engineering;;Computer Science Department", + "aff_unique_url": "http://www.sysu.edu.cn;;https://www.bridgew.edu", + "aff_unique_abbr": "SYSU;;BSU", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Guangzhou;Massachusetts", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26388", + "title": "Robust Multi-Agent Coordination via Evolutionary Generation of Auxiliary Adversarial Attackers", + "track": "main", + "status": "Technical", + "abstract": "Cooperative Multi-agent Reinforcement Learning (CMARL) has shown to be promising for many real-world applications. 
Previous works mainly focus on improving coordination ability via solving MARL-specific challenges (e.g., non-stationarity, credit assignment, scalability), but ignore the policy perturbation issue when testing in a different environment. This issue hasn't been considered in problem formulation or efficient algorithm design. To address this issue, we firstly model the problem as a Limited Policy Adversary Dec-POMDP (LPA-Dec-POMDP), where some coordinators from a team might accidentally and unpredictably encounter a limited number of malicious action attacks, but the regular coordinators still strive for the intended goal. Then, we propose Robust Multi-Agent Coordination via Evolutionary Generation of Auxiliary Adversarial Attackers (ROMANCE), which enables the trained policy to encounter diversified and strong auxiliary adversarial attacks during training, thus achieving high robustness under various \npolicy perturbations. Concretely, to avoid the ego-system overfitting to a specific attacker, we maintain a set of attackers, which is optimized to guarantee the attackers high attacking quality and behavior diversity. The goal of quality is to minimize the ego-system coordination effect, and a novel diversity regularizer based on sparse action is applied to diversify the behaviors among attackers. The ego-system is then paired with a population of attackers selected from the maintained attacker set, and alternately trained against the constantly evolving attackers. 
Extensive experiments on multiple scenarios from SMAC indicate our ROMANCE provides comparable or better robustness and generalization ability than other baselines.", + "primary_area": "multiagent systems", + "author": "Lei Yuan; Ziqian Zhang; Ke Xue; Hao Yin; Feng Chen; Cong Guan; Lihe Li; Chao Qian; Yang Yu", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China + Polixir Technologies, Nanjing 210000, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China + Polixir Technologies, Nanjing 210000, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China + Polixir Technologies, Nanjing 210000, China", + "bibtex": "@article{Yuan_Zhang_Xue_Yin_Chen_Guan_Li_Qian_Yu_2023, title={Robust Multi-Agent Coordination via Evolutionary Generation of Auxiliary Adversarial Attackers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26388}, DOI={10.1609/aaai.v37i10.26388}, abstractNote={Cooperative Multi-agent Reinforcement Learning (CMARL) has shown to be promising for many real-world applications. 
Previous works mainly focus on improving coordination ability via solving MARL-specific challenges (e.g., non-stationarity, credit assignment, scalability), but ignore the policy perturbation issue when testing in a different environment. This issue hasn\u2019t been considered in problem formulation or efficient algorithm design. To address this issue, we firstly model the problem as a Limited Policy Adversary Dec-POMDP (LPA-Dec-POMDP), where some coordinators from a team might accidentally and unpredictably encounter a limited number of malicious action attacks, but the regular coordinators still strive for the intended goal. Then, we propose Robust Multi-Agent Coordination via Evolutionary Generation of Auxiliary Adversarial Attackers (ROMANCE), which enables the trained policy to encounter diversified and strong auxiliary adversarial attacks during training, thus achieving high robustness under various policy perturbations. Concretely, to avoid the ego-system overfitting to a specific attacker, we maintain a set of attackers, which is optimized to guarantee the attackers high attacking quality and behavior diversity. The goal of quality is to minimize the ego-system coordination effect, and a novel diversity regularizer based on sparse action is applied to diversify the behaviors among attackers. The ego-system is then paired with a population of attackers selected from the maintained attacker set, and alternately trained against the constantly evolving attackers. 
Extensive experiments on multiple scenarios from SMAC indicate our ROMANCE provides comparable or better robustness and generalization ability than other baselines.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Lei and Zhang, Ziqian and Xue, Ke and Yin, Hao and Chen, Feng and Guan, Cong and Li, Lihe and Qian, Chao and Yu, Yang}, year={2023}, month={Jun.}, pages={11753-11762} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26388/26160", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26388", + "pdf_size": 1096115, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13023670729839832675&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0+1;0;0;0;0;0;0;0+1", + "aff_unique_norm": "Nanjing University;Polixir Technologies", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;", + "aff_unique_abbr": "Nanjing U;", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_campus_unique": "Nanjing;", + "aff_country_unique_index": "0+0;0+0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26408", + "title": "Robust Neuro-Symbolic Goal and Plan Recognition", + "track": "main", + "status": "Technical", + "abstract": "Goal Recognition is the task of discerning the intended goal of an agent given a sequence of observations, whereas Plan Recognition consists of identifying the plan to achieve such intended goal. 
Regardless of the underlying techniques, most recognition approaches are directly affected by the quality of the available observations. In this paper, we develop neuro-symbolic recognition approaches that can combine learning and planning techniques, compensating for noise and missing observations using prior data. We evaluate our approaches in standard human-designed planning domains as well as domain models automatically learned from real-world data. Empirical experimentation shows that our approaches reliably infer goals and compute correct plans in the experimental datasets. An ablation study shows that outperform approaches that rely exclusively on the domain model, or exclusively on machine learning in problems with both noisy observations and low observability.", + "primary_area": "planning routing and scheduling", + "author": "Leonardo Amado; Ramon Fraga Pereira; Felipe Meneguzzi", + "authorids": "", + "aff": "Pontifical Catholic University of Rio Grande do Sul, Brazil; University of Manchester, England, UK + Sapienza University of Rome, Italy; University of Aberdeen, Scotland, UK + Pontifical Catholic University of Rio Grande do Sul, Brazil", + "bibtex": "@article{Amado_Fraga Pereira_Meneguzzi_2023, title={Robust Neuro-Symbolic Goal and Plan Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26408}, DOI={10.1609/aaai.v37i10.26408}, abstractNote={Goal Recognition is the task of discerning the intended goal of an agent given a sequence of observations, whereas Plan Recognition consists of identifying the plan to achieve such intended goal. Regardless of the underlying techniques, most recognition approaches are directly affected by the quality of the available observations. In this paper, we develop neuro-symbolic recognition approaches that can combine learning and planning techniques, compensating for noise and missing observations using prior data. 
We evaluate our approaches in standard human-designed planning domains as well as domain models automatically learned from real-world data. Empirical experimentation shows that our approaches reliably infer goals and compute correct plans in the experimental datasets. An ablation study shows that outperform approaches that rely exclusively on the domain model, or exclusively on machine learning in problems with both noisy observations and low observability.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Amado, Leonardo and Fraga Pereira, Ramon and Meneguzzi, Felipe}, year={2023}, month={Jun.}, pages={11937-11944} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26408/26180", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26408", + "pdf_size": 671492, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7098744595081921550&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "edu.pucrs.br;manchester.ac.uk;abdn.ac.uk", + "email": "edu.pucrs.br;manchester.ac.uk;abdn.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;3+0", + "aff_unique_norm": "Pontifical Catholic University of Rio Grande do Sul;University of Manchester;Sapienza University of Rome;University of Aberdeen", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.pucrs.br;https://www.manchester.ac.uk;https://www.uniroma1.it;https://www.abdn.ac.uk", + "aff_unique_abbr": "PUCRS;UoM;Sapienza;Aberdeen", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+2;1+0", + "aff_country_unique": "Brazil;United Kingdom;Italy" + }, + { + "id": "article-25276", + "title": "Robust One-Shot Segmentation of Brain Tissues via Image-Aligned Style Transformation", + "track": "main", + "status": "Technical", + "abstract": "One-shot segmentation of brain tissues is typically a dual-model iterative learning: a 
registration model (reg-model) warps a carefully-labeled atlas onto unlabeled images to initialize their pseudo masks for training a segmentation model (seg-model); the seg-model revises the pseudo masks to enhance the reg-model for a better warping in the next iteration. However, there is a key weakness in such dual-model iteration that the spatial misalignment inevitably caused by the reg-model could misguide the seg-model, which makes it converge on an inferior segmentation performance eventually. In this paper, we propose a novel image-aligned style transformation to reinforce the dual-model iterative learning for robust one-shot segmentation of brain tissues. Specifically, we first utilize the reg-model to warp the atlas onto an unlabeled image, and then employ the Fourier-based amplitude exchange with perturbation to transplant the style of the unlabeled image into the aligned atlas. This allows the subsequent seg-model to learn on the aligned and style-transferred copies of the atlas instead of unlabeled images, which naturally guarantees the correct spatial correspondence of an image-mask training pair, without sacrificing the diversity of intensity patterns carried by the unlabeled images. Furthermore, we introduce a feature-aware content consistency in addition to the image-level similarity to constrain the reg-model for a promising initialization, which avoids the collapse of image-aligned style transformation in the first iteration. Experimental results on two public datasets demonstrate 1) a competitive segmentation performance of our method compared to the fully-supervised method, and 2) a superior performance over other state-of-the-art with an increase of average Dice by up to 4.67%. 
The source code is available at: https://github.com/JinxLv/One-shot-segmentation-via-IST.", + "primary_area": "computer vision ii", + "author": "Jinxin Lv; Xiaoyu Zeng; Sheng Wang; Ran Duan; Zhiwei Wang; Qiang Li", + "authorids": "", + "aff": "Britton Chance Center for Biomedical Photonics, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan, China+MoE Key Laboratory for Biomedical Photonics, Collaborative Innovation Center for Biomedical Engineering, School of Engineering Sciences, Huazhong University of Science and Technology, Wuhan, China; Britton Chance Center for Biomedical Photonics, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan, China+MoE Key Laboratory for Biomedical Photonics, Collaborative Innovation Center for Biomedical Engineering, School of Engineering Sciences, Huazhong University of Science and Technology, Wuhan, China; Britton Chance Center for Biomedical Photonics, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan, China+MoE Key Laboratory for Biomedical Photonics, Collaborative Innovation Center for Biomedical Engineering, School of Engineering Sciences, Huazhong University of Science and Technology, Wuhan, China; Britton Chance Center for Biomedical Photonics, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan, China+MoE Key Laboratory for Biomedical Photonics, Collaborative Innovation Center for Biomedical Engineering, School of Engineering Sciences, Huazhong University of Science and Technology, Wuhan, China; Britton Chance Center for Biomedical Photonics, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan, China+MoE Key Laboratory for Biomedical Photonics, Collaborative Innovation Center for Biomedical Engineering, School of Engineering Sciences, Huazhong University of Science and Technology, 
Wuhan, China; Britton Chance Center for Biomedical Photonics, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan, China+MoE Key Laboratory for Biomedical Photonics, Collaborative Innovation Center for Biomedical Engineering, School of Engineering Sciences, Huazhong University of Science and Technology, Wuhan, China", + "bibtex": "@article{Lv_Zeng_Wang_Duan_Wang_Li_2023, title={Robust One-Shot Segmentation of Brain Tissues via Image-Aligned Style Transformation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25276}, DOI={10.1609/aaai.v37i2.25276}, abstractNote={One-shot segmentation of brain tissues is typically a dual-model iterative learning: a registration model (reg-model) warps a carefully-labeled atlas onto unlabeled images to initialize their pseudo masks for training a segmentation model (seg-model); the seg-model revises the pseudo masks to enhance the reg-model for a better warping in the next iteration. However, there is a key weakness in such dual-model iteration that the spatial misalignment inevitably caused by the reg-model could misguide the seg-model, which makes it converge on an inferior segmentation performance eventually. In this paper, we propose a novel image-aligned style transformation to reinforce the dual-model iterative learning for robust one-shot segmentation of brain tissues. Specifically, we first utilize the reg-model to warp the atlas onto an unlabeled image, and then employ the Fourier-based amplitude exchange with perturbation to transplant the style of the unlabeled image into the aligned atlas. This allows the subsequent seg-model to learn on the aligned and style-transferred copies of the atlas instead of unlabeled images, which naturally guarantees the correct spatial correspondence of an image-mask training pair, without sacrificing the diversity of intensity patterns carried by the unlabeled images. 
Furthermore, we introduce a feature-aware content consistency in addition to the image-level similarity to constrain the reg-model for a promising initialization, which avoids the collapse of image-aligned style transformation in the first iteration. Experimental results on two public datasets demonstrate 1) a competitive segmentation performance of our method compared to the fully-supervised method, and 2) a superior performance over other state-of-the-art with an increase of average Dice by up to 4.67%. The source code is available at: https://github.com/JinxLv/One-shot-segmentation-via-IST.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lv, Jinxin and Zeng, Xiaoyu and Wang, Sheng and Duan, Ran and Wang, Zhiwei and Li, Qiang}, year={2023}, month={Jun.}, pages={1861-1869} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25276/25048", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25276", + "pdf_size": 3321975, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8526849444772887511&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/JinxLv/One-shot-segmentation-via-IST", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Huazhong University of Science and Technology", + "aff_unique_dep": "Britton Chance Center for Biomedical Photonics", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26672", + "title": "Robust Planning over Restless Groups: Engagement Interventions for a 
Large-Scale Maternal Telehealth Program", + "track": "aaai special track", + "status": "Technical", + "abstract": "In 2020, maternal mortality in India was estimated to be as high as 130 deaths per 100K live births, nearly twice the UN's target. To improve health outcomes, the non-profit ARMMAN sends automated voice messages to expecting and new mothers across India. However, 38% of mothers stop listening to these calls, missing critical preventative care information. To improve engagement, ARMMAN employs health workers to intervene by making service calls, but workers can only call a fraction of the 100K enrolled mothers. Partnering with ARMMAN, we model the problem of allocating limited interventions across mothers as a restless multi-armed bandit (RMAB), where the realities of large scale and model uncertainty present key new technical challenges. We address these with GROUPS, a double oracle\u2013based algorithm for robust planning in RMABs with scalable grouped arms. Robustness over grouped arms requires several methodological advances. First, to adversarially select stochastic group dynamics, we develop a new method to optimize Whittle indices over transition probability intervals. Second, to learn group-level RMAB policy best responses to these adversarial environments, we introduce a weighted index heuristic. Third, we prove a key theoretical result that planning over grouped arms achieves the same minimax regret--optimal strategy as planning over individual arms, under a technical condition. Finally, using real-world data from ARMMAN, we show that GROUPS produces robust policies that reduce minimax regret by up to 50%, halving the number of preventable missed voice messages to connect more mothers with life-saving maternal health information.", + "primary_area": "ai for social impact", + "author": "Jackson A. 
Killian; Arpita Biswas; Lily Xu; Shresth Verma; Vineet Nair; Aparna Taneja; Aparna Hegde; Neha Madhiwalla; Paula Rodriguez Diaz; Sonja Johnson-Yu; Milind Tambe", + "authorids": "", + "aff": "Harvard University; Harvard University; Harvard University; Google Research + ARMMAN; Google Research; Google Research; ARMMAN; ARMMAN; Harvard University; Harvard University; Google Research + ARMMAN", + "bibtex": "@article{Killian_Biswas_Xu_Verma_Nair_Taneja_Hegde_Madhiwalla_Rodriguez Diaz_Johnson-Yu_Tambe_2023, title={Robust Planning over Restless Groups: Engagement Interventions for a Large-Scale Maternal Telehealth Program}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26672}, DOI={10.1609/aaai.v37i12.26672}, abstractNote={In 2020, maternal mortality in India was estimated to be as high as 130 deaths per 100K live births, nearly twice the UN\u2019s target. To improve health outcomes, the non-profit ARMMAN sends automated voice messages to expecting and new mothers across India. However, 38% of mothers stop listening to these calls, missing critical preventative care information. To improve engagement, ARMMAN employs health workers to intervene by making service calls, but workers can only call a fraction of the 100K enrolled mothers. Partnering with ARMMAN, we model the problem of allocating limited interventions across mothers as a restless multi-armed bandit (RMAB), where the realities of large scale and model uncertainty present key new technical challenges. We address these with GROUPS, a double oracle\u2013based algorithm for robust planning in RMABs with scalable grouped arms. Robustness over grouped arms requires several methodological advances. First, to adversarially select stochastic group dynamics, we develop a new method to optimize Whittle indices over transition probability intervals. Second, to learn group-level RMAB policy best responses to these adversarial environments, we introduce a weighted index heuristic. 
Third, we prove a key theoretical result that planning over grouped arms achieves the same minimax regret--optimal strategy as planning over individual arms, under a technical condition. Finally, using real-world data from ARMMAN, we show that GROUPS produces robust policies that reduce minimax regret by up to 50%, halving the number of preventable missed voice messages to connect more mothers with life-saving maternal health information.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Killian, Jackson A. and Biswas, Arpita and Xu, Lily and Verma, Shresth and Nair, Vineet and Taneja, Aparna and Hegde, Aparna and Madhiwalla, Neha and Rodriguez Diaz, Paula and Johnson-Yu, Sonja and Tambe, Milind}, year={2023}, month={Jun.}, pages={14295-14303} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26672/26444", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26672", + "pdf_size": 1047547, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1432209876220804494&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "g.harvard.edu;g.harvard.edu;g.harvard.edu;google.com;google.com;google.com;armman.org;armman.org;g.harvard.edu;g.harvard.edu;google.com", + "email": "g.harvard.edu;g.harvard.edu;g.harvard.edu;google.com;google.com;google.com;armman.org;armman.org;g.harvard.edu;g.harvard.edu;google.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;1+2;1;1;2;2;0;0;1+2", + "aff_unique_norm": "Harvard University;Google;ARMMAN", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://www.harvard.edu;https://research.google;", + "aff_unique_abbr": "Harvard;Google Research;", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States;" + }, + { + "id": "article-26063", + "title": "Robust Representation 
Learning by Clustering with Bisimulation Metrics for Visual Reinforcement Learning with Distractions", + "track": "main", + "status": "Technical", + "abstract": "Recent work has shown that representation learning plays a critical role in sample-efficient reinforcement learning (RL) from pixels. Unfortunately, in real-world scenarios, representation learning is usually fragile to task-irrelevant distractions such as variations in background or viewpoint. To tackle this problem, we propose a novel clustering-based approach, namely Clustering with Bisimulation Metrics (CBM), which learns robust representations by grouping visual observations in the latent space. Specifically, CBM alternates between two steps: (1) grouping observations by measuring their bisimulation distances to the learned prototypes; (2) learning a set of prototypes according to the current cluster assignments. Computing cluster assignments with bisimulation metrics enables CBM to capture task-relevant information, as bisimulation metrics quantify the behavioral similarity between observations. Moreover, CBM encourages the consistency of representations within each group, which facilitates filtering out task-irrelevant information and thus induces robust representations against distractions. An appealing feature is that CBM can achieve sample-efficient representation learning even if multiple distractions exist simultaneously. Experiments demonstrate that CBM significantly improves the sample efficiency of popular visual RL algorithms and achieves state-of-the-art performance on both multiple and single distraction settings. 
The code is available at https://github.com/MIRALab-USTC/RL-CBM.", + "primary_area": "machine learning ii", + "author": "Qiyuan Liu; Qi Zhou; Rui Yang; Jie Wang", + "authorids": "", + "aff": "CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China; CAS Key Laboratory of Technology in GIPAS, University of Science and Technology of China+Institute of Artificial Intelligence, Hefei Comprehensive National Science Center", + "bibtex": "@article{Liu_Zhou_Yang_Wang_2023, title={Robust Representation Learning by Clustering with Bisimulation Metrics for Visual Reinforcement Learning with Distractions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26063}, DOI={10.1609/aaai.v37i7.26063}, abstractNote={Recent work has shown that representation learning plays a critical role in sample-efficient reinforcement learning (RL) from pixels. Unfortunately, in real-world scenarios, representation learning is usually fragile to task-irrelevant distractions such as variations in background or viewpoint. To tackle this problem, we propose a novel clustering-based approach, namely Clustering with Bisimulation Metrics (CBM), which learns robust representations by grouping visual observations in the latent space. Specifically, CBM alternates between two steps: (1) grouping observations by measuring their bisimulation distances to the learned prototypes; (2) learning a set of prototypes according to the current cluster assignments. Computing cluster assignments with bisimulation metrics enables CBM to capture task-relevant information, as bisimulation metrics quantify the behavioral similarity between observations. 
Moreover, CBM encourages the consistency of representations within each group, which facilitates filtering out task-irrelevant information and thus induces robust representations against distractions. An appealing feature is that CBM can achieve sample-efficient representation learning even if multiple distractions exist simultaneously. Experiments demonstrate that CBM significantly improves the sample efficiency of popular visual RL algorithms and achieves state-of-the-art performance on both multiple and single distraction settings. The code is available at https://github.com/MIRALab-USTC/RL-CBM.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Qiyuan and Zhou, Qi and Yang, Rui and Wang, Jie}, year={2023}, month={Jun.}, pages={8843-8851} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26063/25835", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26063", + "pdf_size": 581702, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15165470213943547311&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/MIRALab-USTC/RL-CBM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+1", + "aff_unique_norm": "University of Science and Technology of China;Hefei Comprehensive National Science Center", + "aff_unique_dep": "CAS Key Laboratory of Technology in GIPAS;Institute of Artificial Intelligence", + "aff_unique_url": "http://www.ustc.edu.cn/;http://www.hfcn.edu.cn", + "aff_unique_abbr": "USTC;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hefei", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26217", + "title": "Robust Self-Supervised Multi-Instance Learning with Structure Awareness", + 
"track": "main", + "status": "Technical", + "abstract": "Multi-instance learning (MIL) is a supervised learning where each example is a labeled bag with many instances. The typical MIL strategies are to train an instance-level feature extractor followed by aggregating instances features as bag-level representation with labeled information. However, learning such a bag-level representation highly depends on a large number of labeled datasets, which are difficult to get in real-world scenarios. In this paper, we make the first attempt to propose a robust Self-supervised Multi-Instance LEarning architecture with Structure awareness (SMILEs) that learns unsupervised bag representation. Our proposed approach is: 1) permutation invariant to the order of instances in bag; 2) structure-aware to encode the topological structures among the instances; and 3) robust against instances noise or permutation. Specifically, to yield robust MIL model without label information, we augment the multi-instance bag and train the representation encoder to maximize the correspondence between the representations of the same bag in its different augmented forms. Moreover, to capture topological structures from nearby instances in bags, our framework learns optimal graph structures for the bags and these graphs are optimized together with message passing layers and the ordered weighted averaging operator towards contrastive loss. Our main theorem characterizes the permutation invariance of the bag representation. Compared with state-of-the-art supervised MIL baselines, SMILEs achieves average improvement of 4.9%, 4.4% in classification accuracy on 5 benchmark datasets and 20 newsgroups datasets, respectively. 
In addition, we show that the model is robust to the input corruption.", + "primary_area": "machine learning iii", + "author": "Yejiang Wang; Yuhai Zhao; Zhengkui Wang; Meixia Wang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Northeastern University, China; School of Computer Science and Engineering, Northeastern University, China; InfoComm Technology Cluster, Singapore Institute of Technology, Singapore; School of Computer Science and Engineering, Northeastern University, China", + "bibtex": "@article{Wang_Zhao_Wang_Wang_2023, title={Robust Self-Supervised Multi-Instance Learning with Structure Awareness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26217}, DOI={10.1609/aaai.v37i8.26217}, abstractNote={Multi-instance learning (MIL) is a supervised learning where each example is a labeled bag with many instances. The typical MIL strategies are to train an instance-level feature extractor followed by aggregating instances features as bag-level representation with labeled information. However, learning such a bag-level representation highly depends on a large number of labeled datasets, which are difficult to get in real-world scenarios. In this paper, we make the first attempt to propose a robust Self-supervised Multi-Instance LEarning architecture with Structure awareness (SMILEs) that learns unsupervised bag representation. Our proposed approach is: 1) permutation invariant to the order of instances in bag; 2) structure-aware to encode the topological structures among the instances; and 3) robust against instances noise or permutation. Specifically, to yield robust MIL model without label information, we augment the multi-instance bag and train the representation encoder to maximize the correspondence between the representations of the same bag in its different augmented forms. 
Moreover, to capture topological structures from nearby instances in bags, our framework learns optimal graph structures for the bags and these graphs are optimized together with message passing layers and the ordered weighted averaging operator towards contrastive loss. Our main theorem characterizes the permutation invariance of the bag representation. Compared with state-of-the-art supervised MIL baselines, SMILEs achieves average improvement of 4.9%, 4.4% in classification accuracy on 5 benchmark datasets and 20 newsgroups datasets, respectively. In addition, we show that the model is robust to the input corruption.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yejiang and Zhao, Yuhai and Wang, Zhengkui and Wang, Meixia}, year={2023}, month={Jun.}, pages={10218-10225} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26217/25989", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26217", + "pdf_size": 155706, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3566719246962350756&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "stumail.neu.edu.cn;mail.neu.edu.cn;singaporetech.edu.sg;stumail.neu.edu.cn", + "email": "stumail.neu.edu.cn;mail.neu.edu.cn;singaporetech.edu.sg;stumail.neu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Northeastern University;Singapore Institute of Technology", + "aff_unique_dep": "School of Computer Science and Engineering;InfoComm Technology Cluster", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.singaporetech.edu.sg", + "aff_unique_abbr": "NEU;SIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-26762", + "title": "Robust Sequence Networked Submodular Maximization", + "track": "aaai special track", + 
"status": "Technical", + "abstract": "In this paper, we study the Robust optimization for sequence Networked submodular maximization (RoseNets) problem. We interweave the robust optimization with the sequence networked submodular maximization. The elements are connected by a directed acyclic graph and the objective function is not submodular on the elements but on the edges in the graph. Under such networked submodular scenario, the impact of removing an element from a sequence depends both on its position in the sequence and in the network. This makes the existing robust algorithms inapplicable and calls for new robust algorithms. In this paper, we take the first step to study the RoseNets problem. We design a robust greedy algorithms, which is robust against the removal of an arbitrary subset of the selected elements. The approximation ratio of the algorithm depends both on the number of the removed elements and the network topology. We further conduct experiments on real applications of recommendation and link prediction. 
The experimental results demonstrate the effectiveness of the proposed algorithm.", + "primary_area": "safe and robust ai", + "author": "Qihao Shi; Bingyang Fu; Can Wang; Jiawei Chen; Sheng Zhou; Yan Feng; Chun Chen", + "authorids": "", + "aff": "School of Computing and Computer Science, Zhejiang University, Hangzhou, China+School of Computing and Computer Science, Zhejiang University City College, Hangzhou, China; School of Computing and Computer Science, Zhejiang University, Hangzhou, China; School of Computing and Computer Science, Zhejiang University, Hangzhou, China; School of Computing and Computer Science, Zhejiang University, Hangzhou, China; School of Computing and Computer Science, Zhejiang University, Hangzhou, China; School of Computing and Computer Science, Zhejiang University, Hangzhou, China; School of Computing and Computer Science, Zhejiang University, Hangzhou, China", + "bibtex": "@article{Shi_Fu_Wang_Chen_Zhou_Feng_Chen_2023, title={Robust Sequence Networked Submodular Maximization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26762}, DOI={10.1609/aaai.v37i12.26762}, abstractNote={In this paper, we study the Robust optimization for sequence Networked submodular maximization (RoseNets) problem. We interweave the robust optimization with the sequence networked submodular maximization. The elements are connected by a directed acyclic graph and the objective function is not submodular on the elements but on the edges in the graph. Under such networked submodular scenario, the impact of removing an element from a sequence depends both on its position in the sequence and in the network. This makes the existing robust algorithms inapplicable and calls for new robust algorithms. In this paper, we take the first step to study the RoseNets problem. We design a robust greedy algorithms, which is robust against the removal of an arbitrary subset of the selected elements. 
The approximation ratio of the algorithm depends both on the number of the removed elements and the network topology. We further conduct experiments on real applications of recommendation and link prediction. The experimental results demonstrate the effectiveness of the proposed algorithm.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shi, Qihao and Fu, Bingyang and Wang, Can and Chen, Jiawei and Zhou, Sheng and Feng, Yan and Chen, Chun}, year={2023}, month={Jun.}, pages={15100-15108} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26762/26534", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26762", + "pdf_size": 273163, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:guX9NIk2WGMJ:scholar.google.com/&scioq=Robust+Sequence+Networked+Submodular+Maximization&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;0;0;0;0;0", + "aff_unique_norm": "Zhejiang University;Zhejiang University City College", + "aff_unique_dep": "School of Computing and Computer Science;School of Computing and Computer Science", + "aff_unique_url": "http://www.zju.edu.cn;", + "aff_unique_abbr": "ZJU;", + "aff_campus_unique_index": "0+0;0;0;0;0;0;0", + "aff_campus_unique": "Hangzhou", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26351", + "title": "Robust Temporal Smoothness in Multi-Task Learning", + "track": "main", + "status": "Technical", + "abstract": "Multi-task learning models based on temporal smoothness assumption, in which each time point of a sequence of time points concerns a task of prediction, assume the adjacent tasks are similar to each other. 
However, the effect of outliers is not taken into account. In this paper, we show that even only one outlier task will destroy the performance of the entire model. To solve this problem, we propose two Robust Temporal Smoothness (RoTS) frameworks. Compared with the existing models based on temporal relation, our methods not only chase the temporal smoothness information but identify outlier tasks, however, without increasing the computational complexity. Detailed theoretical analyses are presented to evaluate the performance of our methods. Experimental results on synthetic and real-life datasets demonstrate the effectiveness of our frameworks. We also discuss several potential specific applications and extensions of our RoTS frameworks.", + "primary_area": "machine learning iv", + "author": "Menghui Zhou; Yu Zhang; Yun Yang; Tong Liu; Po Yang", + "authorids": "", + "aff": "Department of Software, Yunnan University, Kunming, China; Department of Computer Science, Sheffield University, Sheffield, UK; Department of Software, Yunnan University, Kunming, China; Department of Computer Science, Sheffield University, Sheffield, UK; Department of Computer Science, Sheffield University, Sheffield, UK", + "bibtex": "@article{Zhou_Zhang_Yang_Liu_Yang_2023, title={Robust Temporal Smoothness in Multi-Task Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26351}, DOI={10.1609/aaai.v37i9.26351}, abstractNote={Multi-task learning models based on temporal smoothness assumption, in which each time point of a sequence of time points concerns a task of prediction, assume the adjacent tasks are similar to each other. However, the effect of outliers is not taken into account. In this paper, we show that even only one outlier task will destroy the performance of the entire model. To solve this problem, we propose two Robust Temporal Smoothness (RoTS) frameworks. 
Compared with the existing models based on temporal relation, our methods not only chase the temporal smoothness information but identify outlier tasks, however, without increasing the computational complexity. Detailed theoretical analyses are presented to evaluate the performance of our methods. Experimental results on synthetic and real-life datasets demonstrate the effectiveness of our frameworks. We also discuss several potential specific applications and extensions of our RoTS frameworks.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Menghui and Zhang, Yu and Yang, Yun and Liu, Tong and Yang, Po}, year={2023}, month={Jun.}, pages={11426-11434} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26351/26123", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26351", + "pdf_size": 2119708, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4615145472991626154&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ynu.edu.cn;sheffield.ac.uk;ynu.edu.cn;sheffield.ac.uk;sheffield.ac.uk", + "email": "mail.ynu.edu.cn;sheffield.ac.uk;ynu.edu.cn;sheffield.ac.uk;sheffield.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;1", + "aff_unique_norm": "Yunnan University;Sheffield University", + "aff_unique_dep": "Department of Software;Department of Computer Science", + "aff_unique_url": "http://www.ynu.edu.cn;https://www.sheffield.ac.uk", + "aff_unique_abbr": ";Sheffield", + "aff_campus_unique_index": "0;1;0;1;1", + "aff_campus_unique": "Kunming;Sheffield", + "aff_country_unique_index": "0;1;0;1;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "article-26941", + "title": "Robust Training for AC-OPF (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Electricity network operators use computationally demanding mathematical 
models to optimize AC power flow (AC-OPF). Recent work applies neural networks (NN) rather than optimization methods to estimate locally optimal solutions. However, NN training data is costly and current models cannot guarantee optimal or feasible solutions. This study proposes a robust NN training approach, which starts with a small amount of seed training data and uses iterative feedback to generate additional data in regions where the model makes poor predictions. The method is applied to non-linear univariate and multivariate test functions, and an IEEE 6-bus AC-OPF system. Results suggest robust training can achieve NN prediction performance similar to, or better than, regular NN training, while using significantly less data.", + "primary_area": "", + "author": "Fuat Can Beylunioglu; Mehrdad Pirnia; P. Robert Duimering; Vijay Ganesh", + "authorids": "", + "aff": "Management Sciences, University of Waterloo; Management Sciences, University of Waterloo; Management Sciences, University of Waterloo; Computer Science, University of Waterloo", + "bibtex": "@article{Beylunioglu_Pirnia_Duimering_Ganesh_2024, title={Robust Training for AC-OPF (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26941}, DOI={10.1609/aaai.v37i13.26941}, abstractNote={Electricity network operators use computationally demanding mathematical models to optimize AC power flow (AC-OPF). Recent work applies neural networks (NN) rather than optimization methods to estimate locally optimal solutions. However, NN training data is costly and current models cannot guarantee optimal or feasible solutions. This study proposes a robust NN training approach, which starts with a small amount of seed training data and uses iterative feedback to generate additional data in regions where the model makes poor predictions. The method is applied to non-linear univariate and multivariate test functions, and an IEEE 6-bus AC-OPF system. 
Results suggest robust training can achieve NN prediction performance similar to, or better than, regular NN training, while using significantly less data.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Beylunioglu, Fuat Can and Pirnia, Mehrdad and Duimering, P. Robert and Ganesh, Vijay}, year={2024}, month={Jul.}, pages={16162-16163} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26941/26713", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26941", + "pdf_size": 132888, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12845789240345561686&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca", + "email": "uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Waterloo", + "aff_unique_dep": "Management Sciences", + "aff_unique_url": "https://uwaterloo.ca", + "aff_unique_abbr": "UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26736", + "title": "Robust Training of Neural Networks against Bias Field Perturbations", + "track": "aaai special track", + "status": "Technical", + "abstract": "We introduce the problem of training neural networks such that they are robust against a class of smooth intensity perturbations modelled by bias fields. We first develop an approach towards this goal based on a state-of-the-art robust training method utilising Interval Bound Propagation (IBP). We analyse the resulting algorithm and observe that IBP often produces very loose bounds for bias field perturbations, which may be detrimental to training. 
We then propose an alternative approach based on Symbolic Interval Propagation (SIP), which usually results in significantly tighter bounds than IBP. We present ROBNET, a tool implementing these approaches for bias field robust training. In experiments networks trained with the SIP-based approach achieved up to 31% higher certified robustness while also maintaining a better accuracy than networks trained with the IBP approach.", + "primary_area": "safe and robust ai", + "author": "Patrick Henriksen; Alessio Lomuscio", + "authorids": "", + "aff": "Imperial College London; Safe Intelligence", + "bibtex": "@article{Henriksen_Lomuscio_2023, title={Robust Training of Neural Networks against Bias Field Perturbations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26736}, DOI={10.1609/aaai.v37i12.26736}, abstractNote={We introduce the problem of training neural networks such that they are robust against a class of smooth intensity perturbations modelled by bias fields. We first develop an approach towards this goal based on a state-of-the-art robust training method utilising Interval Bound Propagation (IBP). We analyse the resulting algorithm and observe that IBP often produces very loose bounds for bias field perturbations, which may be detrimental to training. We then propose an alternative approach based on Symbolic Interval Propagation (SIP), which usually results in significantly tighter bounds than IBP. We present ROBNET, a tool implementing these approaches for bias field robust training. 
In experiments networks trained with the SIP-based approach achieved up to 31% higher certified robustness while also maintaining a better accuracy than networks trained with the IBP approach.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Henriksen, Patrick and Lomuscio, Alessio}, year={2023}, month={Jun.}, pages={14865-14873} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26736/26508", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26736", + "pdf_size": 151727, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=833879689212685674&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "imperial.ac.uk;safeintelligence.ai", + "email": "imperial.ac.uk;safeintelligence.ai", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Imperial College London;Safe Intelligence", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.imperial.ac.uk;", + "aff_unique_abbr": "ICL;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "article-25354", + "title": "Robust Video Portrait Reenactment via Personalized Representation Quantization", + "track": "main", + "status": "Technical", + "abstract": "While progress has been made in the field of portrait reenactment, the problem of how to produce high-fidelity and robust videos remains. Recent studies normally find it challenging to handle rarely seen target poses due to the limitation of source data. This paper proposes the Video Portrait via Non-local Quantization Modeling (VPNQ) framework, which produces pose- and disturbance-robust reenactable video portraits. 
Our key insight is to learn position-invariant quantized local patch representations and build a mapping between simple driving signals and local textures with non-local spatial-temporal modeling. Specifically, instead of learning a universal quantized codebook, we identify that a personalized one can be trained to preserve desired position-invariant local details better. Then, a simple representation of projected landmarks can be used as sufficient driving signals to avoid 3D rendering. Following, we employ a carefully designed Spatio-Temporal Transformer to predict reasonable and temporally consistent quantized tokens from the driving signal. The predicted codes can be decoded back to robust and high-quality videos. Comprehensive experiments have been conducted to validate the effectiveness of our approach.", + "primary_area": "computer vision ii", + "author": "Kaisiyuan Wang; Changcheng Liang; Hang Zhou; Jiaxiang Tang; Qianyi Wu; Dongliang He; Zhibin Hong; Jingtuo Liu; Errui Ding; Ziwei Liu; Jingdong Wang", + "authorids": "", + "aff": "The University of Sydney; Xidian University; Baidu Inc.; Peking University; Monash University; Baidu Inc.; Baidu Inc.; Baidu Inc.; Baidu Inc.; S-Lab, Nanyang Technological University; Baidu Inc.", + "bibtex": "@article{Wang_Liang_Zhou_Tang_Wu_He_Hong_Liu_Ding_Liu_Wang_2023, title={Robust Video Portrait Reenactment via Personalized Representation Quantization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25354}, DOI={10.1609/aaai.v37i2.25354}, abstractNote={While progress has been made in the field of portrait reenactment, the problem of how to produce high-fidelity and robust videos remains. Recent studies normally find it challenging to handle rarely seen target poses due to the limitation of source data. This paper proposes the Video Portrait via Non-local Quantization Modeling (VPNQ) framework, which produces pose- and disturbance-robust reenactable video portraits. 
Our key insight is to learn position-invariant quantized local patch representations and build a mapping between simple driving signals and local textures with non-local spatial-temporal modeling. Specifically, instead of learning a universal quantized codebook, we identify that a personalized one can be trained to preserve desired position-invariant local details better. Then, a simple representation of projected landmarks can be used as sufficient driving signals to avoid 3D rendering. Following, we employ a carefully designed Spatio-Temporal Transformer to predict reasonable and temporally consistent quantized tokens from the driving signal. The predicted codes can be decoded back to robust and high-quality videos. Comprehensive experiments have been conducted to validate the effectiveness of our approach.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Kaisiyuan and Liang, Changcheng and Zhou, Hang and Tang, Jiaxiang and Wu, Qianyi and He, Dongliang and Hong, Zhibin and Liu, Jingtuo and Ding, Errui and Liu, Ziwei and Wang, Jingdong}, year={2023}, month={Jun.}, pages={2564-2572} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25354/25126", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25354", + "pdf_size": 2038910, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17552895181274445346&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 2, + "aff_domain": "sydney.edu.au;163.com;baidu.com;pku.edu.cn;monash.edu;baidu.com;gmail.com;baidu.com;baidu.com;gmail.com;baidu.com", + "email": "sydney.edu.au;163.com;baidu.com;pku.edu.cn;monash.edu;baidu.com;gmail.com;baidu.com;baidu.com;gmail.com;baidu.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;1;2;3;4;2;2;2;2;5;2", + "aff_unique_norm": "University of Sydney;Xidian University;Baidu Inc.;Peking University;Monash University;Nanyang Technological University", + 
"aff_unique_dep": ";;;;;S-Lab", + "aff_unique_url": "https://www.sydney.edu.au;http://www.xidian.edu.cn/;https://www.baidu.com;http://www.pku.edu.cn;https://www.monash.edu;https://www.ntu.edu.sg", + "aff_unique_abbr": "USYD;Xidian;Baidu;Peking U;Monash;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;0;1;1;1;1;2;1", + "aff_country_unique": "Australia;China;Singapore" + }, + { + "id": "article-26813", + "title": "Robust and Adaptive Deep Learning via Bayesian Principles", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Deep learning models have achieved tremendous successes in accurate predictions for computer vision, natural language processing and speech recognition applications. However, to succeed in high-risk and safety-critical domains such as healthcare and finance, these deep learning models need to be made reliable and trustworthy. Specifically, they need to be robust and adaptive to real-world environments which can be drastically different from the training settings. In this talk, I will advocate for Bayesian principles to achieve the goal of building robust and adaptive deep learning models. I will introduce a suite of uncertainty quantification methods for Bayesian deep learning, and demonstrate applications enabled by accurate uncertainty estimates, e.g., robust prediction, continual learning and repairing model failures. 
I will conclude by discussing the research challenges and potential impact for robust and adaptive deep learning models.\n\nThis paper is part of the AAAI-23 New Faculty Highlights.", + "primary_area": "", + "author": "Yingzhen Li", + "authorids": "", + "aff": "Department of Computing, Imperial College London", + "bibtex": "@article{Li_2024, title={Robust and Adaptive Deep Learning via Bayesian Principles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26813}, DOI={10.1609/aaai.v37i13.26813}, abstractNote={Deep learning models have achieved tremendous successes in accurate predictions for computer vision, natural language processing and speech recognition applications. However, to succeed in high-risk and safety-critical domains such as healthcare and finance, these deep learning models need to be made reliable and trustworthy. Specifically, they need to be robust and adaptive to real-world environments which can be drastically different from the training settings. In this talk, I will advocate for Bayesian principles to achieve the goal of building robust and adaptive deep learning models. I will introduce a suite of uncertainty quantification methods for Bayesian deep learning, and demonstrate applications enabled by accurate uncertainty estimates, e.g., robust prediction, continual learning and repairing model failures. I will conclude by discussing the research challenges and potential impact for robust and adaptive deep learning models. 
This paper is part of the AAAI-23 New Faculty Highlights.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yingzhen}, year={2024}, month={Jul.}, pages={15446-15446} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26813/26585", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26813", + "pdf_size": 44996, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15733295975780242378&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "imperial.ac.uk", + "email": "imperial.ac.uk", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Imperial College London", + "aff_unique_dep": "Department of Computing", + "aff_unique_url": "https://www.imperial.ac.uk", + "aff_unique_abbr": "Imperial", + "aff_campus_unique_index": "0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25906", + "title": "Robust and Fast Measure of Information via Low-Rank Representation", + "track": "main", + "status": "Technical", + "abstract": "The matrix-based R\u00e9nyi's entropy allows us to directly quantify information measures from given data, without explicit estimation of the underlying probability distribution. This intriguing property makes it widely applied in statistical inference and machine learning tasks. However, this information theoretical quantity is not robust against noise in the data, and is computationally prohibitive in large-scale applications. To address these issues, we propose a novel measure of information, termed low-rank matrix-based R\u00e9nyi's entropy, based on low-rank representations of infinitely divisible kernel matrices. 
The proposed entropy functional inherits the specialty of the original definition to directly quantify information from data, but enjoys additional advantages including robustness and effective calculation. Specifically, our low-rank variant is more sensitive to informative perturbations induced by changes in underlying distributions, while being insensitive to uninformative ones caused by noises. Moreover, low-rank R\u00e9nyi's entropy can be efficiently approximated by random projection and Lanczos iteration techniques, reducing the overall complexity from O(n\u00b3) to O(n\u00b2s) or even O(ns\u00b2), where n is the number of data samples and s \u226a n. We conduct large-scale experiments to evaluate the effectiveness of this new information measure, demonstrating superior results compared to matrix-based R\u00e9nyi's entropy in terms of both performance and computational efficiency.", + "primary_area": "machine learning i", + "author": "Yuxin Dong; Tieliang Gong; Shujian Yu; Hong Chen; Chen Li", + "authorids": "", + "aff": "Xi\u2019an Jiaotong University, China; Xi\u2019an Jiaotong University, China; Vrije Universiteit Amsterdam; Huazhong Agricultural University, China; Xi\u2019an Jiaotong University, China", + "bibtex": "@article{Dong_Gong_Yu_Chen_Li_2023, title={Robust and Fast Measure of Information via Low-Rank Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25906}, DOI={10.1609/aaai.v37i6.25906}, abstractNote={The matrix-based R\u00e9nyi\u2019s entropy allows us to directly quantify information measures from given data, without explicit estimation of the underlying probability distribution. This intriguing property makes it widely applied in statistical inference and machine learning tasks. However, this information theoretical quantity is not robust against noise in the data, and is computationally prohibitive in large-scale applications. 
To address these issues, we propose a novel measure of information, termed low-rank matrix-based R\u00e9nyi\u2019s entropy, based on low-rank representations of infinitely divisible kernel matrices. The proposed entropy functional inherits the specialty of the original definition to directly quantify information from data, but enjoys additional advantages including robustness and effective calculation. Specifically, our low-rank variant is more sensitive to informative perturbations induced by changes in underlying distributions, while being insensitive to uninformative ones caused by noises. Moreover, low-rank R\u00e9nyi\u2019s entropy can be efficiently approximated by random projection and Lanczos iteration techniques, reducing the overall complexity from O(n\u00b3) to O(n\u00b2s) or even O(ns\u00b2), where n is the number of data samples and s \u226a n. We conduct large-scale experiments to evaluate the effectiveness of this new information measure, demonstrating superior results compared to matrix-based R\u00e9nyi\u2019s entropy in terms of both performance and computational efficiency.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dong, Yuxin and Gong, Tieliang and Yu, Shujian and Chen, Hong and Li, Chen}, year={2023}, month={Jun.}, pages={7450-7458} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25906/25678", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25906", + "pdf_size": 234296, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=848249017043090979&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "stu.xjtu.edu.cn;gmail.com;gmail.com;mail.hzau.edu.cn;xjtu.edu.cn", + "email": "stu.xjtu.edu.cn;gmail.com;gmail.com;mail.hzau.edu.cn;xjtu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Xi'an Jiaotong University;Vrije Universiteit Amsterdam;Huazhong 
Agricultural University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://en.xjtu.edu.cn/;https://www.vu.nl;http://www.hzau.edu.cn/", + "aff_unique_abbr": "XJTU;VU Amsterdam;HAU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;Netherlands" + }, + { + "id": "article-27078", + "title": "Robust-MSA: Understanding the Impact of Modality Noise on Multimodal Sentiment Analysis", + "track": "demonstrations", + "status": "Technical", + "abstract": "Improving model robustness against potential modality noise, as an essential step for adapting multimodal models to real-world applications, has received increasing attention among researchers. For Multimodal Sentiment Analysis (MSA), there is also a debate on whether multimodal models are more effective against noisy features than unimodal ones. Stressing on intuitive illustration and in-depth analysis of these concerns, we present Robust-MSA, an interactive platform that visualizes the impact of modality noise as well as simple defence methods to help researchers know better about how their models perform with imperfect real-world data.", + "primary_area": "", + "author": "Huisheng Mao; Baozheng Zhang; Hua Xu; Ziqi Yuan; Yihe Liu", + "authorids": "", + "aff": "State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China+Beijing National Research Center for Information Science and Technology(BNRist), Beijing 100084, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China+School of Information Science and Engineering, Hebei University of Science and Technology, Shijiazhuang 050018, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China+Beijing 
National Research Center for Information Science and Technology(BNRist), Beijing 100084, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China+Beijing National Research Center for Information Science and Technology(BNRist), Beijing 100084, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China+School of Information Science and Engineering, Hebei University of Science and Technology, Shijiazhuang 050018, China", + "bibtex": "@article{Mao_Zhang_Xu_Yuan_Liu_2024, title={Robust-MSA: Understanding the Impact of Modality Noise on Multimodal Sentiment Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27078}, DOI={10.1609/aaai.v37i13.27078}, abstractNote={Improving model robustness against potential modality noise, as an essential step for adapting multimodal models to real-world applications, has received increasing attention among researchers. For Multimodal Sentiment Analysis (MSA), there is also a debate on whether multimodal models are more effective against noisy features than unimodal ones. 
Stressing on intuitive illustration and in-depth analysis of these concerns, we present Robust-MSA, an interactive platform that visualizes the impact of modality noise as well as simple defence methods to help researchers know better about how their models perform with imperfect real-world data.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mao, Huisheng and Zhang, Baozheng and Xu, Hua and Yuan, Ziqi and Liu, Yihe}, year={2024}, month={Jul.}, pages={16458-16460} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27078/26850", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27078", + "pdf_size": 383533, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12076250040497816525&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.tsinghua.edu.cn; ; ; ; ", + "email": "mail.tsinghua.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+2;0+1;0+1;0+2", + "aff_unique_norm": "Tsinghua University;Beijing National Research Center for Information Science and Technology;Hebei University of Science and Technology", + "aff_unique_dep": "Department of Computer Science and Technology;;School of Information Science and Engineering", + "aff_unique_url": "https://www.tsinghua.edu.cn;;", + "aff_unique_abbr": "Tsinghua;BNRist;", + "aff_campus_unique_index": "0+0;0+1;0+0;0+0;0+1", + "aff_campus_unique": "Beijing;Shijiazhuang", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26721", + "title": "Robust-by-Design Classification via Unitary-Gradient Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "The use of neural networks in safety-critical systems requires safe and robust models, due to the existence of adversarial attacks. 
Knowing the minimal adversarial perturbation of any input x, or, equivalently, knowing the distance of x from the classification boundary, allows evaluating the classification robustness, providing certifiable predictions. Unfortunately, state-of-the-art techniques for computing such a distance are computationally expensive and hence not suited for online applications. This work proposes a novel family of classifiers, namely Signed Distance Classifiers (SDCs), that, from a theoretical perspective, directly output the exact distance of x from the classification boundary, rather than a probability score (e.g., SoftMax). SDCs represent a family of robust-by-design classifiers. To practically address the theoretical requirements of an SDC, a novel network architecture named Unitary-Gradient Neural Network is presented. Experimental results show that the proposed architecture approximates a signed distance classifier, hence allowing an online certifiable classification of x at the cost of a single inference.", + "primary_area": "safe and robust ai", + "author": "Fabio Brau; Giulio Rossolini; Alessandro Biondi; Giorgio Buttazzo", + "authorids": "", + "aff": "Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa,Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa,Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa,Italy; Department of Excellence in Robotics and AI, Scuola Superiore Sant\u2019Anna, Pisa,Italy", + "bibtex": "@article{Brau_Rossolini_Biondi_Buttazzo_2023, title={Robust-by-Design Classification via Unitary-Gradient Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26721}, DOI={10.1609/aaai.v37i12.26721}, abstractNote={The use of neural networks in safety-critical systems requires safe and robust models, due to the existence of adversarial attacks. 
Knowing the minimal adversarial perturbation of any input x, or, equivalently, knowing the distance of x from the classification boundary, allows evaluating the classification robustness, providing certifiable predictions. Unfortunately, state-of-the-art techniques for computing such a distance are computationally expensive and hence not suited for online applications. This work proposes a novel family of classifiers, namely Signed Distance Classifiers (SDCs), that, from a theoretical perspective, directly output the exact distance of x from the classification boundary, rather than a probability score (e.g., SoftMax). SDCs represent a family of robust-by-design classifiers. To practically address the theoretical requirements of an SDC, a novel network architecture named Unitary-Gradient Neural Network is presented. Experimental results show that the proposed architecture approximates a signed distance classifier, hence allowing an online certifiable classification of x at the cost of a single inference.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Brau, Fabio and Rossolini, Giulio and Biondi, Alessandro and Buttazzo, Giorgio}, year={2023}, month={Jun.}, pages={14729-14737} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26721/26493", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26721", + "pdf_size": 527017, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10845611032936461050&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 13, + "aff_domain": "santannapisa.it;santannapisa.it;santannapisa.it;santannapisa.it", + "email": "santannapisa.it;santannapisa.it;santannapisa.it;santannapisa.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Scuola Superiore Sant\u2019Anna", + "aff_unique_dep": "Department of Excellence in Robotics and AI", + "aff_unique_url": "https://www.sssup.it", + 
"aff_unique_abbr": "SSSA", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Pisa", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25765", + "title": "RobustLoc: Robust Camera Pose Regression in Challenging Driving Environments", + "track": "main", + "status": "Technical", + "abstract": "Camera relocalization has various applications in autonomous driving. Previous camera pose regression models consider only ideal scenarios where there is little environmental perturbation. To deal with challenging driving environments that may have changing seasons, weather, illumination, and the presence of unstable objects, we propose RobustLoc, which derives its robustness against perturbations from neural differential equations. Our model uses a convolutional neural network to extract feature maps from multi-view images, a robust neural differential equation diffusion block module to diffuse information interactively, and a branched pose decoder with multi-layer training to estimate the vehicle poses. Experiments demonstrate that RobustLoc surpasses current state-of-the-art camera pose regression models and achieves robust performance in various environments. 
Our code is released at: https://github.com/sijieaaa/RobustLoc", + "primary_area": "intelligent robotics", + "author": "Sijie Wang; Qiyu Kang; Rui She; Wee Peng Tay; Andreas Hartmannsgruber; Diego Navarro Navarro", + "authorids": "", + "aff": "Continental-NTU Corporate Lab, Nanyang Technological University; Continental-NTU Corporate Lab, Nanyang Technological University; Continental-NTU Corporate Lab, Nanyang Technological University; Continental-NTU Corporate Lab, Nanyang Technological University; Continental Automotive Singapore; Continental Automotive Singapore", + "bibtex": "@article{Wang_Kang_She_Tay_Hartmannsgruber_Navarro Navarro_2023, title={RobustLoc: Robust Camera Pose Regression in Challenging Driving Environments}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25765}, DOI={10.1609/aaai.v37i5.25765}, abstractNote={Camera relocalization has various applications in autonomous driving. Previous camera pose regression models consider only ideal scenarios where there is little environmental perturbation. To deal with challenging driving environments that may have changing seasons, weather, illumination, and the presence of unstable objects, we propose RobustLoc, which derives its robustness against perturbations from neural differential equations. Our model uses a convolutional neural network to extract feature maps from multi-view images, a robust neural differential equation diffusion block module to diffuse information interactively, and a branched pose decoder with multi-layer training to estimate the vehicle poses. Experiments demonstrate that RobustLoc surpasses current state-of-the-art camera pose regression models and achieves robust performance in various environments. 
Our code is released at: https://github.com/sijieaaa/RobustLoc}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Sijie and Kang, Qiyu and She, Rui and Tay, Wee Peng and Hartmannsgruber, Andreas and Navarro Navarro, Diego}, year={2023}, month={Jun.}, pages={6209-6216} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25765/25537", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25765", + "pdf_size": 2371630, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6405924744835473586&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg;ntu.edu.sg;continental.com;continental.com", + "email": "ntu.edu.sg;ntu.edu.sg;ntu.edu.sg;ntu.edu.sg;continental.com;continental.com", + "github": "https://github.com/sijieaaa/RobustLoc", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;1", + "aff_unique_norm": "Nanyang Technological University;Continental Automotive", + "aff_unique_dep": "Continental-NTU Corporate Lab;", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.continental-automotive.com", + "aff_unique_abbr": "NTU;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Singapore", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26785", + "title": "Robustness to Spurious Correlations Improves Semantic Out-of-Distribution Detection", + "track": "aaai special track", + "status": "Technical", + "abstract": "Methods which utilize the outputs or feature representations of predictive models have emerged as promising approaches for out-of-distribution (OOD) detection of image inputs. However, as demonstrated in previous work, these methods struggle to detect OOD inputs that share nuisance values (e.g. background) with in-distribution inputs. 
The detection of shared-nuisance OOD (SN-OOD) inputs is particularly relevant in real-world applications, as anomalies and in-distribution inputs tend to be captured in the same settings during deployment. In this work, we provide a possible explanation for these failures and propose nuisance-aware OOD detection to address them. Nuisance-aware OOD detection substitutes a classifier trained via Empirical Risk Minimization (ERM) with one that 1. approximates a distribution where the nuisance-label relationship is broken and 2. yields representations that are independent of the nuisance under this distribution, both marginally and conditioned on the label. We can train a classifier to achieve these objectives using Nuisance-Randomized Distillation (NuRD), an algorithm developed for OOD generalization under spurious correlations. Output- and feature-based nuisance-aware OOD detection perform substantially better than their original counterparts, succeeding even when detection based on domain generalization algorithms fails to improve performance.", + "primary_area": "safe and robust ai", + "author": "Lily H. Zhang; Rajesh Ranganath", + "authorids": "", + "aff": "New York University; New York University", + "bibtex": "@article{Zhang_Ranganath_2023, title={Robustness to Spurious Correlations Improves Semantic Out-of-Distribution Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26785}, DOI={10.1609/aaai.v37i12.26785}, abstractNote={Methods which utilize the outputs or feature representations of predictive models have emerged as promising approaches for out-of-distribution (OOD) detection of image inputs. However, as demonstrated in previous work, these methods struggle to detect OOD inputs that share nuisance values (e.g. background) with in-distribution inputs. 
The detection of shared-nuisance OOD (SN-OOD) inputs is particularly relevant in real-world applications, as anomalies and in-distribution inputs tend to be captured in the same settings during deployment. In this work, we provide a possible explanation for these failures and propose nuisance-aware OOD detection to address them. Nuisance-aware OOD detection substitutes a classifier trained via Empirical Risk Minimization (ERM) with one that 1. approximates a distribution where the nuisance-label relationship is broken and 2. yields representations that are independent of the nuisance under this distribution, both marginally and conditioned on the label. We can train a classifier to achieve these objectives using Nuisance-Randomized Distillation (NuRD), an algorithm developed for OOD generalization under spurious correlations. Output- and feature-based nuisance-aware OOD detection perform substantially better than their original counterparts, succeeding even when detection based on domain generalization algorithms fails to improve performance.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Lily H. 
and Ranganath, Rajesh}, year={2023}, month={Jun.}, pages={15305-15312} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26785/26557", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26785", + "pdf_size": 918291, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11889994119540813117&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 11, + "aff_domain": "nyu.edu;cims.nyu.edu", + "email": "nyu.edu;cims.nyu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "New York University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nyu.edu", + "aff_unique_abbr": "NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25644", + "title": "Rolling Horizon Based Temporal Decomposition for the Offline Pickup and Delivery Problem with Time Windows", + "track": "main", + "status": "Technical", + "abstract": "The offline pickup and delivery problem with time windows (PDPTW) is a classical combinatorial optimization problem in the transportation community, which has proven to be very challenging computationally. Due to the complexity of the problem, practical problem instances can be solved only via heuristics, which trade-off solution quality for computational tractability. Among the various heuristics, a common strategy is problem decomposition, that is, the reduction of a large-scale problem into a collection of smaller sub-problems, with spatial and temporal decompositions being two natural approaches. While spatial decomposition has been successful in certain settings, effective temporal decomposition has been challenging due to the difficulty of stitching together the sub-problem solutions across the decomposition boundaries. 
In this work, we introduce a novel temporal decomposition scheme for solving a class of PDPTWs that have narrow time windows, for which it is able to provide both fast and high-quality solutions. We utilize techniques that have been popularized recently in the context of online dial-a-ride problems along with the general idea of rolling horizon optimization. To the best of our knowledge, this is the first attempt to solve offline PDPTWs using such an approach. To show the performance and scalability of our framework, we use the optimization of paratransit services as a motivating example. Due to the lack of benchmark solvers similar to ours (i.e., temporal decomposition with an online solver), we compare our results with an offline heuristic algorithm using Google OR-Tools. In smaller problem instances (with an average of 129 requests per instance), the baseline approach is as competitive as our framework. However, in larger problem instances (approximately 2,500 requests per instance), our framework is more scalable and can provide good solutions to problem instances of varying degrees of difficulty, while the baseline algorithm often fails to find a feasible solution within comparable compute times.", + "primary_area": "domain s of application", + "author": "Youngseo Kim; Danushka Edirimanna; Michael Wilbur; Philip Pugliese; Aron Laszka; Abhishek Dubey; Samitha Samaranayake", + "authorids": "", + "aff": "Cornell University; Cornell University; Vanderbilt University; Chattanooga Area Regional Transportation Authority; Pennsylvania State University; Vanderbilt University; Cornell University", + "bibtex": "@article{Kim_Edirimanna_Wilbur_Pugliese_Laszka_Dubey_Samaranayake_2023, title={Rolling Horizon Based Temporal Decomposition for the Offline Pickup and Delivery Problem with Time Windows}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25644}, DOI={10.1609/aaai.v37i4.25644}, abstractNote={The offline pickup and delivery problem with time windows 
(PDPTW) is a classical combinatorial optimization problem in the transportation community, which has proven to be very challenging computationally. Due to the complexity of the problem, practical problem instances can be solved only via heuristics, which trade-off solution quality for computational tractability. Among the various heuristics, a common strategy is problem decomposition, that is, the reduction of a large-scale problem into a collection of smaller sub-problems, with spatial and temporal decompositions being two natural approaches. While spatial decomposition has been successful in certain settings, effective temporal decomposition has been challenging due to the difficulty of stitching together the sub-problem solutions across the decomposition boundaries. In this work, we introduce a novel temporal decomposition scheme for solving a class of PDPTWs that have narrow time windows, for which it is able to provide both fast and high-quality solutions. We utilize techniques that have been popularized recently in the context of online dial-a-ride problems along with the general idea of rolling horizon optimization. To the best of our knowledge, this is the first attempt to solve offline PDPTWs using such an approach. To show the performance and scalability of our framework, we use the optimization of paratransit services as a motivating example. Due to the lack of benchmark solvers similar to ours (i.e., temporal decomposition with an online solver), we compare our results with an offline heuristic algorithm using Google OR-Tools. In smaller problem instances (with an average of 129 requests per instance), the baseline approach is as competitive as our framework. 
However, in larger problem instances (approximately 2,500 requests per instance), our framework is more scalable and can provide good solutions to problem instances of varying degrees of difficulty, while the baseline algorithm often fails to find a feasible solution within comparable compute times.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Youngseo and Edirimanna, Danushka and Wilbur, Michael and Pugliese, Philip and Laszka, Aron and Dubey, Abhishek and Samaranayake, Samitha}, year={2023}, month={Jun.}, pages={5151-5159} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25644/25416", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25644", + "pdf_size": 299605, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6767128082321908419&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 14, + "aff_domain": "cornell.edu; ; ; ; ; ; ", + "email": "cornell.edu; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;3;1;0", + "aff_unique_norm": "Cornell University;Vanderbilt University;Chattanooga Area Regional Transportation Authority;Pennsylvania State University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cornell.edu;https://www.vanderbilt.edu;;https://www.psu.edu", + "aff_unique_abbr": "Cornell;Vanderbilt;CARTA;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25541", + "title": "Rule Induction in Knowledge Graphs Using Linear Programming", + "track": "main", + "status": "Technical", + "abstract": "We present a simple linear programming (LP) based method to learn compact and interpretable sets of rules encoding the facts in a knowledge graph (KG) and use these rules to solve the KG completion problem. 
Our LP model chooses a set of rules of bounded complexity from a list of candidate first-order logic rules and assigns weights to them. The complexity bound is enforced via explicit constraints. We combine simple rule generation heuristics with our rule selection LP to obtain predictions with accuracy comparable to state-of-the-art codes, even while generating much more compact rule sets. Furthermore, when we take as input rules generated by other codes, we often improve interpretability by reducing the number of chosen rules, while maintaining accuracy.", + "primary_area": "data mining and knowledge management", + "author": "Sanjeeb Dash; Joao Goncalves", + "authorids": "", + "aff": "IBM Research, Yorktown Heights, New York, USA; IBM Research, Yorktown Heights, New York, USA", + "bibtex": "@article{Dash_Goncalves_2023, title={Rule Induction in Knowledge Graphs Using Linear Programming}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25541}, DOI={10.1609/aaai.v37i4.25541}, abstractNote={We present a simple linear programming (LP) based method to learn compact and interpretable sets of rules encoding the facts in a knowledge graph (KG) and use these rules to solve the KG completion problem. Our LP model chooses a set of rules of bounded complexity from a list of candidate first-order logic rules and assigns weights to them. The complexity bound is enforced via explicit constraints. We combine simple rule generation heuristics with our rule selection LP to obtain predictions with accuracy comparable to state-of-the-art codes, even while generating much more compact rule sets. 
Furthermore, when we take as input rules generated by other codes, we often improve interpretability by reducing the number of chosen rules, while maintaining accuracy.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dash, Sanjeeb and Goncalves, Joao}, year={2023}, month={Jun.}, pages={4233-4241} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25541/25313", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25541", + "pdf_size": 321435, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7952819690054790730&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "us.ibm.com;us.ibm.com", + "email": "us.ibm.com;us.ibm.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Yorktown Heights", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26461", + "title": "Runtime Analysis for the NSGA-II: Provable Speed-Ups from Crossover", + "track": "main", + "status": "Technical", + "abstract": "Very recently, the first mathematical runtime analyses for the NSGA-II, the most common multi-objective evolutionary algorithm, have been conducted. Continuing this research direction, we prove that the NSGA-II optimizes the OneJumpZeroJump benchmark asymptotically faster when crossover is employed. Together with a parallel independent work by Dang, Opris, Salehi, and Sudholt, this is the first time such an advantage of crossover is proven for the NSGA-II. Our arguments can be transferred to single-objective optimization. They then prove that crossover can speed up the (mu+1) genetic algorithm in a different way and more pronounced than known before. 
Our experiments confirm the added value of crossover and show that the observed advantages are even larger than what our proofs can guarantee.", + "primary_area": "search and optimization", + "author": "Benjamin Doerr; Zhongdi Qu", + "authorids": "", + "aff": "Laboratoire d\u2019Informatique (LIX), Ecole Polytechnique, CNRS, Institut Polytechnique de Paris, Palaiseau, France; Laboratoire d\u2019Informatique (LIX), Ecole Polytechnique, CNRS, Institut Polytechnique de Paris, Palaiseau, France", + "bibtex": "@article{Doerr_Qu_2023, title={Runtime Analysis for the NSGA-II: Provable Speed-Ups from Crossover}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26461}, DOI={10.1609/aaai.v37i10.26461}, abstractNote={Very recently, the first mathematical runtime analyses for the NSGA-II, the most common multi-objective evolutionary algorithm, have been conducted. Continuing this research direction, we prove that the NSGA-II optimizes the OneJumpZeroJump benchmark asymptotically faster when crossover is employed. Together with a parallel independent work by Dang, Opris, Salehi, and Sudholt, this is the first time such an advantage of crossover is proven for the NSGA-II. Our arguments can be transferred to single-objective optimization. They then prove that crossover can speed up the (mu+1) genetic algorithm in a different way and more pronounced than known before. 
Our experiments confirm the added value of crossover and show that the observed advantages are even larger than what our proofs can guarantee.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Doerr, Benjamin and Qu, Zhongdi}, year={2023}, month={Jun.}, pages={12399-12407} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26461/26233", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26461", + "pdf_size": 246360, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13379272336896136279&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "lix.polytechnique.fr;gmail.com", + "email": "lix.polytechnique.fr;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Ecole Polytechnique", + "aff_unique_dep": "Laboratoire d\u2019Informatique (LIX)", + "aff_unique_url": "https://www.polytechnique.edu", + "aff_unique_abbr": "Polytechnique", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Palaiseau", + "aff_country_unique_index": "0;0", + "aff_country_unique": "France" + }, + { + "id": "article-25550", + "title": "SAH: Shifting-Aware Asymmetric Hashing for Reverse k Maximum Inner Product Search", + "track": "main", + "status": "Technical", + "abstract": "This paper investigates a new yet challenging problem called Reverse k-Maximum Inner Product Search (RkMIPS). Given a query (item) vector, a set of item vectors, and a set of user vectors, the problem of RkMIPS aims to find a set of user vectors whose inner products with the query vector are one of the k largest among the query and item vectors. We propose the first subquadratic-time algorithm, i.e., Shifting-aware Asymmetric Hashing (SAH), to tackle the RkMIPS problem. 
To speed up the Maximum Inner Product Search (MIPS) on item vectors, we design a shifting-invariant asymmetric transformation and develop a novel sublinear-time Shifting-Aware Asymmetric Locality Sensitive Hashing (SA-ALSH) scheme. Furthermore, we devise a new blocking strategy based on the Cone-Tree to effectively prune user vectors (in a batch). We prove that SAH achieves a theoretical guarantee for solving the RMIPS problem. Experimental results on five real-world datasets show that SAH runs 4~8x faster than the state-of-the-art methods for RkMIPS while achieving F1-scores of over 90%. The code is available at https://github.com/HuangQiang/SAH.", + "primary_area": "data mining and knowledge management", + "author": "Qiang Huang; Yanhao Wang; Anthony K. H. Tung", + "authorids": "", + "aff": "School of Computing, National University of Singapore, Singapore; School of Data Science and Engineering, East China Normal University, Shanghai, China; School of Computing, National University of Singapore, Singapore", + "bibtex": "@article{Huang_Wang_Tung_2023, title={SAH: Shifting-Aware Asymmetric Hashing for Reverse k Maximum Inner Product Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25550}, DOI={10.1609/aaai.v37i4.25550}, abstractNote={This paper investigates a new yet challenging problem called Reverse k-Maximum Inner Product Search (RkMIPS). Given a query (item) vector, a set of item vectors, and a set of user vectors, the problem of RkMIPS aims to find a set of user vectors whose inner products with the query vector are one of the k largest among the query and item vectors. We propose the first subquadratic-time algorithm, i.e., Shifting-aware Asymmetric Hashing (SAH), to tackle the RkMIPS problem. To speed up the Maximum Inner Product Search (MIPS) on item vectors, we design a shifting-invariant asymmetric transformation and develop a novel sublinear-time Shifting-Aware Asymmetric Locality Sensitive Hashing (SA-ALSH) scheme. 
Furthermore, we devise a new blocking strategy based on the Cone-Tree to effectively prune user vectors (in a batch). We prove that SAH achieves a theoretical guarantee for solving the RMIPS problem. Experimental results on five real-world datasets show that SAH runs 4~8x faster than the state-of-the-art methods for RkMIPS while achieving F1-scores of over 90%. The code is available at https://github.com/HuangQiang/SAH.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Qiang and Wang, Yanhao and Tung, Anthony K. H.}, year={2023}, month={Jun.}, pages={4312-4321} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25550/25322", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25550", + "pdf_size": 352336, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16620264268074847275&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff_domain": "comp.nus.edu.sg;dase.ecnu.edu.cn;comp.nus.edu.sg", + "email": "comp.nus.edu.sg;dase.ecnu.edu.cn;comp.nus.edu.sg", + "github": "https://github.com/HuangQiang/SAH", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "National University of Singapore;East China Normal University", + "aff_unique_dep": "School of Computing;School of Data Science and Engineering", + "aff_unique_url": "https://www.nus.edu.sg;http://www.ecnu.edu.cn", + "aff_unique_abbr": "NUS;ECNU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-26660", + "title": "SARAS-Net: Scale and Relation Aware Siamese Network for Change Detection", + "track": "aaai special track", + "status": "Technical", + "abstract": "Change detection (CD) aims to find the difference between two images at different times and output a change map to represent whether the region has changed or not. 
To achieve a better result in generating the change map, many State-of-The-Art (SoTA) methods design a deep learning model that has a powerful discriminative ability. However, these methods still get lower performance because they ignore spatial information and scaling changes between objects, giving rise to blurry boundaries. In addition to these, they also neglect the interactive information of two different images. To alleviate these problems, we propose our network, the Scale and Relation-Aware Siamese Network (SARAS-Net) to deal with this issue. In this paper, three modules are proposed that include relation-aware, scale-aware, and cross-transformer to tackle the problem of scene change detection more effectively. To verify our model, we tested three public datasets, including LEVIR-CD, WHU-CD, and DSFIN, and obtained SoTA accuracy. Our code is available at https://github.com/f64051041/SARAS-Net.", + "primary_area": "ai for social impact", + "author": "Chao-Peng Chen; Jun-Wei Hsieh; Ping-Yang Chen; YI-Kuan Hsieh; Bor-Shiun Wang", + "authorids": "", + "aff": "College of Artificial Intelligence and Green Energy, National Yang Ming Chiao Tung University, Taiwan; College of Artificial Intelligence and Green Energy, National Yang Ming Chiao Tung University, Taiwan; Department of Computer Science, National Yang Ming Chiao Tung University Taiwan; College of Artificial Intelligence and Green Energy, National Yang Ming Chiao Tung University, Taiwan; Department of Computer Science, National Yang Ming Chiao Tung University Taiwan", + "bibtex": "@article{Chen_Hsieh_Chen_Hsieh_Wang_2023, title={SARAS-Net: Scale and Relation Aware Siamese Network for Change Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26660}, DOI={10.1609/aaai.v37i12.26660}, abstractNote={Change detection (CD) aims to find the difference between two images at different times and output a change map to represent whether the region has changed or not. 
To achieve a better result in generating the change map, many State-of-The-Art (SoTA) methods design a deep learning model that has a powerful discriminative ability. However, these methods still get lower performance because they ignore spatial information and scaling changes between objects, giving rise to blurry boundaries. In addition to these, they also neglect the interactive information of two different images. To alleviate these problems, we propose our network, the Scale and Relation-Aware Siamese Network (SARAS-Net) to deal with this issue. In this paper, three modules are proposed that include relation-aware, scale-aware, and cross-transformer to tackle the problem of scene change detection more effectively. To verify our model, we tested three public datasets, including LEVIR-CD, WHU-CD, and DSFIN, and obtained SoTA accuracy. Our code is available at https://github.com/f64051041/SARAS-Net.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Chao-Peng and Hsieh, Jun-Wei and Chen, Ping-Yang and Hsieh, YI-Kuan and Wang, Bor-Shiun}, year={2023}, month={Jun.}, pages={14187-14195} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26660/26432", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26660", + "pdf_size": 4616647, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15516425302919941803&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;nycu.edu.tw;nycu.edu.tw;nycu.edu.tw;nycu.edu.tw", + "email": "gmail.com;nycu.edu.tw;nycu.edu.tw;nycu.edu.tw;nycu.edu.tw", + "github": "https://github.com/f64051041/SARAS-Net", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "National Yang Ming Chiao Tung University", + "aff_unique_dep": "College of Artificial Intelligence and Green Energy", + "aff_unique_url": "https://www.nycu.edu.tw", + "aff_unique_abbr": "NYCU", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-25602", + "title": "SCI: A Spectrum Concentrated Implicit Neural Compression for Biomedical Data", + "track": "main", + "status": "Technical", + "abstract": "Massive collection and explosive growth of biomedical data, demands effective compression for efficient storage, transmission and sharing. Readily available visual data compression techniques have been studied extensively but tailored for natural images/videos, and thus show limited performance on biomedical data which are of different features and larger diversity. Emerging implicit neural representation (INR) is gaining momentum and demonstrates high promise for fitting diverse visual data in target-data-specific manner, but a general compression scheme covering diverse biomedical data is so far absent. To address this issue, we firstly derive a mathematical explanation for INR's spectrum concentration property and an analytical insight on the design of INR based compressor. Further, we propose a Spectrum Concentrated Implicit neural compression (SCI) which adaptively partitions the complex biomedical data into blocks matching INR's concentrated spectrum envelop, and design a funnel shaped neural network capable of representing each block with a small number of parameters. Based on this design, we conduct compression via optimization under given budget and allocate the available parameters with high representation accuracy. The experiments show SCI's superior performance to state-of-the-art methods including commercial compressors, data-driven ones, and INR based counterparts on diverse biomedical data. 
The source code can be found at https://github.com/RichealYoung/ImplicitNeuralCompression.git.", + "primary_area": "data mining and knowledge management", + "author": "Runzhao Yang; Tingxiong Xiao; Yuxiao Cheng; Qianni Cao; Jinyuan Qu; Jinli Suo; Qionghai Dai", + "authorids": "", + "aff": "Department of Automation, Tsinghua University; Department of Automation, Tsinghua University; Department of Automation, Tsinghua University; Department of Electrical Engineering, Tsinghua University; Department of Automation, Tsinghua University; Department of Automation, Tsinghua University; Department of Automation, Tsinghua University", + "bibtex": "@article{Yang_Xiao_Cheng_Cao_Qu_Suo_Dai_2023, title={SCI: A Spectrum Concentrated Implicit Neural Compression for Biomedical Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25602}, DOI={10.1609/aaai.v37i4.25602}, abstractNote={Massive collection and explosive growth of biomedical data, demands effective compression for efficient storage, transmission and sharing. Readily available visual data compression techniques have been studied extensively but tailored for natural images/videos, and thus show limited performance on biomedical data which are of different features and larger diversity. Emerging implicit neural representation (INR) is gaining momentum and demonstrates high promise for fitting diverse visual data in target-data-specific manner, but a general compression scheme covering diverse biomedical data is so far absent. To address this issue, we firstly derive a mathematical explanation for INR\u2019s spectrum concentration property and an analytical insight on the design of INR based compressor. 
Further, we propose a Spectrum Concentrated Implicit neural compression (SCI) which adaptively partitions the complex biomedical data into blocks matching INR\u2019s concentrated spectrum envelop, and design a funnel shaped neural network capable of representing each block with a small number of parameters. Based on this design, we conduct compression via optimization under given budget and allocate the available parameters with high representation accuracy. The experiments show SCI\u2019s superior performance to state-of-the-art methods including commercial compressors, data-driven ones, and INR based counterparts on diverse biomedical data. The source code can be found at https://github.com/RichealYoung/ImplicitNeuralCompression.git.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Runzhao and Xiao, Tingxiong and Cheng, Yuxiao and Cao, Qianni and Qu, Jinyuan and Suo, Jinli and Dai, Qionghai}, year={2023}, month={Jun.}, pages={4774-4782} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25602/25374", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25602", + "pdf_size": 808657, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15678515637876312646&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "tsinghua.edu.cn; ; ; ; ;tsinghua.edu.cn; ", + "email": "tsinghua.edu.cn; ; ; ; ;tsinghua.edu.cn; ", + "github": "https://github.com/RichealYoung/ImplicitNeuralCompression.git", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "Department of Automation", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26517", + "title": "SEAT: Stable and Explainable 
Attention", + "track": "main", + "status": "Technical", + "abstract": "Attention mechanism has become a standard fixture in many state-of-the-art natural language processing (NLP) models, not only due to its outstanding performance, but also because it provides plausible innate explanations for neural architectures. However, recent studies show that attention is unstable against randomness and perturbations during training or testing, such as random seeds and slight perturbation of embeddings, which impedes it from being a faithful explanation tool. Thus, a natural question is whether we can find an alternative to vanilla attention, which is more stable and could keep the key characteristics of the explanation. In this paper, we provide a rigorous definition of such an attention method named SEAT (Stable and Explainable ATtention). Specifically, SEAT has the following three properties: (1) Its prediction distribution is close to the prediction of the vanilla attention; (2) Its top-k indices largely overlap with those of the vanilla attention; (3) It is robust w.r.t perturbations, i.e., any slight perturbation on SEAT will not change the attention and prediction distribution too much, which implicitly indicates that it is stable to randomness and perturbations. Furthermore, we propose an optimization method for obtaining SEAT, which could be considered as revising the vanilla attention. Finally, through intensive experiments on various datasets, we compare our SEAT with other baseline methods using RNN, BiLSTM and BERT architectures, with different evaluation metrics on model interpretation, stability and accuracy. 
Results show that, besides preserving the original explainability and model performance, SEAT is more stable against input perturbations and training randomness, which indicates it is a more faithful explanation.", + "primary_area": "speech natural language processing", + "author": "Lijie Hu; Yixin Liu; Ninghao Liu; Mengdi Huai; Lichao Sun; Di Wang", + "authorids": "", + "aff": "King Abdullah University of Science and Technology; Lehigh University; University of Georgia; Iowa State University; Lehigh University; King Abdullah University of Science and Technology+Computational Bioscience Research Center+SDAIA-KAUST Center of Excellence in Data Science and Artificial Intelligence", + "bibtex": "@article{Hu_Liu_Liu_Huai_Sun_Wang_2023, title={SEAT: Stable and Explainable Attention}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26517}, DOI={10.1609/aaai.v37i11.26517}, abstractNote={Attention mechanism has become a standard fixture in many state-of-the-art natural language processing (NLP) models, not only due to its outstanding performance, but also because it provides plausible innate explanations for neural architectures. However, recent studies show that attention is unstable against randomness and perturbations during training or testing, such as random seeds and slight perturbation of embeddings, which impedes it from being a faithful explanation tool. Thus, a natural question is whether we can find an alternative to vanilla attention, which is more stable and could keep the key characteristics of the explanation. In this paper, we provide a rigorous definition of such an attention method named SEAT (Stable and Explainable ATtention). 
Specifically, SEAT has the following three properties: (1) Its prediction distribution is close to the prediction of the vanilla attention; (2) Its top-k indices largely overlap with those of the vanilla attention; (3) It is robust w.r.t perturbations, i.e., any slight perturbation on SEAT will not change the attention and prediction distribution too much, which implicitly indicates that it is stable to randomness and perturbations. Furthermore, we propose an optimization method for obtaining SEAT, which could be considered as revising the vanilla attention. Finally, through intensive experiments on various datasets, we compare our SEAT with other baseline methods using RNN, BiLSTM and BERT architectures, with different evaluation metrics on model interpretation, stability and accuracy. Results show that, besides preserving the original explainability and model performance, SEAT is more stable against input perturbations and training randomness, which indicates it is a more faithful explanation.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Lijie and Liu, Yixin and Liu, Ninghao and Huai, Mengdi and Sun, Lichao and Wang, Di}, year={2023}, month={Jun.}, pages={12907-12915} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26517/26289", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26517", + "pdf_size": 4991525, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16501735010820497072&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "kaust.edu.sa;lehigh.edu;uga.edu;iastate.edu;lehigh.edu;kaust.edu.sa", + "email": "kaust.edu.sa;lehigh.edu;uga.edu;iastate.edu;lehigh.edu;kaust.edu.sa", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;1;0+4+0", + "aff_unique_norm": "King Abdullah University of Science and Technology;Lehigh University;University of Georgia;Iowa State University;Computational Bioscience 
Research Center", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.kaust.edu.sa;https://www.lehigh.edu;https://www.uga.edu;https://www.iastate.edu;", + "aff_unique_abbr": "KAUST;Lehigh;UGA;ISU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;0+0", + "aff_country_unique": "Saudi Arabia;United States;" + }, + { + "id": "article-25139", + "title": "SEFormer: Structure Embedding Transformer for 3D Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Effectively preserving and encoding structure features from objects in irregular and sparse LiDAR points is a crucial challenge to 3D object detection on the point cloud. Recently, Transformer has demonstrated promising performance on many 2D and even 3D vision tasks. Compared with the fixed and rigid convolution kernels, the self-attention mechanism in Transformer can adaptively exclude the unrelated or noisy points and is thus suitable for preserving the local spatial structure in the irregular LiDAR point cloud. However, Transformer only performs a simple sum on the point features, based on the self-attention mechanism, and all the points share the same transformation for value. A such isotropic operation cannot capture the direction-distance-oriented local structure, which is essential for 3D object detection. In this work, we propose a Structure-Embedding transFormer (SEFormer), which can not only preserve the local structure as a traditional Transformer but also have the ability to encode the local structure. Compared to the self-attention mechanism in traditional Transformer, SEFormer learns different feature transformations for value points based on the relative directions and distances to the query point. Then we propose a SEFormer-based network for high-performance 3D object detection. 
Extensive experiments show that the proposed architecture can achieve SOTA results on the Waymo Open Dataset, one of the most significant 3D detection benchmarks for autonomous driving. Specifically, SEFormer achieves 79.02% mAP, which is 1.2% higher than existing works. https://github.com/tdzdog/SEFormer.", + "primary_area": "computer vision i", + "author": "Xiaoyu Feng; Heming Du; Hehe Fan; Yueqi Duan; Yongpan Liu", + "authorids": "", + "aff": "Tsinghua University; Australian National University; National University of Singapore; Tsinghua University; Tsinghua University", + "bibtex": "@article{Feng_Du_Fan_Duan_Liu_2023, title={SEFormer: Structure Embedding Transformer for 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25139}, DOI={10.1609/aaai.v37i1.25139}, abstractNote={Effectively preserving and encoding structure features from objects in irregular and sparse LiDAR points is a crucial challenge to 3D object detection on the point cloud. Recently, Transformer has demonstrated promising performance on many 2D and even 3D vision tasks. Compared with the fixed and rigid convolution kernels, the self-attention mechanism in Transformer can adaptively exclude the unrelated or noisy points and is thus suitable for preserving the local spatial structure in the irregular LiDAR point cloud. However, Transformer only performs a simple sum on the point features, based on the self-attention mechanism, and all the points share the same transformation for value. A such isotropic operation cannot capture the direction-distance-oriented local structure, which is essential for 3D object detection. In this work, we propose a Structure-Embedding transFormer (SEFormer), which can not only preserve the local structure as a traditional Transformer but also have the ability to encode the local structure. 
Compared to the self-attention mechanism in traditional Transformer, SEFormer learns different feature transformations for value points based on the relative directions and distances to the query point. Then we propose a SEFormer-based network for high-performance 3D object detection. Extensive experiments show that the proposed architecture can achieve SOTA results on the Waymo Open Dataset, one of the most significant 3D detection benchmarks for autonomous driving. Specifically, SEFormer achieves 79.02% mAP, which is 1.2% higher than existing works. https://github.com/tdzdog/SEFormer.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Feng, Xiaoyu and Du, Heming and Fan, Hehe and Duan, Yueqi and Liu, Yongpan}, year={2023}, month={Jun.}, pages={632-640} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25139/24911", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25139", + "pdf_size": 1066975, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2979151542326114290&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn; ; ; ; ", + "email": "mails.tsinghua.edu.cn; ; ; ; ", + "github": "https://github.com/tdzdog/SEFormer", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;0", + "aff_unique_norm": "Tsinghua University;Australian National University;National University of Singapore", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.anu.edu.au;https://www.nus.edu.sg", + "aff_unique_abbr": "THU;ANU;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0;0", + "aff_country_unique": "China;Australia;Singapore" + }, + { + "id": "article-25249", + "title": "SEPT: Towards Scalable and Efficient Visual Pre-training", + "track": "main", + "status": "Technical", + "abstract": "Recently, the self-supervised pre-training paradigm has shown 
great potential in leveraging large-scale unlabeled data to improve downstream task performance. However, increasing the scale of unlabeled pre-training data in real-world scenarios requires prohibitive computational costs and faces the challenge of uncurated samples. To address these issues, we build a task-specific self-supervised pre-training framework from a data selection perspective based on a simple hypothesis that pre-training on the unlabeled samples with similar distribution to the target task can bring substantial performance gains. Buttressed by the hypothesis, we propose the first yet novel framework for Scalable and Efficient visual Pre-Training (SEPT) by introducing a retrieval pipeline for data selection. SEPT first leverage a self-supervised pre-trained model to extract the features of the entire unlabeled dataset for retrieval pipeline initialization. Then, for a specific target task, SEPT retrievals the most similar samples from the unlabeled dataset based on feature similarity for each target instance for pre-training. Finally, SEPT pre-trains the target model with the selected unlabeled samples in a self-supervised manner for target data finetuning. By decoupling the scale of pre-training and available upstream data for a target task, SEPT achieves high scalability of the upstream dataset and high efficiency of pre-training, resulting in high model architecture flexibility. 
Results on various downstream tasks demonstrate that SEPT can achieve competitive or even better performance compared with ImageNet pre-training while reducing the size of training samples by one magnitude without resorting to any extra annotations.", + "primary_area": "computer vision ii", + "author": "Yiqi Lin; Huabin Zheng; Huaping Zhong; Jinjing Zhu; Weijia Li; Conghui He; Lin Wang", + "authorids": "", + "aff": "AI Thrust, Information Hub, HKUST (Guangzhou), Guangzhou, China; SenseTime Research; SenseTime Research; AI Thrust, Information Hub, HKUST (Guangzhou), Guangzhou, China; Sun Yat-Sen University + SenseTime Research; SenseTime Research; Department of Computer Science and Engineering, HKUST, Hong Kong, China", + "bibtex": "@article{Lin_Zheng_Zhong_Zhu_Li_He_Wang_2023, title={SEPT: Towards Scalable and Efficient Visual Pre-training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25249}, DOI={10.1609/aaai.v37i2.25249}, abstractNote={Recently, the self-supervised pre-training paradigm has shown great potential in leveraging large-scale unlabeled data to improve downstream task performance. However, increasing the scale of unlabeled pre-training data in real-world scenarios requires prohibitive computational costs and faces the challenge of uncurated samples. To address these issues, we build a task-specific self-supervised pre-training framework from a data selection perspective based on a simple hypothesis that pre-training on the unlabeled samples with similar distribution to the target task can bring substantial performance gains. Buttressed by the hypothesis, we propose the first yet novel framework for Scalable and Efficient visual Pre-Training (SEPT) by introducing a retrieval pipeline for data selection. SEPT first leverage a self-supervised pre-trained model to extract the features of the entire unlabeled dataset for retrieval pipeline initialization. 
Then, for a specific target task, SEPT retrievals the most similar samples from the unlabeled dataset based on feature similarity for each target instance for pre-training. Finally, SEPT pre-trains the target model with the selected unlabeled samples in a self-supervised manner for target data finetuning. By decoupling the scale of pre-training and available upstream data for a target task, SEPT achieves high scalability of the upstream dataset and high efficiency of pre-training, resulting in high model architecture flexibility. Results on various downstream tasks demonstrate that SEPT can achieve competitive or even better performance compared with ImageNet pre-training while reducing the size of training samples by one magnitude without resorting to any extra annotations.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Yiqi and Zheng, Huabin and Zhong, Huaping and Zhu, Jinjing and Li, Weijia and He, Conghui and Wang, Lin}, year={2023}, month={Jun.}, pages={1622-1630} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25249/25021", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25249", + "pdf_size": 885623, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15937504545436073620&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "connect.hkust-gz.edu.cn;sensetime.com;sensetime.com;connect.hkust-gz.edu.cn;mail.sysu.edu.cn;sensetime.com;ust.hk", + "email": "connect.hkust-gz.edu.cn;sensetime.com;sensetime.com;connect.hkust-gz.edu.cn;mail.sysu.edu.cn;sensetime.com;ust.hk", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;0;2+1;1;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;SenseTime;Sun Yat-Sen University", + "aff_unique_dep": "AI Thrust, Information Hub;SenseTime Research;", + "aff_unique_url": "https://www.ust.hk;https://www.sensetime.com;http://www.sysu.edu.cn/", + 
"aff_unique_abbr": "HKUST;SenseTime;SYSU", + "aff_campus_unique_index": "0;0;;2", + "aff_campus_unique": "Guangzhou;;Hong Kong", + "aff_country_unique_index": "0;0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26221", + "title": "SEnsor Alignment for Multivariate Time-Series Unsupervised Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised Domain Adaptation (UDA) methods can reduce label dependency by mitigating the feature discrepancy between labeled samples in a source domain and unlabeled samples in a similar yet shifted target domain. Though achieving good performance, these methods are inapplicable for Multivariate Time-Series (MTS) data. MTS data are collected from multiple sensors, each of which follows various distributions. However, most UDA methods solely focus on aligning global features but cannot consider the distinct distributions of each sensor. To cope with such concerns, a practical domain adaptation scenario is formulated as Multivariate Time-Series Unsupervised Domain Adaptation (MTS-UDA). In this paper, we propose SEnsor Alignment (SEA) for MTS-UDA to reduce the domain discrepancy at both the local and global sensor levels. At the local sensor level, we design the endo-feature alignment to align sensor features and their correlations across domains, whose information represents the features of each sensor and the interactions between sensors. Further, to reduce domain discrepancy at the global sensor level, we design the exo-feature alignment to enforce restrictions on the global sensor features. Meanwhile, MTS also incorporates the essential spatial-temporal dependencies information between sensors, which cannot be transferred by existing UDA methods. Therefore, we model the spatial-temporal information of MTS with a multi-branch self-attention mechanism for simple and effective transfer across domains. 
Empirical results demonstrate the state-of-the-art performance of our proposed SEA on two public MTS datasets for MTS-UDA. The code is available at\n https://github.com/Frank-Wang-oss/SEA", + "primary_area": "machine learning iii", + "author": "Yucheng Wang; Yuecong Xu; Jianfei Yang; Zhenghua Chen; Min Wu; Xiaoli Li; Lihua Xie", + "authorids": "", + "aff": "Nanyang Technological University, Singapore+ Institute for Infocomm Research, A*STAR, Singapore; Institute for Infocomm Research, A*STAR, Singapore; Nanyang Technological University, Singapore; Institute for Infocomm Research, A*STAR, Singapore+Centre for Frontier AI Research, A*STAR, Singapore; Institute for Infocomm Research, A*STAR, Singapore; Nanyang Technological University, Singapore+Institute for Infocomm Research, A*STAR, Singapore+Centre for Frontier AI Research, A*STAR, Singapore; Nanyang Technological University, Singapore", + "bibtex": "@article{Wang_Xu_Yang_Chen_Wu_Li_Xie_2023, title={SEnsor Alignment for Multivariate Time-Series Unsupervised Domain Adaptation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26221}, DOI={10.1609/aaai.v37i8.26221}, abstractNote={Unsupervised Domain Adaptation (UDA) methods can reduce label dependency by mitigating the feature discrepancy between labeled samples in a source domain and unlabeled samples in a similar yet shifted target domain. Though achieving good performance, these methods are inapplicable for Multivariate Time-Series (MTS) data. MTS data are collected from multiple sensors, each of which follows various distributions. However, most UDA methods solely focus on aligning global features but cannot consider the distinct distributions of each sensor. To cope with such concerns, a practical domain adaptation scenario is formulated as Multivariate Time-Series Unsupervised Domain Adaptation (MTS-UDA). In this paper, we propose SEnsor Alignment (SEA) for MTS-UDA to reduce the domain discrepancy at both the local and global sensor levels. 
At the local sensor level, we design the endo-feature alignment to align sensor features and their correlations across domains, whose information represents the features of each sensor and the interactions between sensors. Further, to reduce domain discrepancy at the global sensor level, we design the exo-feature alignment to enforce restrictions on the global sensor features. Meanwhile, MTS also incorporates the essential spatial-temporal dependencies information between sensors, which cannot be transferred by existing UDA methods. Therefore, we model the spatial-temporal information of MTS with a multi-branch self-attention mechanism for simple and effective transfer across domains. Empirical results demonstrate the state-of-the-art performance of our proposed SEA on two public MTS datasets for MTS-UDA. The code is available at https://github.com/Frank-Wang-oss/SEA}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yucheng and Xu, Yuecong and Yang, Jianfei and Chen, Zhenghua and Wu, Min and Li, Xiaoli and Xie, Lihua}, year={2023}, month={Jun.}, pages={10253-10261} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26221/25993", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26221", + "pdf_size": 392232, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8906211212751874504&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff_domain": "e.ntu.edu.sg;e.ntu.edu.sg;e.ntu.edu.sg;e.ntu.edu.sg;i2r.a-star.edu.sg;i2r.a-star.edu.sg;ntu.edu.sg", + "email": "e.ntu.edu.sg;e.ntu.edu.sg;e.ntu.edu.sg;e.ntu.edu.sg;i2r.a-star.edu.sg;i2r.a-star.edu.sg;ntu.edu.sg", + "github": "https://github.com/Frank-Wang-oss/SEA", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;0;1+2;1;0+1+2;0", + "aff_unique_norm": "Nanyang Technological University;Institute for Infocomm Research;A*STAR", + "aff_unique_dep": ";;Centre for Frontier AI Research", + 
"aff_unique_url": "https://www.ntu.edu.sg;https://www.i2r.a-star.edu.sg;https://www.a-star.edu.sg", + "aff_unique_abbr": "NTU;I2R;A*STAR", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0;0;0+0+0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25324", + "title": "SHUNIT: Style Harmonization for Unpaired Image-to-Image Translation", + "track": "main", + "status": "Technical", + "abstract": "We propose a novel solution for unpaired image-to-image (I2I) translation. To translate complex images with a wide range of objects to a different domain, recent approaches often use the object annotations to perform per-class source-to-target style mapping. However, there remains a point for us to exploit in the I2I. An object in each class consists of multiple components, and all the sub-object components have different characteristics. For example, a car in CAR class consists of a car body, tires, windows and head and tail lamps, etc., and they should be handled separately for realistic I2I translation. The simplest solution to the problem will be to use more detailed annotations with sub-object component annotations than the simple object annotations, but it is not possible. The key idea of this paper is to bypass the sub-object component annotations by leveraging the original style of the input image because the original style will include the information about the characteristics of the sub-object components. Specifically, for each pixel, we use not only the per-class style gap between the source and target domains but also the pixel\u2019s original style to determine the target style of a pixel. To this end, we present Style Harmonization for unpaired I2I translation (SHUNIT). Our SHUNIT generates a new style by harmonizing the target domain style retrieved from a class memory and an original source image style. 
Instead of direct source-to-target style mapping, we aim for source and target styles harmonization. We validate our method with extensive experiments and achieve state-of-the-art performance on the latest benchmark sets. The source code is available online: https://github.com/bluejangbaljang/SHUNIT.", + "primary_area": "computer vision ii", + "author": "Seokbeom Song; Suhyeon Lee; Hongje Seong; Kyoungwon Min; Euntai Kim", + "authorids": "", + "aff": "Yonsei University; Yonsei University; Yonsei University; Korea Electronics Technology Institute; Yonsei University", + "bibtex": "@article{Song_Lee_Seong_Min_Kim_2023, title={SHUNIT: Style Harmonization for Unpaired Image-to-Image Translation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25324}, DOI={10.1609/aaai.v37i2.25324}, abstractNote={We propose a novel solution for unpaired image-to-image (I2I) translation. To translate complex images with a wide range of objects to a different domain, recent approaches often use the object annotations to perform per-class source-to-target style mapping. However, there remains a point for us to exploit in the I2I. An object in each class consists of multiple components, and all the sub-object components have different characteristics. For example, a car in CAR class consists of a car body, tires, windows and head and tail lamps, etc., and they should be handled separately for realistic I2I translation. The simplest solution to the problem will be to use more detailed annotations with sub-object component annotations than the simple object annotations, but it is not possible. The key idea of this paper is to bypass the sub-object component annotations by leveraging the original style of the input image because the original style will include the information about the characteristics of the sub-object components. 
Specifically, for each pixel, we use not only the per-class style gap between the source and target domains but also the pixel\u2019s original style to determine the target style of a pixel. To this end, we present Style Harmonization for unpaired I2I translation (SHUNIT). Our SHUNIT generates a new style by harmonizing the target domain style retrieved from a class memory and an original source image style. Instead of direct source-to-target style mapping, we aim for source and target styles harmonization. We validate our method with extensive experiments and achieve state-of-the-art performance on the latest benchmark sets. The source code is available online: https://github.com/bluejangbaljang/SHUNIT.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Seokbeom and Lee, Suhyeon and Seong, Hongje and Min, Kyoungwon and Kim, Euntai}, year={2023}, month={Jun.}, pages={2292-2302} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25324/25096", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25324", + "pdf_size": 1299987, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3888339952458141270&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;keti.re.kr;yonsei.ac.kr", + "email": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;keti.re.kr;yonsei.ac.kr", + "github": "https://github.com/bluejangbaljang/SHUNIT", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Yonsei University;Korea Electronics Technology Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.yonsei.ac.kr;https://www.keti.re.kr", + "aff_unique_abbr": "Yonsei;KETI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25902", + "title": "SKDBERT: Compressing BERT via Stochastic 
Knowledge Distillation", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we propose Stochastic Knowledge Distillation (SKD) to obtain compact BERT-style language model dubbed SKDBERT. In each distillation iteration, SKD samples a teacher model from a pre-defined teacher team, which consists of multiple teacher models with multi-level capacities, to transfer knowledge into student model in an one-to-one manner. Sampling distribution plays an important role in SKD. We heuristically present three types of sampling distributions to assign appropriate probabilities for multi-level teacher models. SKD has two advantages: 1) it can preserve the diversities of multi-level teacher models via stochastically sampling single teacher model in each distillation iteration, and 2) it can also improve the efficacy of knowledge distillation via multi-level teacher models when large capacity gap exists between the teacher model and the student model. Experimental results on GLUE benchmark show that SKDBERT reduces the size of a BERT model by 40% while retaining 99.5% performances of language understanding and being 100% faster.", + "primary_area": "machine learning i", + "author": "Zixiang Ding; Guoqing Jiang; Shuai Zhang; Lin Guo; Wei Lin", + "authorids": "", + "aff": "Meituan; Meituan; Meituan; Meituan; Individual", + "bibtex": "@article{Ding_Jiang_Zhang_Guo_Lin_2023, title={SKDBERT: Compressing BERT via Stochastic Knowledge Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25902}, DOI={10.1609/aaai.v37i6.25902}, abstractNote={In this paper, we propose Stochastic Knowledge Distillation (SKD) to obtain compact BERT-style language model dubbed SKDBERT. In each distillation iteration, SKD samples a teacher model from a pre-defined teacher team, which consists of multiple teacher models with multi-level capacities, to transfer knowledge into student model in an one-to-one manner. 
Sampling distribution plays an important role in SKD. We heuristically present three types of sampling distributions to assign appropriate probabilities for multi-level teacher models. SKD has two advantages: 1) it can preserve the diversities of multi-level teacher models via stochastically sampling single teacher model in each distillation iteration, and 2) it can also improve the efficacy of knowledge distillation via multi-level teacher models when large capacity gap exists between the teacher model and the student model. Experimental results on GLUE benchmark show that SKDBERT reduces the size of a BERT model by 40% while retaining 99.5% performances of language understanding and being 100% faster.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ding, Zixiang and Jiang, Guoqing and Zhang, Shuai and Guo, Lin and Lin, Wei}, year={2023}, month={Jun.}, pages={7414-7422} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25902/25674", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25902", + "pdf_size": 690038, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15325672384753052536&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "meituan.com;meituan.com;meituan.com;meituan.com;163.com", + "email": "meituan.com;meituan.com;meituan.com;meituan.com;163.com", + "github": "", + "project": "https://arxiv.org/pdf/2211.14466.pdf", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Meituan;Individual", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.meituan.com;", + "aff_unique_abbr": "Meituan;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26541", + "title": "SKIER: A Symbolic Knowledge Integrated Model for Conversational Emotion Recognition", + "track": "main", + "status": "Technical", + 
"abstract": "Emotion recognition in conversation (ERC) has received increasing attention from the research community. However, the ERC task is challenging, largely due to the complex and unstructured properties of multi-party conversations. Besides, the majority of daily dialogues take place in a specific context or circumstance, which requires rich external knowledge to understand the background of a certain dialogue. In this paper, we address these challenges by explicitly modeling the discourse relations between utterances and incorporating symbolic knowledge into multi-party conversations. We first introduce a dialogue parsing algorithm into ERC and further improve the algorithm through a transfer learning method. Moreover, we leverage different symbolic knowledge graph relations to learn knowledge-enhanced features for the ERC task. Extensive experiments on three benchmarks demonstrate that both dialogue structure graphs and symbolic knowledge are beneficial to the model performance on the task. Additionally, experimental results indicate that the proposed model surpasses baseline models on several indices.", + "primary_area": "speech natural language processing", + "author": "Wei Li; Luyao Zhu; Rui Mao; Erik Cambria", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore", + "bibtex": "@article{Li_Zhu_Mao_Cambria_2023, title={SKIER: A Symbolic Knowledge Integrated Model for Conversational Emotion Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26541}, DOI={10.1609/aaai.v37i11.26541}, abstractNote={Emotion recognition in conversation (ERC) has received increasing attention from the research community. 
However, the ERC task is challenging, largely due to the complex and unstructured properties of multi-party conversations. Besides, the majority of daily dialogues take place in a specific context or circumstance, which requires rich external knowledge to understand the background of a certain dialogue. In this paper, we address these challenges by explicitly modeling the discourse relations between utterances and incorporating symbolic knowledge into multi-party conversations. We first introduce a dialogue parsing algorithm into ERC and further improve the algorithm through a transfer learning method. Moreover, we leverage different symbolic knowledge graph relations to learn knowledge-enhanced features for the ERC task. Extensive experiments on three benchmarks demonstrate that both dialogue structure graphs and symbolic knowledge are beneficial to the model performance on the task. Additionally, experimental results indicate that the proposed model surpasses baseline models on several indices.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Wei and Zhu, Luyao and Mao, Rui and Cambria, Erik}, year={2023}, month={Jun.}, pages={13121-13129} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26541/26313", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26541", + "pdf_size": 352223, + "gs_citation": 74, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4605161344557024141&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 13, + "aff_domain": "e.ntu.edu.sg;e.ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "email": "e.ntu.edu.sg;e.ntu.edu.sg;ntu.edu.sg;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nanyang Technological University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.ntu.edu.sg", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "0;0;0;0", + 
"aff_campus_unique": "Singapore", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26175", + "title": "SLIQ: Quantum Image Similarity Networks on Noisy Quantum Computers", + "track": "main", + "status": "Technical", + "abstract": "Exploration into quantum machine learning has grown\ntremendously in recent years due to the ability of quantum\ncomputers to speed up classical programs. However, these ef-\nforts have yet to solve unsupervised similarity detection tasks\ndue to the challenge of porting them to run on quantum com-\nputers. To overcome this challenge, we propose SLIQ, the\nfirst open-sourced work for resource-efficient quantum sim-\nilarity detection networks, built with practical and effective\nquantum learning and variance-reducing algorithms.", + "primary_area": "machine learning iii", + "author": "Daniel Silver; Tirthak Patel; Aditya Ranjan; Harshitta Gandhi; William Cutler; Devesh Tiwari", + "authorids": "", + "aff": "Northeastern University; Northeastern University; Northeastern University; Northeastern University; Northeastern University; Northeastern University", + "bibtex": "@article{Silver_Patel_Ranjan_Gandhi_Cutler_Tiwari_2023, title={SLIQ: Quantum Image Similarity Networks on Noisy Quantum Computers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26175}, DOI={10.1609/aaai.v37i8.26175}, abstractNote={Exploration into quantum machine learning has grown\ntremendously in recent years due to the ability of quantum\ncomputers to speed up classical programs. However, these ef-\nforts have yet to solve unsupervised similarity detection tasks\ndue to the challenge of porting them to run on quantum com-\nputers. 
To overcome this challenge, we propose SLIQ, the\nfirst open-sourced work for resource-efficient quantum sim-\nilarity detection networks, built with practical and effective\nquantum learning and variance-reducing algorithms.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Silver, Daniel and Patel, Tirthak and Ranjan, Aditya and Gandhi, Harshitta and Cutler, William and Tiwari, Devesh}, year={2023}, month={Jun.}, pages={9846-9854} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26175/25947", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26175", + "pdf_size": 1737779, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16095645544668696066&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu", + "email": "northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu", + "github": "https://github.com/SilverEngineered/SliQ", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Northeastern University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.northeastern.edu", + "aff_unique_abbr": "NEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26350", + "title": "SLOTH: Structured Learning and Task-Based Optimization for Time Series Forecasting on Hierarchies", + "track": "main", + "status": "Technical", + "abstract": "Multivariate time series forecasting with hierarchical structure\nis widely used in real-world applications, e.g., sales\npredictions for the geographical hierarchy formed by cities,\nstates, and countries. 
The hierarchical time series (HTS) forecasting\nincludes two sub-tasks, i.e., forecasting and reconciliation.\nIn the previous works, hierarchical information is only\nintegrated in the reconciliation step to maintain coherency,\nbut not in forecasting step for accuracy improvement. In this\npaper, we propose two novel tree-based feature integration\nmechanisms, i.e., top-down convolution and bottom-up attention\nto leverage the information of the hierarchical structure\nto improve the forecasting performance. Moreover, unlike\nmost previous reconciliation methods which either rely\non strong assumptions or focus on coherent constraints only,\nwe utilize deep neural optimization networks, which not only\nachieve coherency without any assumptions, but also allow\nmore flexible and realistic constraints to achieve task-based\ntargets, e.g., lower under-estimation penalty and meaningful\ndecision-making loss to facilitate the subsequent downstream\ntasks. Experiments on real-world datasets demonstrate that\nour tree-based feature integration mechanism achieves superior\nperformances on hierarchical forecasting tasks compared\nto the state-of-the-art methods, and our neural optimization\nnetworks can be applied to real-world tasks effectively without\nany additional effort under coherence and task-based constraints.", + "primary_area": "machine learning iv", + "author": "Fan Zhou; Chen Pan; Lintao Ma; Yu Liu; Shiyu Wang; James Zhang; Xinxin Zhu; Xuanwei Hu; Yunhua Hu; Yangfei Zheng; Lei Lei; Hu Yun", + "authorids": "", + "aff": "Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group + Alibaba Inc", + "bibtex": "@article{Zhou_Pan_Ma_Liu_Wang_Zhang_Zhu_Hu_Hu_Zheng_Lei_Yun_2023, title={SLOTH: Structured Learning and Task-Based Optimization for Time Series Forecasting on Hierarchies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26350}, DOI={10.1609/aaai.v37i9.26350}, 
abstractNote={Multivariate time series forecasting with hierarchical structure\nis widely used in real-world applications, e.g., sales\npredictions for the geographical hierarchy formed by cities,\nstates, and countries. The hierarchical time series (HTS) forecasting\nincludes two sub-tasks, i.e., forecasting and reconciliation.\nIn the previous works, hierarchical information is only\nintegrated in the reconciliation step to maintain coherency,\nbut not in forecasting step for accuracy improvement. In this\npaper, we propose two novel tree-based feature integration\nmechanisms, i.e., top-down convolution and bottom-up attention\nto leverage the information of the hierarchical structure\nto improve the forecasting performance. Moreover, unlike\nmost previous reconciliation methods which either rely\non strong assumptions or focus on coherent constraints only,\nwe utilize deep neural optimization networks, which not only\nachieve coherency without any assumptions, but also allow\nmore flexible and realistic constraints to achieve task-based\ntargets, e.g., lower under-estimation penalty and meaningful\ndecision-making loss to facilitate the subsequent downstream\ntasks. 
Experiments on real-world datasets demonstrate that\nour tree-based feature integration mechanism achieves superior\nperformances on hierarchical forecasting tasks compared\nto the state-of-the-art methods, and our neural optimization\nnetworks can be applied to real-world tasks effectively without\nany additional effort under coherence and task-based constraints.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Fan and Pan, Chen and Ma, Lintao and Liu, Yu and Wang, Shiyu and Zhang, James and Zhu, Xinxin and Hu, Xuanwei and Hu, Yunhua and Zheng, Yangfei and Lei, Lei and Yun, Hu}, year={2023}, month={Jun.}, pages={11417-11425} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26350/26122", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26350", + "pdf_size": 772268, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9366946703386079045&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;alibaba-inc.com", + "email": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 12, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0+1", + "aff_unique_norm": "Ant Group;Alibaba Group Holding Limited", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.antgroup.com;https://www.alibaba.com", + "aff_unique_abbr": "Ant Group;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25772", + "title": "SMT Safety Verification of Ontology-Based Processes", + "track": "main", + "status": "Technical", + "abstract": "In the context of 
verification of data-aware processes, a formal approach based on satisfiability modulo theories (SMT) has been considered to verify parameterised safety properties. This approach requires a combination of model-theoretic notions and algorithmic techniques based on backward reachability. We introduce here Ontology-Based Processes, which are a variant of one of the most investigated models in this spectrum, namely simple artifact systems (SASs), where, instead of managing a database, we operate over a description logic (DL) ontology. We prove that when the DL is expressed in (a slight extension of) RDFS, it enjoys suitable model-theoretic properties, and that by relying on such DL we can define Ontology-Based Processes to which backward reachability can still be applied. Relying on these results we are able to show that in this novel setting, verification of safety properties is decidable in PSPACE.", + "primary_area": "knowledge representation and reasoning", + "author": "Diego Calvanese; Alessandro Gianola; Andrea Mazzullo; Marco Montali", + "authorids": "", + "aff": "Faculty of Engineering, Free University of Bozen-Bolzano, Italy + Computing Science Department, Ume \u02daa University, Sweden; Faculty of Engineering, Free University of Bozen-Bolzano, Italy; Faculty of Engineering, Free University of Bozen-Bolzano, Italy; Faculty of Engineering, Free University of Bozen-Bolzano, Italy", + "bibtex": "@article{Calvanese_Gianola_Mazzullo_Montali_2023, title={SMT Safety Verification of Ontology-Based Processes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25772}, DOI={10.1609/aaai.v37i5.25772}, abstractNote={In the context of verification of data-aware processes, a formal approach based on satisfiability modulo theories (SMT) has been considered to verify parameterised safety properties. This approach requires a combination of model-theoretic notions and algorithmic techniques based on backward reachability. 
We introduce here Ontology-Based Processes, which are a variant of one of the most investigated models in this spectrum, namely simple artifact systems (SASs), where, instead of managing a database, we operate over a description logic (DL) ontology. We prove that when the DL is expressed in (a slight extension of) RDFS, it enjoys suitable model-theoretic properties, and that by relying on such DL we can define Ontology-Based Processes to which backward reachability can still be applied. Relying on these results we are able to show that in this novel setting, verification of safety properties is decidable in PSPACE.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Calvanese, Diego and Gianola, Alessandro and Mazzullo, Andrea and Montali, Marco}, year={2023}, month={Jun.}, pages={6271-6279} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25772/25544", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25772", + "pdf_size": 167337, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7053075559429389230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "inf.unibz.it;inf.unibz.it;inf.unibz.it;inf.unibz.it", + "email": "inf.unibz.it;inf.unibz.it;inf.unibz.it;inf.unibz.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;0;0", + "aff_unique_norm": "Free University of Bozen-Bolzano;Ume\u00e5 University", + "aff_unique_dep": "Faculty of Engineering;Computing Science Department", + "aff_unique_url": "https://www.unibz.it;https://www.umu.se", + "aff_unique_abbr": "UNIBZ;UMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;0", + "aff_country_unique": "Italy;Sweden" + }, + { + "id": "article-27058", + "title": "SOREO: A System for Safe and Autonomous Drones Fleet Navigation with Reinforcement Learning", + "track": "demonstrations", + "status": "Technical", + "abstract": 
"This demonstration introduces SOREO, a system that explores the possibility of extending UAVs autonomy through machine learning. It brings a contribution to the following problem: Having a fleet of drones and a geographic area, how to learn the shortest paths between any point with regards to the base points for optimal and safe package delivery?\nStarting from a set of possible actions, a virtual design of a geographic location of interest, e.g., a city, and a reward value, SOREO is capable of learning not only how to prevent collisions with obstacles, e.g., walls and buildings, but also to find the shortest path between any two points, i.e., the base and the target. SOREO exploits based on the Q-learning algorithm.", + "primary_area": "", + "author": "Reda Alami; Hakim Hacid; Lorenzo Bellone; Michal Barcis; Enrico Natalizio", + "authorids": "", + "aff": "Technology Innovation Institute, 9639 Masdar City, Abu Dhabi, United Arab Emirates; Technology Innovation Institute, 9639 Masdar City, Abu Dhabi, United Arab Emirates; Technology Innovation Institute, 9639 Masdar City, Abu Dhabi, United Arab Emirates; Technology Innovation Institute, 9639 Masdar City, Abu Dhabi, United Arab Emirates; Technology Innovation Institute, 9639 Masdar City, Abu Dhabi, United Arab Emirates", + "bibtex": "@article{Alami_Hacid_Bellone_Barcis_Natalizio_2024, title={SOREO: A System for Safe and Autonomous Drones Fleet Navigation with Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27058}, DOI={10.1609/aaai.v37i13.27058}, abstractNote={This demonstration introduces SOREO, a system that explores the possibility of extending UAVs autonomy through machine learning. 
It brings a contribution to the following problem: Having a fleet of drones and a geographic area, how to learn the shortest paths between any point with regards to the base points for optimal and safe package delivery?\nStarting from a set of possible actions, a virtual design of a geographic location of interest, e.g., a city, and a reward value, SOREO is capable of learning not only how to prevent collisions with obstacles, e.g., walls and buildings, but also to find the shortest path between any two points, i.e., the base and the target. SOREO exploits based on the Q-learning algorithm.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Alami, Reda and Hacid, Hakim and Bellone, Lorenzo and Barcis, Michal and Natalizio, Enrico}, year={2024}, month={Jul.}, pages={16398-16400} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27058/26830", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27058", + "pdf_size": 384135, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9761790285389962290&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "tii.ae;tii.ae;tii.ae;tii.ae;tii.ae", + "email": "tii.ae;tii.ae;tii.ae;tii.ae;tii.ae", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Technology Innovation Institute", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Masdar City", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United Arab Emirates" + }, + { + "id": "article-26562", + "title": "SPRING: Situated Conversation Agent Pretrained with Multimodal Questions from Incremental Layout Graph", + "track": "main", + "status": "Technical", + "abstract": "Existing multimodal conversation agents have shown impressive abilities to locate absolute positions or retrieve attributes in simple 
scenarios, but they fail to perform well when complex relative positions and information alignments are involved, which poses a bottleneck in response quality. In this paper, we propose a Situated Conversation Agent Pretrained with Multimodal Questions from Incremental Layout Graph (SPRING) with abilities of reasoning multi-hops spatial relations and connecting them with visual attributes in crowded situated scenarios. Specifically, we design two types of Multimodal Question Answering (MQA) tasks to pretrain the agent. All QA pairs utilized during pretraining are generated from novel Increment Layout Graphs (ILG). QA pair difficulty labels automatically annotated by ILG are used to promote MQA-based Curriculum Learning. Experimental results verify the SPRING's effectiveness, showing that it significantly outperforms state-of-the-art approaches on both SIMMC 1.0 and SIMMC 2.0 datasets. We release our code and data at https://github.com/LYX0501/SPRING.", + "primary_area": "speech natural language processing", + "author": "Yuxing Long; Binyuan Hui; Fulong Ye; Yanyang Li; Zhuoxin Han; Caixia Yuan; Yongbin Li; Xiaojie Wang", + "authorids": "", + "aff": "Beijing University of Posts and Telecommunications; Independent Researcher; Beijing University of Posts and Telecommunications; Independent Researcher; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Independent Researcher; Beijing University of Posts and Telecommunications", + "bibtex": "@article{Long_Hui_Ye_Li_Han_Yuan_Li_Wang_2023, title={SPRING: Situated Conversation Agent Pretrained with Multimodal Questions from Incremental Layout Graph}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26562}, DOI={10.1609/aaai.v37i11.26562}, abstractNote={Existing multimodal conversation agents have shown impressive abilities to locate absolute positions or retrieve attributes in simple scenarios, but they fail to perform well when complex relative 
positions and information alignments are involved, which poses a bottleneck in response quality. In this paper, we propose a Situated Conversation Agent Pretrained with Multimodal Questions from Incremental Layout Graph (SPRING) with abilities of reasoning multi-hops spatial relations and connecting them with visual attributes in crowded situated scenarios. Specifically, we design two types of Multimodal Question Answering (MQA) tasks to pretrain the agent. All QA pairs utilized during pretraining are generated from novel Increment Layout Graphs (ILG). QA pair difficulty labels automatically annotated by ILG are used to promote MQA-based Curriculum Learning. Experimental results verify the SPRING\u2019s effectiveness, showing that it significantly outperforms state-of-the-art approaches on both SIMMC 1.0 and SIMMC 2.0 datasets. We release our code and data at https://github.com/LYX0501/SPRING.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Long, Yuxing and Hui, Binyuan and Ye, Fulong and Li, Yanyang and Han, Zhuoxin and Yuan, Caixia and Li, Yongbin and Wang, Xiaojie}, year={2023}, month={Jun.}, pages={13309-13317} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26562/26334", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26562", + "pdf_size": 987287, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1446617988641548508&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bupt.edu.cn;gmail.com;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;gmail.com;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "https://github.com/LYX0501/SPRING", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;1;0;0;1;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Independent Researcher", + "aff_unique_dep": ";", + "aff_unique_url": 
"http://www.bupt.edu.cn/;", + "aff_unique_abbr": "BUPT;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26957", + "title": "SR-AnoGAN: You Never Detect Alone. Super Resolution in Anomaly Detection (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Despite the advance in deep learning algorithms, implementing supervised learning algorithms in medical datasets is difficult owing to the medical data's properties. This paper proposes SR-AnoGAN, which could generate higher resolution images and conduct anomaly detection more efficiently than AnoGAN. The most distinctive part of the proposed model is incorporating CNN and SRGAN into AnoGAN for reconstructing high-resolution images. Experimental results from X-ray datasets(pneumonia, covid-19) verify that the SR-AnoGAN outperforms the previous AnoGAN model through qualitative and quantitative approaches. Therefore, this paper shows the possibility of resolving data imbalance problems prevalent in the medical field, and proposing more precise diagnosis.", + "primary_area": "", + "author": "Minjong Cheon", + "authorids": "", + "aff": "Hanyang University, 222 Wangsimni -ro, Seongdong -gu, Seoul, South Korea", + "bibtex": "@article{Cheon_2024, title={SR-AnoGAN: You Never Detect Alone. Super Resolution in Anomaly Detection (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26957}, DOI={10.1609/aaai.v37i13.26957}, abstractNote={Despite the advance in deep learning algorithms, implementing supervised learning algorithms in medical datasets is difficult owing to the medical data\u2019s properties. This paper proposes SR-AnoGAN, which could generate higher resolution images and conduct anomaly detection more efficiently than AnoGAN. 
The most distinctive part of the proposed model is incorporating CNN and SRGAN into AnoGAN for reconstructing high-resolution images. Experimental results from X-ray datasets(pneumonia, covid-19) verify that the SR-AnoGAN outperforms the previous AnoGAN model through qualitative and quantitative approaches. Therefore, this paper shows the possibility of resolving data imbalance problems prevalent in the medical field, and proposing more precise diagnosis.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheon, Minjong}, year={2024}, month={Jul.}, pages={16194-16195} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26957/26729", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26957", + "pdf_size": 1092961, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7779780484172933418&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "hanyang.ac.kr", + "email": "hanyang.ac.kr", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Hanyang University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.hanyang.ac.kr", + "aff_unique_abbr": "HYU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25498", + "title": "SRoUDA: Meta Self-Training for Robust Unsupervised Domain Adaptation", + "track": "main", + "status": "Technical", + "abstract": "As acquiring manual labels on data could be costly, unsupervised domain adaptation (UDA), which transfers knowledge learned from a rich-label dataset to the unlabeled target dataset, is gaining increasingly more popularity. While extensive studies have been devoted to improving the model accuracy on target domain, an important issue of model robustness is neglected. 
To make things worse, conventional adversarial training (AT) methods for improving model robustness are inapplicable under UDA scenario since they train models on adversarial examples that are generated by supervised loss function. In this paper, we present a new meta self-training pipeline, named SRoUDA, for improving adversarial robustness of UDA models. Based on self-training paradigm, SRoUDA starts with pre-training a source model by applying UDA baseline on source labeled data and taraget unlabeled data with a developed random masked augmentation (RMA), and then alternates between adversarial target model training on pseudo-labeled target data and fine-tuning source model by a meta step. While self-training allows the direct incorporation of AT in UDA, the meta step in SRoUDA further helps in mitigating error propagation from noisy pseudo labels. Extensive experiments on various benchmark datasets demonstrate the state-of-the-art performance of SRoUDA where it achieves significant model robustness improvement without harming clean accuracy.", + "primary_area": "computer vision iii", + "author": "Wanqing Zhu; Jia-Li Yin; Bo-Hao Chen; Ximeng Liu", + "authorids": "", + "aff": "Fujian Province Key Laboratory of Information Security and Network Systems, Fuzhou 350108, China+College of Computer Science and Big Data, Fuzhou University, Fuzhou 350108, China; Fujian Province Key Laboratory of Information Security and Network Systems, Fuzhou 350108, China+College of Computer Science and Big Data, Fuzhou University, Fuzhou 350108, China; Department of Computer Science and Engineering, Yuan Ze University, Taiwan; Fujian Province Key Laboratory of Information Security and Network Systems, Fuzhou 350108, China+College of Computer Science and Big Data, Fuzhou University, Fuzhou 350108, China", + "bibtex": "@article{Zhu_Yin_Chen_Liu_2023, title={SRoUDA: Meta Self-Training for Robust Unsupervised Domain Adaptation}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25498}, DOI={10.1609/aaai.v37i3.25498}, abstractNote={As acquiring manual labels on data could be costly, unsupervised domain adaptation (UDA), which transfers knowledge learned from a rich-label dataset to the unlabeled target dataset, is gaining increasingly more popularity. While extensive studies have been devoted to improving the model accuracy on target domain, an important issue of model robustness is neglected. To make things worse, conventional adversarial training (AT) methods for improving model robustness are inapplicable under UDA scenario since they train models on adversarial examples that are generated by supervised loss function. In this paper, we present a new meta self-training pipeline, named SRoUDA, for improving adversarial robustness of UDA models. Based on self-training paradigm, SRoUDA starts with pre-training a source model by applying UDA baseline on source labeled data and taraget unlabeled data with a developed random masked augmentation (RMA), and then alternates between adversarial target model training on pseudo-labeled target data and fine-tuning source model by a meta step. While self-training allows the direct incorporation of AT in UDA, the meta step in SRoUDA further helps in mitigating error propagation from noisy pseudo labels. 
Extensive experiments on various benchmark datasets demonstrate the state-of-the-art performance of SRoUDA where it achieves significant model robustness improvement without harming clean accuracy.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Wanqing and Yin, Jia-Li and Chen, Bo-Hao and Liu, Ximeng}, year={2023}, month={Jun.}, pages={3852-3860} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25498/25270", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25498", + "pdf_size": 1422598, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6000230248659624251&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "163.com;fzu.edu;saturn.yzu.edu.tw;gmail.com", + "email": "163.com;fzu.edu;saturn.yzu.edu.tw;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;0+1", + "aff_unique_norm": "Fujian Province Key Laboratory of Information Security and Network Systems;Fuzhou University;Yuan Ze University", + "aff_unique_dep": "Information Security and Network Systems;College of Computer Science and Big Data;Department of Computer Science and Engineering", + "aff_unique_url": ";https://www.fzu.edu.cn;https://www.yzu.edu.tw", + "aff_unique_abbr": ";FZU;YZU", + "aff_campus_unique_index": "0+0;0+0;0+0", + "aff_campus_unique": "Fuzhou;", + "aff_country_unique_index": "0+0;0+0;1;0+0", + "aff_country_unique": "China;Taiwan, China" + }, + { + "id": "article-25370", + "title": "SSDA3D: Semi-supervised Domain Adaptation for 3D Object Detection from Point Cloud", + "track": "main", + "status": "Technical", + "abstract": "LiDAR-based 3D object detection is an indispensable task in advanced autonomous driving systems. 
Though impressive detection results have been achieved by superior 3D detectors, they suffer from significant performance degeneration when facing unseen domains, such as different LiDAR configurations, different cities, and weather conditions. The mainstream approaches tend to solve these challenges by leveraging unsupervised domain adaptation (UDA) techniques. However, these UDA solutions just yield unsatisfactory 3D detection results when there is a severe domain shift, e.g., from Waymo (64-beam) to nuScenes (32-beam). To address this, we present a novel Semi-Supervised Domain Adaptation method for 3D object detection (SSDA3D), where only a few labeled target data is available, yet can significantly improve the adaptation performance. In particular, our SSDA3D includes an Inter-domain Adaptation stage and an Intra-domain Generalization stage. In the first stage, an Inter-domain Point-CutMix module is presented to efficiently align the point cloud distribution across domains. The Point-CutMix generates mixed samples of an intermediate domain, thus encouraging to learn domain-invariant knowledge. Then, in the second stage, we further enhance the model for better generalization on the unlabeled target set. This is achieved by exploring Intra-domain Point-MixUp in semi-supervised learning, which essentially regularizes the pseudo label distribution. Experiments from Waymo to nuScenes show that, with only 10% labeled target data, our SSDA3D can surpass the fully-supervised oracle model with 100% target label. 
Our code is available at https://github.com/yinjunbo/SSDA3D.", + "primary_area": "computer vision iii", + "author": "Yan Wang; Junbo Yin; Wei Li; Pascal Frossard; Ruigang Yang; Jianbing Shen", + "authorids": "", + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; Inceptio; \u00b4Ecole Polytechnique F \u00b4ed\u00b4erale de Lausanne (EPFL); Inceptio; SKL-IOTSC, CIS, University of Macau", + "bibtex": "@article{Wang_Yin_Li_Frossard_Yang_Shen_2023, title={SSDA3D: Semi-supervised Domain Adaptation for 3D Object Detection from Point Cloud}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25370}, DOI={10.1609/aaai.v37i3.25370}, abstractNote={LiDAR-based 3D object detection is an indispensable task in advanced autonomous driving systems. Though impressive detection results have been achieved by superior 3D detectors, they suffer from significant performance degeneration when facing unseen domains, such as different LiDAR configurations, different cities, and weather conditions. The mainstream approaches tend to solve these challenges by leveraging unsupervised domain adaptation (UDA) techniques. However, these UDA solutions just yield unsatisfactory 3D detection results when there is a severe domain shift, e.g., from Waymo (64-beam) to nuScenes (32-beam). To address this, we present a novel Semi-Supervised Domain Adaptation method for 3D object detection (SSDA3D), where only a few labeled target data is available, yet can significantly improve the adaptation performance. In particular, our SSDA3D includes an Inter-domain Adaptation stage and an Intra-domain Generalization stage. In the first stage, an Inter-domain Point-CutMix module is presented to efficiently align the point cloud distribution across domains. The Point-CutMix generates mixed samples of an intermediate domain, thus encouraging to learn domain-invariant knowledge. 
Then, in the second stage, we further enhance the model for better generalization on the unlabeled target set. This is achieved by exploring Intra-domain Point-MixUp in semi-supervised learning, which essentially regularizes the pseudo label distribution. Experiments from Waymo to nuScenes show that, with only 10% labeled target data, our SSDA3D can surpass the fully-supervised oracle model with 100% target label. Our code is available at https://github.com/yinjunbo/SSDA3D.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yan and Yin, Junbo and Li, Wei and Frossard, Pascal and Yang, Ruigang and Shen, Jianbing}, year={2023}, month={Jun.}, pages={2707-2715} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25370/25142", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25370", + "pdf_size": 1301737, + "gs_citation": 53, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10928237791475099982&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "bit.edu.cn;gmail.com; ; ; ;um.edu.mo", + "email": "bit.edu.cn;gmail.com; ; ; ;um.edu.mo", + "github": "https://github.com/yinjunbo/SSDA3D", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;1;3", + "aff_unique_norm": "Beijing Institute of Technology;Inceptio;Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne;University of Macau", + "aff_unique_dep": ";;;Department of Computer and Information Science", + "aff_unique_url": "http://www.bit.edu.cn/;;https://www.epfl.ch;https://www.um.edu.mo", + "aff_unique_abbr": "BIT;;EPFL;UM", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Lausanne", + "aff_country_unique_index": "0;0;2;3", + "aff_country_unique": "China;;Switzerland;Macau" + }, + { + "id": "article-26580", + "title": "SSMI: Semantic Similarity and Mutual Information Maximization Based Enhancement for Chinese NER", + "track": "main", + "status": "Technical", + "abstract": "The Chinese NER task 
consists of two steps, first determining entity boundaries and then labeling them. Some previous work incorporating related words from pre-trained vocabulary into character-based models has been demonstrated to be effective. However, the number of words that characters can match in the vocabulary is large, and their meanings vary widely. It is unreasonable to concatenate all the matched words into the character's representation without making semantic distinctions. This is because words with different semantics also have distinct vectors by the distributed representation. Moreover, mutual information maximization (MIM) provides a unified way to characterize the correction between different granularity of embeddings, we find it can be used to enhance the features in our task. Consequently, this paper introduces a novel Chinese NER model named SSMI based on semantic similarity and MIM. We first match all the potential word boundaries of the input characters from the pre-trained vocabulary and employ BERT to segment the input sentence to get the segmentation containing these characters. After computing their cosine similarity, we obtain the word boundary with the highest similarity and the word group with similarity score larger than a specific threshold. Then, we concatenate the most relevant word boundaries with character vectors. We further calculate the mutual information maximization of group, character and sentence, respectively. Finally, we feed the result from the above steps to our novel network. 
The results on four Chinese public NER datasets show that our SSMI achieves state-of-the-art performance.", + "primary_area": "speech natural language processing", + "author": "Pengnian Qi; Biao Qin", + "authorids": "", + "aff": "School of Information, Renmin University of China, Beijing, China; School of Information, Renmin University of China, Beijing, China", + "bibtex": "@article{Qi_Qin_2023, title={SSMI: Semantic Similarity and Mutual Information Maximization Based Enhancement for Chinese NER}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26580}, DOI={10.1609/aaai.v37i11.26580}, abstractNote={The Chinese NER task consists of two steps, first determining entity boundaries and then labeling them. Some previous work incorporating related words from pre-trained vocabulary into character-based models has been demonstrated to be effective. However, the number of words that characters can match in the vocabulary is large, and their meanings vary widely. It is unreasonable to concatenate all the matched words into the character\u2019s representation without making semantic distinctions. This is because words with different semantics also have distinct vectors by the distributed representation. Moreover, mutual information maximization (MIM) provides a unified way to characterize the correction between different granularity of embeddings, we find it can be used to enhance the features in our task. Consequently, this paper introduces a novel Chinese NER model named SSMI based on semantic similarity and MIM. We first match all the potential word boundaries of the input characters from the pre-trained vocabulary and employ BERT to segment the input sentence to get the segmentation containing these characters. After computing their cosine similarity, we obtain the word boundary with the highest similarity and the word group with similarity score larger than a specific threshold. 
Then, we concatenate the most relevant word boundaries with character vectors. We further calculate the mutual information maximization of group, character and sentence, respectively. Finally, we feed the result from the above steps to our novel network. The results on four Chinese public NER datasets show that our SSMI achieves state-of-the-art performance.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qi, Pengnian and Qin, Biao}, year={2023}, month={Jun.}, pages={13474-13482} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26580/26352", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26580", + "pdf_size": 531747, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5400927790473788483&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Renmin University of China", + "aff_unique_dep": "School of Information", + "aff_unique_url": "http://www.ruc.edu.cn", + "aff_unique_abbr": "RUC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26553", + "title": "SSPAttack: A Simple and Sweet Paradigm for Black-Box Hard-Label Textual Adversarial Attack", + "track": "main", + "status": "Technical", + "abstract": "Hard-label textual adversarial attack is a challenging task, as only the predicted label information is available, and the text space is discrete and non-differentiable. Relevant research work is still in fancy and just a handful of methods are proposed. 
However, existing methods suffer from either the high complexity of genetic algorithms or inaccurate gradient estimation, thus are arduous to obtain adversarial examples with high semantic similarity and low perturbation rate under the tight-budget scenario. In this paper, we propose a simple and sweet paradigm for hard-label textual adversarial attack, named SSPAttack. Specifically, SSPAttack first utilizes initialization to generate an adversarial example, and removes unnecessary replacement words to reduce the number of changed words. Then it determines the replacement order and searches for an anchor synonym, thus avoiding going through all the synonyms. Finally, it pushes substitution words towards original words until an appropriate adversarial example is obtained. The core idea of SSPAttack is just swapping words whose mechanism is simple. Experimental results on eight benchmark datasets and two real-world APIs have shown that the performance of SSPAttack is sweet in terms of similarity, perturbation rate and query efficiency.", + "primary_area": "speech natural language processing", + "author": "Han Liu; Zhi Xu; Xiaotong Zhang; Xiaoming Xu; Feng Zhang; Fenglong Ma; Hongyang Chen; Hong Yu; Xianchao Zhang", + "authorids": "", + "aff": "Dalian University of Technology; Dalian University of Technology; Dalian University of Technology; Dalian University of Technology; Peking University; The Pennsylvania State University; Zhejiang Lab; Dalian University of Technology; Dalian University of Technology", + "bibtex": "@article{Liu_Xu_Zhang_Xu_Zhang_Ma_Chen_Yu_Zhang_2023, title={SSPAttack: A Simple and Sweet Paradigm for Black-Box Hard-Label Textual Adversarial Attack}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26553}, DOI={10.1609/aaai.v37i11.26553}, abstractNote={Hard-label textual adversarial attack is a challenging task, as only the predicted label information is available, and the text space is discrete and non-differentiable. 
Relevant research work is still in fancy and just a handful of methods are proposed. However, existing methods suffer from either the high complexity of genetic algorithms or inaccurate gradient estimation, thus are arduous to obtain adversarial examples with high semantic similarity and low perturbation rate under the tight-budget scenario. In this paper, we propose a simple and sweet paradigm for hard-label textual adversarial attack, named SSPAttack. Specifically, SSPAttack first utilizes initialization to generate an adversarial example, and removes unnecessary replacement words to reduce the number of changed words. Then it determines the replacement order and searches for an anchor synonym, thus avoiding going through all the synonyms. Finally, it pushes substitution words towards original words until an appropriate adversarial example is obtained. The core idea of SSPAttack is just swapping words whose mechanism is simple. Experimental results on eight benchmark datasets and two real-world APIs have shown that the performance of SSPAttack is sweet in terms of similarity, perturbation rate and query efficiency.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Han and Xu, Zhi and Zhang, Xiaotong and Xu, Xiaoming and Zhang, Feng and Ma, Fenglong and Chen, Hongyang and Yu, Hong and Zhang, Xianchao}, year={2023}, month={Jun.}, pages={13228-13235} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26553/26325", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26553", + "pdf_size": 513079, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14703799695137499597&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;gmail.com;hotmail.com;gmail.com;gmail.com;psu.edu;ieee.org;dlut.edu.cn;dlut.edu.cn", + "email": "gmail.com;gmail.com;hotmail.com;gmail.com;gmail.com;psu.edu;ieee.org;dlut.edu.cn;dlut.edu.cn", + "github": "", + 
"project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;1;2;3;0;0", + "aff_unique_norm": "Dalian University of Technology;Peking University;The Pennsylvania State University;Zhejiang Lab", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.dlut.edu.cn/;http://www.pku.edu.cn;https://www.psu.edu;http://www.zhejianglab.com", + "aff_unique_abbr": "DUT;Peking U;PSU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26547", + "title": "STAGE: Span Tagging and Greedy Inference Scheme for Aspect Sentiment Triplet Extraction", + "track": "main", + "status": "Technical", + "abstract": "Aspect Sentiment Triplet Extraction (ASTE) has become an emerging task in sentiment analysis research, aiming to extract triplets of the aspect term, its corresponding opinion term, and its associated sentiment polarity from a given sentence. Recently, many neural networks based models with different tagging schemes have been proposed, but almost all of them have their limitations: heavily relying on 1) prior assumption that each word is only associated with a single role (e.g., aspect term, or opinion term, etc. ) and 2) word-level interactions and treating each opinion/aspect as a set of independent words. Hence, they perform poorly on the complex ASTE task, such as a word associated with multiple roles or an aspect/opinion term with multiple words. Hence, we propose a novel approach, Span TAgging and Greedy infErence (STAGE), to extract sentiment triplets in span-level, where each span may consist of multiple words and play different roles simultaneously. To this end, this paper formulates the ASTE task as a multi-class span classification problem. 
Specifically, STAGE generates more accurate aspect sentiment triplet extractions via exploring span-level information and constraints, which consists of two components, namely, span tagging scheme and greedy inference strategy. The former tag all possible candidate spans based on a newly-defined tagging set. The latter retrieves the aspect/opinion term with the maximum length from the candidate sentiment snippet to output sentiment triplets. Furthermore, we propose a simple but effective model based on the STAGE, which outperforms the state-of-the-arts by a large margin on four widely-used datasets. Moreover, our STAGE can be easily generalized to other pair/triplet extraction tasks, which also demonstrates the superiority of the proposed scheme STAGE.", + "primary_area": "speech natural language processing", + "author": "Shuo Liang; Wei Wei; Xian-Ling Mao; Yuanyuan Fu; Rui Fang; Dangyang Chen", + "authorids": "", + "aff": "Cognitive Computing and Intelligent Information Processing (CCIIP) Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology + Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL); Cognitive Computing and Intelligent Information Processing (CCIIP) Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology + Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL); Department of Computer Science and Technology, Beijing Institute of Technology; Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL) + Ping An Property & Casualty Insurance company of China, Ltd; Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL) + Ping An Property & Casualty Insurance company of China, Ltd; Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL) + Ping An Property & Casualty Insurance company of China, Ltd", + "bibtex": "@article{Liang_Wei_Mao_Fu_Fang_Chen_2023, title={STAGE: Span Tagging and Greedy 
Inference Scheme for Aspect Sentiment Triplet Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26547}, DOI={10.1609/aaai.v37i11.26547}, abstractNote={Aspect Sentiment Triplet Extraction (ASTE) has become an emerging task in sentiment analysis research, aiming to extract triplets of the aspect term, its corresponding opinion term, and its associated sentiment polarity from a given sentence. Recently, many neural networks based models with different tagging schemes have been proposed, but almost all of them have their limitations: heavily relying on 1) prior assumption that each word is only associated with a single role (e.g., aspect term, or opinion term, etc. ) and 2) word-level interactions and treating each opinion/aspect as a set of independent words. Hence, they perform poorly on the complex ASTE task, such as a word associated with multiple roles or an aspect/opinion term with multiple words. Hence, we propose a novel approach, Span TAgging and Greedy infErence (STAGE), to extract sentiment triplets in span-level, where each span may consist of multiple words and play different roles simultaneously. To this end, this paper formulates the ASTE task as a multi-class span classification problem. Specifically, STAGE generates more accurate aspect sentiment triplet extractions via exploring span-level information and constraints, which consists of two components, namely, span tagging scheme and greedy inference strategy. The former tag all possible candidate spans based on a newly-defined tagging set. The latter retrieves the aspect/opinion term with the maximum length from the candidate sentiment snippet to output sentiment triplets. Furthermore, we propose a simple but effective model based on the STAGE, which outperforms the state-of-the-arts by a large margin on four widely-used datasets. 
Moreover, our STAGE can be easily generalized to other pair/triplet extraction tasks, which also demonstrates the superiority of the proposed scheme STAGE.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Shuo and Wei, Wei and Mao, Xian-Ling and Fu, Yuanyuan and Fang, Rui and Chen, Dangyang}, year={2023}, month={Jun.}, pages={13174-13182} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26547/26319", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26547", + "pdf_size": 231712, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5516246616111708541&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "hust.edu.cn;hust.edu.cn;bit.edu.cn;gmail.com;pingan.com.cn;pingan.com.cn", + "email": "hust.edu.cn;hust.edu.cn;bit.edu.cn;gmail.com;pingan.com.cn;pingan.com.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;1;0+2;0+2;0+2", + "aff_unique_norm": "Huazhong University of Science and Technology;Beijing Institute of Technology;Ping An Property & Casualty Insurance Company of China, Ltd", + "aff_unique_dep": "School of Computer Science and Technology;Department of Computer Science and Technology;", + "aff_unique_url": ";http://www.bit.edu.cn/;https://www.pingan.com", + "aff_unique_abbr": ";BIT;Ping An", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26301", + "title": "STARS: Spatial-Temporal Active Re-sampling for Label-Efficient Learning from Noisy Annotations", + "track": "main", + "status": "Technical", + "abstract": "Active learning (AL) aims to sample the most informative data instances for labeling, which makes the model fitting data efficient while significantly reducing the annotation cost. 
However, most existing AL models make a strong assumption that the annotated data instances are always assigned correct labels, which may not hold true in many practical settings. In this paper, we develop a theoretical framework to formally analyze the impact of noisy annotations and show that systematically re-sampling guarantees to reduce the noise rate, which can lead to improved generalization capability. More importantly, the theoretical framework demonstrates the key benefit of conducting active re-sampling on label-efficient learning, which is critical for AL. The theoretical results also suggest essential properties of an active re-sampling function with a fast convergence speed and guaranteed error reduction. This inspires us to design a novel spatial-temporal active re-sampling function by leveraging the important spatial and temporal properties of maximum-margin classifiers. Extensive experiments conducted on both synthetic and real-world data clearly demonstrate the effectiveness of the proposed active re-sampling function.", + "primary_area": "machine learning iv", + "author": "Dayou Yu; Weishi Shi; Qi Yu", + "authorids": "", + "aff": "Rochester Institute of Technology; University of North Texas; Rochester Institute of Technology", + "bibtex": "@article{Yu_Shi_Yu_2023, title={STARS: Spatial-Temporal Active Re-sampling for Label-Efficient Learning from Noisy Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26301}, DOI={10.1609/aaai.v37i9.26301}, abstractNote={Active learning (AL) aims to sample the most informative data instances for labeling, which makes the model fitting data efficient while significantly reducing the annotation cost. However, most existing AL models make a strong assumption that the annotated data instances are always assigned correct labels, which may not hold true in many practical settings. 
In this paper, we develop a theoretical framework to formally analyze the impact of noisy annotations and show that systematically re-sampling guarantees to reduce the noise rate, which can lead to improved generalization capability. More importantly, the theoretical framework demonstrates the key benefit of conducting active re-sampling on label-efficient learning, which is critical for AL. The theoretical results also suggest essential properties of an active re-sampling function with a fast convergence speed and guaranteed error reduction. This inspires us to design a novel spatial-temporal active re-sampling function by leveraging the important spatial and temporal properties of maximum-margin classifiers. Extensive experiments conducted on both synthetic and real-world data clearly demonstrate the effectiveness of the proposed active re-sampling function.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Dayou and Shi, Weishi and Yu, Qi}, year={2023}, month={Jun.}, pages={10980-10988} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26301/26073", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26301", + "pdf_size": 13191305, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:c0Oi0v3aFNoJ:scholar.google.com/&scioq=STARS:+Spatial-Temporal+Active+Re-sampling+for+Label-Efficient+Learning+from+Noisy+Annotations&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "rit.edu;unt.edu;rit.edu", + "email": "rit.edu;unt.edu;rit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Rochester Institute of Technology;University of North Texas", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.rit.edu;https://www.unt.edu", + "aff_unique_abbr": "RIT;UNT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + 
}, + { + "id": "article-26764", + "title": "STL-Based Synthesis of Feedback Controllers Using Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deep Reinforcement Learning (DRL) has the potential to be used for synthesizing feedback controllers (agents) for various complex systems with unknown dynamics. These systems are expected to satisfy diverse safety and liveness properties best captured using temporal logic. In RL, the reward function plays a crucial role in specifying the desired behaviour of these agents. However, the problem of designing the reward function for an RL agent to satisfy complex temporal logic specifications has received limited attention in the literature. To address this, we provide a systematic way of generating rewards in real-time by using the quantitative semantics of Signal Temporal Logic (STL), a widely used temporal logic to specify the behaviour of cyber-physical systems. We propose a new quantitative semantics for STL having several desirable properties, making it suitable for reward generation. We evaluate our STL-based reinforcement learning mechanism on several complex continuous control benchmarks and compare our STL semantics with those available in the literature in terms of their efficacy in synthesizing the controller agent. 
Experimental results establish our new semantics to be the most suitable for synthesizing feedback controllers for complex continuous dynamical systems through reinforcement learning.", + "primary_area": "safe and robust ai", + "author": "Nikhil Kumar Singh; Indranil Saha", + "authorids": "", + "aff": "Department of Computer Science and Engineering, IIT Kanpur; Department of Computer Science and Engineering, IIT Kanpur", + "bibtex": "@article{Singh_Saha_2023, title={STL-Based Synthesis of Feedback Controllers Using Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26764}, DOI={10.1609/aaai.v37i12.26764}, abstractNote={Deep Reinforcement Learning (DRL) has the potential to be used for synthesizing feedback controllers (agents) for various complex systems with unknown dynamics. These systems are expected to satisfy diverse safety and liveness properties best captured using temporal logic. In RL, the reward function plays a crucial role in specifying the desired behaviour of these agents. However, the problem of designing the reward function for an RL agent to satisfy complex temporal logic specifications has received limited attention in the literature. To address this, we provide a systematic way of generating rewards in real-time by using the quantitative semantics of Signal Temporal Logic (STL), a widely used temporal logic to specify the behaviour of cyber-physical systems. We propose a new quantitative semantics for STL having several desirable properties, making it suitable for reward generation. We evaluate our STL-based reinforcement learning mechanism on several complex continuous control benchmarks and compare our STL semantics with those available in the literature in terms of their efficacy in synthesizing the controller agent. 
Experimental results establish our new semantics to be the most suitable for synthesizing feedback controllers for complex continuous dynamical systems through reinforcement learning.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Singh, Nikhil Kumar and Saha, Indranil}, year={2023}, month={Jun.}, pages={15118-15126} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26764/26536", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26764", + "pdf_size": 180603, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10440086332036786391&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": "cse.iitk.ac.in;cse.iitk.ac.in", + "email": "cse.iitk.ac.in;cse.iitk.ac.in", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Indian Institute of Technology Kanpur", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.iitk.ac.in", + "aff_unique_abbr": "IIT Kanpur", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Kanpur", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25483", + "title": "STOA-VLP: Spatial-Temporal Modeling of Object and Action for Video-Language Pre-training", + "track": "main", + "status": "Technical", + "abstract": "Although large-scale video-language pre-training models, which usually build a global alignment between the video and the text, have achieved remarkable progress on various downstream tasks, the idea of adopting fine-grained information during the pre-training stage is not well explored. In this work, we propose STOA-VLP, a pre-training framework that jointly models object and action information across spatial and temporal dimensions. More specifically, the model regards object trajectories across frames and multiple action features from the video as fine-grained features. 
Besides, We design two auxiliary tasks to better incorporate both kinds of information into the pre-training process of the video-language model. The first is the dynamic object-text alignment task, which builds a better connection between object trajectories and the relevant noun tokens. The second is the spatial-temporal action set prediction, which guides the model to generate consistent action features by predicting actions found in the text. Extensive experiments on three downstream tasks (video captioning, text-video retrieval, and video question answering) demonstrate the effectiveness of our proposed STOA-VLP (e.g. 3.7 Rouge-L improvements on MSR-VTT video captioning benchmark, 2.9% accuracy improvements on MSVD video question answering benchmark, compared to previous approaches).", + "primary_area": "computer vision iii", + "author": "Weihong Zhong; Mao Zheng; Duyu Tang; Xuan Luo; Heng Gong; Xiaocheng Feng; Bing Qin", + "authorids": "", + "aff": "Harbin Institute of Technology; Tencent MLPD; Independent Researcher; Tencent MLPD; Harbin Institute of Technology; Harbin Institute of Technology+Peng Cheng Laboratory; Harbin Institute of Technology+Peng Cheng Laboratory", + "bibtex": "@article{Zhong_Zheng_Tang_Luo_Gong_Feng_Qin_2023, title={STOA-VLP: Spatial-Temporal Modeling of Object and Action for Video-Language Pre-training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25483}, DOI={10.1609/aaai.v37i3.25483}, abstractNote={Although large-scale video-language pre-training models, which usually build a global alignment between the video and the text, have achieved remarkable progress on various downstream tasks, the idea of adopting fine-grained information during the pre-training stage is not well explored. In this work, we propose STOA-VLP, a pre-training framework that jointly models object and action information across spatial and temporal dimensions. 
More specifically, the model regards object trajectories across frames and multiple action features from the video as fine-grained features. Besides, We design two auxiliary tasks to better incorporate both kinds of information into the pre-training process of the video-language model. The first is the dynamic object-text alignment task, which builds a better connection between object trajectories and the relevant noun tokens. The second is the spatial-temporal action set prediction, which guides the model to generate consistent action features by predicting actions found in the text. Extensive experiments on three downstream tasks (video captioning, text-video retrieval, and video question answering) demonstrate the effectiveness of our proposed STOA-VLP (e.g. 3.7 Rouge-L improvements on MSR-VTT video captioning benchmark, 2.9% accuracy improvements on MSVD video question answering benchmark, compared to previous approaches).}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhong, Weihong and Zheng, Mao and Tang, Duyu and Luo, Xuan and Gong, Heng and Feng, Xiaocheng and Qin, Bing}, year={2023}, month={Jun.}, pages={3715-3723} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25483/25255", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25483", + "pdf_size": 1968962, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15600997485162864089&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ir.hit.edu.cn;tencent.com;hotmail.com;tencent.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "email": "ir.hit.edu.cn;tencent.com;hotmail.com;tencent.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;1;0;0+3;0+3", + "aff_unique_norm": "Harbin Institute of Technology;Tencent;Independent Researcher;Peng Cheng Laboratory", + "aff_unique_dep": ";Machine Learning Platform and Data 
Engineering Group;;", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.tencent.com;;http://www.pcl.ac.cn", + "aff_unique_abbr": "HIT;Tencent;;PCL", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "article-25393", + "title": "SVFI: Spiking-Based Video Frame Interpolation for High-Speed Motion", + "track": "main", + "status": "Technical", + "abstract": "Occlusion and motion blur make it challenging to interpolate video frame, since estimating complex motions between two frames is hard and unreliable, especially in highly dynamic scenes. This paper aims to address these issues by exploiting spike stream as auxiliary visual information between frames to synthesize target frames. Instead of estimating motions by optical flow from RGB frames, we present a new dual-modal pipeline adopting both RGB frames and the corresponding spike stream as inputs (SVFI). It extracts the scene structure and objects' outline feature maps of the target frames from spike stream. Those feature maps are fused with the color and texture feature maps extracted from RGB frames to synthesize target frames. Benefited by the spike stream that contains consecutive information between two frames, SVFI can directly extract the information in occlusion and motion blur areas of target frames from spike stream, thus it is more robust than previous optical flow-based methods. Experiments show SVFI outperforms the SOTA methods on wide variety of datasets. For instance, in 7 and 15 frame skip evaluations, it shows up to 5.58 dB and 6.56 dB improvements in terms of PSNR over the corresponding second best methods BMBC and DAIN. 
SVFI also shows visually impressive performance in real-world scenes.", + "primary_area": "computer vision iii", + "author": "Lujie Xia; Jing Zhao; Ruiqin Xiong; Tiejun Huang", + "authorids": "", + "aff": "National Engineering Research Center of Visual Technology (NERCVT), Peking University + Institute of Digital Media, School of Computer Science, Peking University; National Engineering Research Center of Visual Technology (NERCVT), Peking University + Institute of Digital Media, School of Computer Science, Peking University + National Computer Network Emergency Response Technical Team/Coordination Center of China; National Engineering Research Center of Visual Technology (NERCVT), Peking University + Institute of Digital Media, School of Computer Science, Peking University; National Engineering Research Center of Visual Technology (NERCVT), Peking University + Institute of Digital Media, School of Computer Science, Peking University + Beijing Academy of Artificial Intelligence", + "bibtex": "@article{Xia_Zhao_Xiong_Huang_2023, title={SVFI: Spiking-Based Video Frame Interpolation for High-Speed Motion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25393}, DOI={10.1609/aaai.v37i3.25393}, abstractNote={Occlusion and motion blur make it challenging to interpolate video frame, since estimating complex motions between two frames is hard and unreliable, especially in highly dynamic scenes. This paper aims to address these issues by exploiting spike stream as auxiliary visual information between frames to synthesize target frames. Instead of estimating motions by optical flow from RGB frames, we present a new dual-modal pipeline adopting both RGB frames and the corresponding spike stream as inputs (SVFI). It extracts the scene structure and objects\u2019 outline feature maps of the target frames from spike stream. Those feature maps are fused with the color and texture feature maps extracted from RGB frames to synthesize target frames. 
Benefited by the spike stream that contains consecutive information between two frames, SVFI can directly extract the information in occlusion and motion blur areas of target frames from spike stream, thus it is more robust than previous optical flow-based methods. Experiments show SVFI outperforms the SOTA methods on wide variety of datasets. For instance, in 7 and 15 frame skip evaluations, it shows up to 5.58 dB and 6.56 dB improvements in terms of PSNR over the corresponding second best methods BMBC and DAIN. SVFI also shows visually impressive performance in real-world scenes.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xia, Lujie and Zhao, Jing and Xiong, Ruiqin and Huang, Tiejun}, year={2023}, month={Jun.}, pages={2910-2918} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25393/25165", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25393", + "pdf_size": 10207009, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2484107561771204241&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0+1;0+0;0+0+2", + "aff_unique_norm": "Peking University;National Computer Network Emergency Response Technical Team/Coordination Center;Beijing Academy of Artificial Intelligence", + "aff_unique_dep": "National Engineering Research Center of Visual Technology (NERCVT);;", + "aff_unique_url": "http://www.pku.edu.cn;;https://www.baaic.cn", + "aff_unique_abbr": "PKU;NCNERTT/CC;BAAI", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26359", + "title": "SVP-T: A Shape-Level Variable-Position Transformer for Multivariate Time Series 
Classification", + "track": "main", + "status": "Technical", + "abstract": "Multivariate time series classi\ufb01cation (MTSC), one of the most fundamental time series applications, has not only gained substantial research attentions but has also emerged in many real-life applications. Recently, using transformers to solve MTSC has been reported. However, current transformer-based methods take data points of individual timestamps as inputs (timestamp-level), which only capture the temporal dependencies, not the dependencies among variables. In this\npaper, we propose a novel method, called SVP-T. Specifically, we \ufb01rst propose to take time series subsequences, which can be from different variables and positions (time interval), as the inputs (shape-level). The temporal and variable dependencies are both handled by capturing the long- and short-term dependencies among shapes. Second, we propose a variable-position encoding layer (VP-layer) to utilize both the variable and position information of each shape. Third, we introduce a novel VP-based (Variable-Position) self-attention mechanism to allow the enhancing the attention weights of overlapping shapes. We evaluate our method on all UEA MTS datasets. SVP-T achieves the best accuracy rank when compared with several competitive state-of-the-art methods. Furthermore, we demonstrate the effectiveness of the VP-layer and the VP-based self-attention mechanism. Finally, we present one case study to interpret the result of SVP-T.", + "primary_area": "machine learning iv", + "author": "Rundong Zuo; Guozhong Li; Byron Choi; Sourav S Bhowmick; Daphne Ngar-yin Mah; Grace L.H. 
Wong", + "authorids": "", + "aff": "Department of Computer Science, Hong Kong Baptist University, Hong Kong SAR; Department of Computer Science, Hong Kong Baptist University, Hong Kong SAR; Department of Computer Science, Hong Kong Baptist University, Hong Kong SAR; School of Computing Engineering, Nanyang Technological University, Singapore; Department of Geography, Hong Kong Baptist University, Hong Kong SAR; Department of Medicine and Therapeutics, The Chinese University of Hong Kong, Hong Kong SAR", + "bibtex": "@article{Zuo_Li_Choi_S Bhowmick_Mah_Wong_2023, title={SVP-T: A Shape-Level Variable-Position Transformer for Multivariate Time Series Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26359}, DOI={10.1609/aaai.v37i9.26359}, abstractNote={Multivariate time series classi\ufb01cation (MTSC), one of the most fundamental time series applications, has not only gained substantial research attentions but has also emerged in many real-life applications. Recently, using transformers to solve MTSC has been reported. However, current transformer-based methods take data points of individual timestamps as inputs (timestamp-level), which only capture the temporal dependencies, not the dependencies among variables. In this\npaper, we propose a novel method, called SVP-T. Specifically, we \ufb01rst propose to take time series subsequences, which can be from different variables and positions (time interval), as the inputs (shape-level). The temporal and variable dependencies are both handled by capturing the long- and short-term dependencies among shapes. Second, we propose a variable-position encoding layer (VP-layer) to utilize both the variable and position information of each shape. Third, we introduce a novel VP-based (Variable-Position) self-attention mechanism to allow the enhancing the attention weights of overlapping shapes. We evaluate our method on all UEA MTS datasets. 
SVP-T achieves the best accuracy rank when compared with several competitive state-of-the-art methods. Furthermore, we demonstrate the effectiveness of the VP-layer and the VP-based self-attention mechanism. Finally, we present one case study to interpret the result of SVP-T.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zuo, Rundong and Li, Guozhong and Choi, Byron and S Bhowmick, Sourav and Mah, Daphne Ngar-yin and Wong, Grace L.H.}, year={2023}, month={Jun.}, pages={11497-11505} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26359/26131", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26359", + "pdf_size": 764742, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5502818692839945009&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "comp.hkbu.edu.hk;comp.hkbu.edu.hk;comp.hkbu.edu.hk;ntu.edu.sg;hkbu.edu.hk;cuhk.edu.hk", + "email": "comp.hkbu.edu.hk;comp.hkbu.edu.hk;comp.hkbu.edu.hk;ntu.edu.sg;hkbu.edu.hk;cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;2", + "aff_unique_norm": "Hong Kong Baptist University;Nanyang Technological University;The Chinese University of Hong Kong", + "aff_unique_dep": "Department of Computer Science;School of Computing Engineering;Department of Medicine and Therapeutics", + "aff_unique_url": "https://www.hkbu.edu.hk;https://www.ntu.edu.sg;https://www.cuhk.edu.hk", + "aff_unique_abbr": "HKBU;NTU;CUHK", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Hong Kong SAR;", + "aff_country_unique_index": "0;0;0;1;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25211", + "title": "SWBNet: A Stable White Balance Network for sRGB Images", + "track": "main", + "status": "Technical", + "abstract": "The white balance methods for sRGB images (sRGB-WB) aim to directly remove their color temperature shifts. 
Despite achieving promising white balance (WB) performance, the existing methods suffer from WB instability, i.e., their results are inconsistent for images with different color temperatures. We propose a stable white balance network (SWBNet) to alleviate this problem. It learns the color temperature-insensitive features to generate white-balanced images, resulting in consistent WB results. Specifically, the color temperatureinsensitive features are learned by implicitly suppressing lowfrequency information sensitive to color temperatures. Then, a color temperature contrastive loss is introduced to facilitate the most information shared among features of the same scene and different color temperatures. This way, features from the same scene are more insensitive to color temperatures regardless of the inputs. We also present a color temperature sensitivity-oriented transformer that globally perceives multiple color temperature shifts within an image and corrects them by different weights. It helps to improve the accuracy of stabilized SWBNet, especially for multiillumination sRGB images. 
Experiments indicate that our SWBNet achieves stable and remarkable WB performance.", + "primary_area": "computer vision i", + "author": "Chunxiao Li; Xuejing Kang; Zhifeng Zhang; Anlong Ming", + "authorids": "", + "aff": "School of Computer Science (National Pilot Software Engineering School), Beijing University of Posts and Telecommunications; School of Computer Science (National Pilot Software Engineering School), Beijing University of Posts and Telecommunications; School of Computer Science (National Pilot Software Engineering School), Beijing University of Posts and Telecommunications; School of Computer Science (National Pilot Software Engineering School), Beijing University of Posts and Telecommunications", + "bibtex": "@article{Li_Kang_Zhang_Ming_2023, title={SWBNet: A Stable White Balance Network for sRGB Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25211}, DOI={10.1609/aaai.v37i1.25211}, abstractNote={The white balance methods for sRGB images (sRGB-WB) aim to directly remove their color temperature shifts. Despite achieving promising white balance (WB) performance, the existing methods suffer from WB instability, i.e., their results are inconsistent for images with different color temperatures. We propose a stable white balance network (SWBNet) to alleviate this problem. It learns the color temperature-insensitive features to generate white-balanced images, resulting in consistent WB results. Specifically, the color temperatureinsensitive features are learned by implicitly suppressing lowfrequency information sensitive to color temperatures. Then, a color temperature contrastive loss is introduced to facilitate the most information shared among features of the same scene and different color temperatures. This way, features from the same scene are more insensitive to color temperatures regardless of the inputs. 
We also present a color temperature sensitivity-oriented transformer that globally perceives multiple color temperature shifts within an image and corrects them by different weights. It helps to improve the accuracy of stabilized SWBNet, especially for multiillumination sRGB images. Experiments indicate that our SWBNet achieves stable and remarkable WB performance.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Chunxiao and Kang, Xuejing and Zhang, Zhifeng and Ming, Anlong}, year={2023}, month={Jun.}, pages={1278-1286} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25211/24983", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25211", + "pdf_size": 16275718, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16550699494700542271&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25743", + "title": "SWL-Adapt: An Unsupervised Domain Adaptation Model with Sample Weight Learning for Cross-User Wearable Human Activity Recognition", + "track": "main", + "status": "Technical", + "abstract": "In practice, Wearable Human Activity Recognition (WHAR) models usually face performance degradation on the new user due to user variance. Unsupervised domain adaptation (UDA) becomes the natural solution to cross-user WHAR under annotation scarcity. 
Existing UDA models usually align samples across domains without differentiation, which ignores the difference among samples. In this paper, we propose an unsupervised domain adaptation model with sample weight learning (SWL-Adapt) for cross-user WHAR. SWL-Adapt calculates sample weights according to the classification loss and domain discrimination loss of each sample with a parameterized network. We introduce the meta-optimization based update rule to learn this network end-to-end, which is guided by meta-classification loss on the selected pseudo-labeled target samples. Therefore, this network can fit a weighting function according to the cross-user WHAR task at hand, which is superior to existing sample differentiation rules fixed for special scenarios. Extensive experiments on three public WHAR datasets demonstrate that SWL-Adapt achieves the state-of-the-art performance on the cross-user WHAR task, outperforming the best baseline by an average of 3.1% and 5.3% in accuracy and macro F1 score, respectively.", + "primary_area": "humans and ai", + "author": "Rong Hu; Ling Chen; Shenghuan Miao; Xing Tang", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University + Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University", + "bibtex": "@article{Hu_Chen_Miao_Tang_2023, title={SWL-Adapt: An Unsupervised Domain Adaptation Model with Sample Weight Learning for Cross-User Wearable Human Activity Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25743}, DOI={10.1609/aaai.v37i5.25743}, abstractNote={In practice, Wearable Human Activity Recognition (WHAR) models usually face performance degradation on the new user due to user variance. 
Unsupervised domain adaptation (UDA) becomes the natural solution to cross-user WHAR under annotation scarcity. Existing UDA models usually align samples across domains without differentiation, which ignores the difference among samples. In this paper, we propose an unsupervised domain adaptation model with sample weight learning (SWL-Adapt) for cross-user WHAR. SWL-Adapt calculates sample weights according to the classification loss and domain discrimination loss of each sample with a parameterized network. We introduce the meta-optimization based update rule to learn this network end-to-end, which is guided by meta-classification loss on the selected pseudo-labeled target samples. Therefore, this network can fit a weighting function according to the cross-user WHAR task at hand, which is superior to existing sample differentiation rules fixed for special scenarios. Extensive experiments on three public WHAR datasets demonstrate that SWL-Adapt achieves the state-of-the-art performance on the cross-user WHAR task, outperforming the best baseline by an average of 3.1% and 5.3% in accuracy and macro F1 score, respectively.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Rong and Chen, Ling and Miao, Shenghuan and Tang, Xing}, year={2023}, month={Jun.}, pages={6012-6020} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25743/25515", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25743", + "pdf_size": 601848, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4662926825892331728&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.zju.edu.cn;cs.zju.edu.cn;cs.zju.edu.cn;cs.zju.edu.cn", + "email": "cs.zju.edu.cn;cs.zju.edu.cn;cs.zju.edu.cn;cs.zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "College of Computer Science and 
Technology", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26922", + "title": "Safe Interactive Autonomy for Multi-Agent Systems", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "It is envisioned that in the near future autonomous systems such as multi-agent systems, will co-exist with humans, e.g., autonomous vehicles will share roads with human drivers. These safety-critical scenarios require formally provable safety guarantees so that the robots will never collide with humans or with each other. It is challenging to provide such guarantees in the real world due to the stochastic environments and inaccurate models of heterogeneous agents including robots and humans. My PhD research investigates decision-making algorithm design for provably-correct safety guarantees in mixed multi-agent systems.", + "primary_area": "", + "author": "Yiwei Lyu", + "authorids": "", + "aff": "Carnegie Mellon University", + "bibtex": "@article{Lyu_2024, title={Safe Interactive Autonomy for Multi-Agent Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26922}, DOI={10.1609/aaai.v37i13.26922}, abstractNote={It is envisioned that in the near future autonomous systems such as multi-agent systems, will co-exist with humans, e.g., autonomous vehicles will share roads with human drivers. These safety-critical scenarios require formally provable safety guarantees so that the robots will never collide with humans or with each other. It is challenging to provide such guarantees in the real world due to the stochastic environments and inaccurate models of heterogeneous agents including robots and humans. 
My PhD research investigates decision-making algorithm design for provably-correct safety guarantees in mixed multi-agent systems.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lyu, Yiwei}, year={2024}, month={Jul.}, pages={16123-16124} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26922/26694", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26922", + "pdf_size": 54980, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:HPFlzdWfVa0J:scholar.google.com/&scioq=Safe+Interactive+Autonomy+for+Multi-Agent+Systems&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "andrew.cmu.edu", + "email": "andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26453", + "title": "Safe Interval Path Planning with Kinodynamic Constraints", + "track": "main", + "status": "Technical", + "abstract": "Safe Interval Path Planning (SIPP) is a powerful algorithm for solving a single-agent pathfinding problem where the agent is confined to a graph and certain vertices/edges of this graph are blocked at certain time intervals due to dynamic obstacles that populate the environment. The original SIPP algorithm relies on the assumption that the agent is able to stop instantaneously. However, this assumption often does not hold in practice, e.g. a mobile robot moving at a cruising speed cannot stop immediately but rather requires gradual deceleration to a full stop that takes time. In other words, the robot is subject to kinodynamic constraints. Unfortunately, as we show in this work, in such a case, the original SIPP is incomplete. 
To this end, we introduce a novel variant of SIPP that is provably complete and optimal for planning with acceleration/deceleration. In the experimental evaluation, we show that the key property of the original SIPP still holds for the modified version: it performs much fewer expansions compared to A* and, as a result, is notably faster.", + "primary_area": "search and optimization", + "author": "Zain Alabedeen Ali; Konstantin Yakovlev", + "authorids": "", + "aff": "Moscow Institute of Physics and Technology, Moscow, Russia; Federal Research Center for Computer Science and Control of Russian Academy of Sciences, Moscow, Russia + AIRI, Moscow, Russia", + "bibtex": "@article{Ali_Yakovlev_2023, title={Safe Interval Path Planning with Kinodynamic Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26453}, DOI={10.1609/aaai.v37i10.26453}, abstractNote={Safe Interval Path Planning (SIPP) is a powerful algorithm for solving a single-agent pathfinding problem where the agent is confined to a graph and certain vertices/edges of this graph are blocked at certain time intervals due to dynamic obstacles that populate the environment. The original SIPP algorithm relies on the assumption that the agent is able to stop instantaneously. However, this assumption often does not hold in practice, e.g. a mobile robot moving at a cruising speed cannot stop immediately but rather requires gradual deceleration to a full stop that takes time. In other words, the robot is subject to kinodynamic constraints. Unfortunately, as we show in this work, in such a case, the original SIPP is incomplete. To this end, we introduce a novel variant of SIPP that is provably complete and optimal for planning with acceleration/deceleration. 
In the experimental evaluation, we show that the key property of the original SIPP still holds for the modified version: it performs much fewer expansions compared to A* and, as a result, is notably faster.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ali, Zain Alabedeen and Yakovlev, Konstantin}, year={2023}, month={Jun.}, pages={12330-12337} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26453/26225", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26453", + "pdf_size": 252678, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4288258390143481146&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "phystech.edu;isa.ru", + "email": "phystech.edu;isa.ru", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+2", + "aff_unique_norm": "Moscow Institute of Physics and Technology;Federal Research Center for Computer Science and Control;Advanced Institute for Research and Innovation", + "aff_unique_dep": ";Russian Academy of Sciences;", + "aff_unique_url": "https://www.mipt.ru/en;;", + "aff_unique_abbr": "MIPT;;AIRI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Moscow;", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "Russia" + }, + { + "id": "article-26066", + "title": "Safe Multi-View Deep Classification", + "track": "main", + "status": "Technical", + "abstract": "Multi-view deep classification expects to obtain better classification performance than using a single view. However, due to the uncertainty and inconsistency of data sources, adding data views does not necessarily lead to the performance improvements in multi-view classification. How to avoid worsening classification performance when adding views is crucial for multi-view deep learning but rarely studied. 
To tackle this limitation, in this paper, we reformulate the multi-view classification problem from the perspective of safe learning and thereby propose a Safe Multi-view Deep Classification (SMDC) method, which can guarantee that the classification performance does not deteriorate when fusing multiple views. In the SMDC method, we dynamically integrate multiple views and estimate the inherent uncertainties among multiple views with different root causes based on evidence theory. Through minimizing the uncertainties, SMDC promotes the evidences from data views for correct classification, and in the meantime excludes the incorrect evidences to produce the safe multi-view classification results. Furthermore, we theoretically prove that in the safe multi-view classification, adding data views will certainly not increase the empirical risk of classification. The experiments on various kinds of multi-view datasets validate that the proposed SMDC method can achieve precise and safe classification results.", + "primary_area": "machine learning ii", + "author": "Wei Liu; Yufei Chen; Xiaodong Yue; Changqing Zhang; Shaorong Xie", + "authorids": "", + "aff": "College of Electronics and Information Engineering, Tongji University, Shanghai, China; College of Electronics and Information Engineering, Tongji University, Shanghai, China + Artificial Intelligence Institute of Shanghai University, Shanghai, China + VLN Lab, NA VI MedTech Co., Ltd. 
Shanghai, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China + Artificial Intelligence Institute of Shanghai University, Shanghai, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China", + "bibtex": "@article{Liu_Chen_Yue_Zhang_Xie_2023, title={Safe Multi-View Deep Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26066}, DOI={10.1609/aaai.v37i7.26066}, abstractNote={Multi-view deep classification expects to obtain better classification performance than using a single view. However, due to the uncertainty and inconsistency of data sources, adding data views does not necessarily lead to the performance improvements in multi-view classification. How to avoid worsening classification performance when adding views is crucial for multi-view deep learning but rarely studied. To tackle this limitation, in this paper, we reformulate the multi-view classification problem from the perspective of safe learning and thereby propose a Safe Multi-view Deep Classification (SMDC) method, which can guarantee that the classification performance does not deteriorate when fusing multiple views. In the SMDC method, we dynamically integrate multiple views and estimate the inherent uncertainties among multiple views with different root causes based on evidence theory. Through minimizing the uncertainties, SMDC promotes the evidences from data views for correct classification, and in the meantime excludes the incorrect evidences to produce the safe multi-view classification results. Furthermore, we theoretically prove that in the safe multi-view classification, adding data views will certainly not increase the empirical risk of classification. 
The experiments on various kinds of multi-view datasets validate that the proposed SMDC method can achieve precise and safe classification results.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Wei and Chen, Yufei and Yue, Xiaodong and Zhang, Changqing and Xie, Shaorong}, year={2023}, month={Jun.}, pages={8870-8878} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26066/25838", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26066", + "pdf_size": 344708, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8462416256416921974&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 2, + "aff_domain": "outlook.com;tongji.edu.cn;shu.edu.cn;tju.edu.cn;shu.edu.cn", + "email": "outlook.com;tongji.edu.cn;shu.edu.cn;tju.edu.cn;shu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1+2;1+1;3;1", + "aff_unique_norm": "Tongji University;Shanghai University;NA VI MedTech Co., Ltd.;Tianjin University", + "aff_unique_dep": "College of Electronics and Information Engineering;Artificial Intelligence Institute;VLN Lab;College of Intelligence and Computing", + "aff_unique_url": "http://www.tongji.edu.cn;https://www.shu.edu.cn;;http://www.tju.edu.cn", + "aff_unique_abbr": "Tongji;SHU;;Tianjin University", + "aff_campus_unique_index": "0;0+0;0+0;2;0", + "aff_campus_unique": "Shanghai;;Tianjin", + "aff_country_unique_index": "0;0+0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26763", + "title": "Safe Policy Improvement for POMDPs via Finite-State Controllers", + "track": "aaai special track", + "status": "Technical", + "abstract": "We study safe policy improvement (SPI) for partially observable Markov decision processes (POMDPs). 
SPI is an offline reinforcement learning (RL) problem that assumes access to (1) historical data about an environment, and (2) the so-called behavior policy that previously generated this data by interacting with the environment. SPI methods neither require access to a model nor the environment itself, and aim to reliably improve upon the behavior policy in an offline manner. Existing methods make the strong assumption that the environment is fully observable. In our novel approach to the SPI problem for POMDPs, we assume that a finite-state controller (FSC) represents the behavior policy and that finite memory is sufficient to derive optimal policies. This assumption allows us to map the POMDP to a finite-state fully observable MDP, the history MDP. We estimate this MDP by combining the historical data and the memory of the FSC, and compute an improved policy using an off-the-shelf SPI algorithm. The underlying SPI method constrains the policy space according to the available data, such that the newly computed policy only differs from the behavior policy when sufficient data is available. We show that this new policy, converted into a new FSC for the (unknown) POMDP, outperforms the behavior policy with high probability. Experimental results on several well-established benchmarks show the applicability of the approach, even in cases where finite memory is not sufficient.", + "primary_area": "safe and robust ai", + "author": "Thiago D. 
Sim\u00e3o; Marnix Suilen; Nils Jansen", + "authorids": "", + "aff": "Department of Software Science, Radboud University, Nijmegen, The Netherlands; Department of Software Science, Radboud University, Nijmegen, The Netherlands; Department of Software Science, Radboud University, Nijmegen, The Netherlands", + "bibtex": "@article{Sim\u00e3o_Suilen_Jansen_2023, title={Safe Policy Improvement for POMDPs via Finite-State Controllers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26763}, DOI={10.1609/aaai.v37i12.26763}, abstractNote={We study safe policy improvement (SPI) for partially observable Markov decision processes (POMDPs). SPI is an offline reinforcement learning (RL) problem that assumes access to (1) historical data about an environment, and (2) the so-called behavior policy that previously generated this data by interacting with the environment. SPI methods neither require access to a model nor the environment itself, and aim to reliably improve upon the behavior policy in an offline manner. Existing methods make the strong assumption that the environment is fully observable. In our novel approach to the SPI problem for POMDPs, we assume that a finite-state controller (FSC) represents the behavior policy and that finite memory is sufficient to derive optimal policies. This assumption allows us to map the POMDP to a finite-state fully observable MDP, the history MDP. We estimate this MDP by combining the historical data and the memory of the FSC, and compute an improved policy using an off-the-shelf SPI algorithm. The underlying SPI method constrains the policy space according to the available data, such that the newly computed policy only differs from the behavior policy when sufficient data is available. We show that this new policy, converted into a new FSC for the (unknown) POMDP, outperforms the behavior policy with high probability. 
Experimental results on several well-established benchmarks show the applicability of the approach, even in cases where finite memory is not sufficient.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sim\u00e3o, Thiago D. and Suilen, Marnix and Jansen, Nils}, year={2023}, month={Jun.}, pages={15109-15117} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26763/26535", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26763", + "pdf_size": 243886, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14697896764554453871&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "ru.nl;ru.nl;ru.nl", + "email": "ru.nl;ru.nl;ru.nl", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Radboud University", + "aff_unique_dep": "Department of Software Science", + "aff_unique_url": "https://www.ru.nl", + "aff_unique_abbr": "RU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nijmegen", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "The Netherlands" + }, + { + "id": "article-26723", + "title": "Safe Reinforcement Learning via Shielding under Partial Observability", + "track": "aaai special track", + "status": "Technical", + "abstract": "Safe exploration is a common problem in reinforcement learning (RL) that aims to prevent agents from making disastrous decisions while exploring their environment. A family of approaches to this problem assume domain knowledge in the form of a (partial) model of this environment to decide upon the safety of an action. A so-called shield forces the RL agent to select only safe actions. However, for adoption in various applications, one must look beyond enforcing safety and also ensure the applicability of RL with good performance. 
We extend the applicability of shields via tight integration with state-of-the-art deep RL, and provide an extensive, empirical study in challenging, sparse-reward environments under partial observability. We show that a carefully integrated shield ensures safety and can improve the convergence rate and final performance of RL agents. We furthermore show that a shield can be used to bootstrap state-of-the-art RL agents: they remain safe after initial learning in a shielded setting, allowing us to disable a potentially too conservative shield eventually.", + "primary_area": "safe and robust ai", + "author": "Steven Carr; Nils Jansen; Sebastian Junges; Ufuk Topcu", + "authorids": "", + "aff": "The University of Texas at Austin; Radboud University, Nijmegen, The Netherlands; Radboud University, Nijmegen, The Netherlands; The University of Texas at Austin", + "bibtex": "@article{Carr_Jansen_Junges_Topcu_2023, title={Safe Reinforcement Learning via Shielding under Partial Observability}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26723}, DOI={10.1609/aaai.v37i12.26723}, abstractNote={Safe exploration is a common problem in reinforcement learning (RL) that aims to prevent agents from making disastrous decisions while exploring their environment. A family of approaches to this problem assume domain knowledge in the form of a (partial) model of this environment to decide upon the safety of an action. A so-called shield forces the RL agent to select only safe actions. However, for adoption in various applications, one must look beyond enforcing safety and also ensure the applicability of RL with good performance. We extend the applicability of shields via tight integration with state-of-the-art deep RL, and provide an extensive, empirical study in challenging, sparse-reward environments under partial observability. We show that a carefully integrated shield ensures safety and can improve the convergence rate and final performance of RL agents. 
We furthermore show that a shield can be used to bootstrap state-of-the-art RL agents: they remain safe after initial learning in a shielded setting, allowing us to disable a potentially too conservative shield eventually.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carr, Steven and Jansen, Nils and Junges, Sebastian and Topcu, Ufuk}, year={2023}, month={Jun.}, pages={14748-14756} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26723/26495", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26723", + "pdf_size": 323310, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7879786827639217392&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "utexas.edu;science.ru.nl;cs.ru.nl;utexas.edu", + "email": "utexas.edu;science.ru.nl;cs.ru.nl;utexas.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "University of Texas at Austin;Radboud University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.utexas.edu;https://www.ru.nl", + "aff_unique_abbr": "UT Austin;RU", + "aff_campus_unique_index": "0;1;1;0", + "aff_campus_unique": "Austin;Nijmegen", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "United States;The Netherlands" + }, + { + "id": "article-26729", + "title": "SafeLight: A Reinforcement Learning Method toward Collision-Free Traffic Signal Control", + "track": "aaai special track", + "status": "Technical", + "abstract": "Traffic signal control is safety-critical for our daily life. Roughly one-quarter of road accidents in the U.S. happen at intersections due to problematic signal timing, urging the development of safety-oriented intersection control. 
However, existing studies on adaptive traffic signal control using reinforcement learning technologies have focused mainly on minimizing traffic delay but neglecting the potential exposure to unsafe conditions. We, for the first time, incorporate road safety standards as enforcement to ensure the safety of existing reinforcement learning methods, aiming toward operating intersections with zero collisions. We have proposed a safety-enhanced residual reinforcement learning method (SafeLight) and employed multiple optimization techniques, such as multi-objective loss function and reward shaping for better knowledge integration. Extensive experiments are conducted using both synthetic and real-world benchmark datasets. Results show that our method can significantly reduce collisions while increasing traffic mobility.", + "primary_area": "safe and robust ai", + "author": "Wenlu Du; Junyi Ye; Jingyi Gu; Jing Li; Hua Wei; Guiling Wang", + "authorids": "", + "aff": "New Jersey Institute of Technology, Newark, USA; New Jersey Institute of Technology, Newark, USA; New Jersey Institute of Technology, Newark, USA; New Jersey Institute of Technology, Newark, USA; New Jersey Institute of Technology, Newark, USA; New Jersey Institute of Technology, Newark, USA", + "bibtex": "@article{Du_Ye_Gu_Li_Wei_Wang_2023, title={SafeLight: A Reinforcement Learning Method toward Collision-Free Traffic Signal Control}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26729}, DOI={10.1609/aaai.v37i12.26729}, abstractNote={Traffic signal control is safety-critical for our daily life. Roughly one-quarter of road accidents in the U.S. happen at intersections due to problematic signal timing, urging the development of safety-oriented intersection control. However, existing studies on adaptive traffic signal control using reinforcement learning technologies have focused mainly on minimizing traffic delay but neglecting the potential exposure to unsafe conditions. 
We, for the first time, incorporate road safety standards as enforcement to ensure the safety of existing reinforcement learning methods, aiming toward operating intersections with zero collisions. We have proposed a safety-enhanced residual reinforcement learning method (SafeLight) and employed multiple optimization techniques, such as multi-objective loss function and reward shaping for better knowledge integration. Extensive experiments are conducted using both synthetic and real-world benchmark datasets. Results show that our method can significantly reduce collisions while increasing traffic mobility.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Wenlu and Ye, Junyi and Gu, Jingyi and Li, Jing and Wei, Hua and Wang, Guiling}, year={2023}, month={Jun.}, pages={14801-14810} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26729/26501", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26729", + "pdf_size": 5133764, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18071034998935943628&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 9, + "aff_domain": "njit.edu;njit.edu;njit.edu;njit.edu;njit.edu;njit.edu", + "email": "njit.edu;njit.edu;njit.edu;njit.edu;njit.edu;njit.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "New Jersey Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.njit.edu", + "aff_unique_abbr": "NJIT", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Newark", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25950", + "title": "Safeguarded Learned Convex Optimization", + "track": "main", + "status": "Technical", + "abstract": "Applications abound in which optimization problems must be repeatedly solved, each time with new (but similar) data. 
Analytic optimization algorithms can be hand-designed to provably solve these problems in an iterative fashion. On one hand, data-driven algorithms can \"learn to optimize\" (L2O) with much fewer iterations and similar cost per iteration as general-purpose optimization algorithms. On the other hand, unfortunately, many L2O algorithms lack converge guarantees. To fuse the advantages of these approaches, we present a Safe-L2O framework. Safe-L2O updates incorporate a safeguard to guarantee convergence for convex problems with proximal and/or gradient oracles. The safeguard is simple and computationally cheap to implement, and it is activated only when the data-driven L2O updates would perform poorly or appear to diverge. This yields the numerical benefits of employing machine learning to create rapid L2O algorithms while still guaranteeing convergence. Our numerical examples show convergence of Safe-L2O algorithms, even when the provided data is not from the distribution of training data.", + "primary_area": "machine learning i", + "author": "Howard Heaton; Xiaohan Chen; Zhangyang Wang; Wotao Yin", + "authorids": "", + "aff": "Typal Research, Typal LLC; Department of Electrical and Computer and Engineering, The University of Texas at Austin; Department of Electrical and Computer and Engineering, The University of Texas at Austin; Alibaba US, DAMO Academy, Decision Intelligence Lab", + "bibtex": "@article{Heaton_Chen_Wang_Yin_2023, title={Safeguarded Learned Convex Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25950}, DOI={10.1609/aaai.v37i6.25950}, abstractNote={Applications abound in which optimization problems must be repeatedly solved, each time with new (but similar) data. Analytic optimization algorithms can be hand-designed to provably solve these problems in an iterative fashion. 
On one hand, data-driven algorithms can "learn to optimize" (L2O) with much fewer iterations and similar cost per iteration as general-purpose optimization algorithms. On the other hand, unfortunately, many L2O algorithms lack converge guarantees. To fuse the advantages of these approaches, we present a Safe-L2O framework. Safe-L2O updates incorporate a safeguard to guarantee convergence for convex problems with proximal and/or gradient oracles. The safeguard is simple and computationally cheap to implement, and it is activated only when the data-driven L2O updates would perform poorly or appear to diverge. This yields the numerical benefits of employing machine learning to create rapid L2O algorithms while still guaranteeing convergence. Our numerical examples show convergence of Safe-L2O algorithms, even when the provided data is not from the distribution of training data.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Heaton, Howard and Chen, Xiaohan and Wang, Zhangyang and Yin, Wotao}, year={2023}, month={Jun.}, pages={7848-7855} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25950/25722", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25950", + "pdf_size": 338213, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14053036506278817859&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "; ; ; ", + "email": "; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;2", + "aff_unique_norm": "Typal Research;The University of Texas at Austin;Alibaba Group", + "aff_unique_dep": ";Department of Electrical and Computer Engineering;DAMO Academy", + "aff_unique_url": ";https://www.utexas.edu;https://www.alibaba.com", + "aff_unique_abbr": "Typal;UT Austin;Alibaba", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Austin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United 
States" + }, + { + "id": "article-26966", + "title": "Safety Aware Neural Pruning for Deep Reinforcement Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Neural network pruning is a technique of network compression by removing weights of lower importance from an optimized neural network. Often, pruned networks are compared\nin terms of accuracy, which is realized in terms of rewards for Deep Reinforcement Learning (DRL) networks. However, networks that estimate control actions for safety-critical tasks, must also adhere to safety requirements along with obtaining rewards. We propose a methodology to iteratively refine the weights of a pruned neural network such that we get a sparse high-performance network without significant side effects on safety.", + "primary_area": "", + "author": "Briti Gangopadhyay; Pallab Dasgupta; Soumyajit Dey", + "authorids": "", + "aff": "Indian Institute of Technology Kharagpur; Indian Institute of Technology Kharagpur; Indian Institute of Technology Kharagpur", + "bibtex": "@article{Gangopadhyay_Dasgupta_Dey_2024, title={Safety Aware Neural Pruning for Deep Reinforcement Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26966}, DOI={10.1609/aaai.v37i13.26966}, abstractNote={Neural network pruning is a technique of network compression by removing weights of lower importance from an optimized neural network. Often, pruned networks are compared\nin terms of accuracy, which is realized in terms of rewards for Deep Reinforcement Learning (DRL) networks. However, networks that estimate control actions for safety-critical tasks, must also adhere to safety requirements along with obtaining rewards. 
We propose a methodology to iteratively refine the weights of a pruned neural network such that we get a sparse high-performance network without significant side effects on safety.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gangopadhyay, Briti and Dasgupta, Pallab and Dey, Soumyajit}, year={2024}, month={Jul.}, pages={16212-16213} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26966/26738", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26966", + "pdf_size": 1948896, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=454392362155529230&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "iitkgp.ac.in; ; ", + "email": "iitkgp.ac.in; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Kharagpur", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitkgp.ac.in", + "aff_unique_abbr": "IIT Kharagpur", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Kharagpur", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-26799", + "title": "Safety Validation of Learning-Based Autonomous Systems: A Multi-Fidelity Approach", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "In recent years, learning-based autonomous systems have emerged as a promising tool for automating many crucial tasks. The key question is how we can build trust in such systems for safety-critical applications. My research aims to focus on the creation and validation of safety frameworks that leverage multiple sources of information. 
The ultimate goal is to establish a solid foundation for a long-term research program aimed at understanding the role of fidelity in simulators for safety validation and robot learning.", + "primary_area": "", + "author": "Ali Baheri", + "authorids": "", + "aff": "Rochester Institute of Technology", + "bibtex": "@article{Baheri_2024, title={Safety Validation of Learning-Based Autonomous Systems: A Multi-Fidelity Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26799}, DOI={10.1609/aaai.v37i13.26799}, abstractNote={In recent years, learning-based autonomous systems have emerged as a promising tool for automating many crucial tasks. The key question is how we can build trust in such systems for safety-critical applications. My research aims to focus on the creation and validation of safety frameworks that leverage multiple sources of information. The ultimate goal is to establish a solid foundation for a long-term research program aimed at understanding the role of fidelity in simulators for safety validation and robot learning.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Baheri, Ali}, year={2024}, month={Jul.}, pages={15432-15432} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26799/26571", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26799", + "pdf_size": 37697, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5974431821480771500&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "rit.edu", + "email": "rit.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Rochester Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rit.edu", + "aff_unique_abbr": "RIT", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26782", + "title": "Safety Verification of Nonlinear Systems 
with Bayesian Neural Network Controllers", + "track": "aaai special track", + "status": "Technical", + "abstract": "Bayesian neural networks (BNNs) retain NN structures with a probability distribution placed over their weights. With the introduced uncertainties and redundancies, BNNs are proper choices of robust controllers for safety-critical control systems. This paper considers the problem of verifying the safety of nonlinear closed-loop systems with BNN controllers over unbounded-time horizon. In essence, we compute a safe weight set such that as long as the BNN controller is always applied with weights sampled from the safe weight set, the controlled system is guaranteed to be safe. We propose a novel two-phase method for the safe weight set computation. First, we construct a reference safe control set that constraints the control inputs, through polynomial approximation to the BNN controller followed by polynomial-optimization-based barrier certificate generation. Then, the computation of safe weight set is reduced to a range inclusion problem of the BNN on the system domain w.r.t. the safe control set, which can be solved incrementally and the set of safe weights can be extracted. Compared with the existing method based on invariant learning and mixed-integer linear programming, we could compute safe weight sets with larger radii on a series of linear benchmarks. 
Moreover, experiments on a series of widely used nonlinear control tasks show that our method can synthesize large safe weight sets with probability measure as high as 95% even for a large-scale system of dimension 7.", + "primary_area": "safe and robust ai", + "author": "Xia Zeng; Zhengfeng Yang; Li Zhang; Xiaochao Tang; Zhenbing Zeng; Zhiming Liu", + "authorids": "", + "aff": "School of Computer and Information Science, Southwest University, Chongqing, China; Shanghai Key Lab of Trustworthy Computing, East China Normal University, Shanghai, China; Shanghai Key Lab of Trustworthy Computing, East China Normal University, Shanghai, China; Shanghai Key Lab of Trustworthy Computing, East China Normal University, Shanghai, China; Department of Mathematics, Shanghai University, Shanghai, China; School of Computer and Information Science, Southwest University, Chongqing, China", + "bibtex": "@article{Zeng_Yang_Zhang_Tang_Zeng_Liu_2023, title={Safety Verification of Nonlinear Systems with Bayesian Neural Network Controllers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26782}, DOI={10.1609/aaai.v37i12.26782}, abstractNote={Bayesian neural networks (BNNs) retain NN structures with a probability distribution placed over their weights. With the introduced uncertainties and redundancies, BNNs are proper choices of robust controllers for safety-critical control systems. This paper considers the problem of verifying the safety of nonlinear closed-loop systems with BNN controllers over unbounded-time horizon. In essence, we compute a safe weight set such that as long as the BNN controller is always applied with weights sampled from the safe weight set, the controlled system is guaranteed to be safe. We propose a novel two-phase method for the safe weight set computation. 
First, we construct a reference safe control set that constraints the control inputs, through polynomial approximation to the BNN controller followed by polynomial-optimization-based barrier certificate generation. Then, the computation of safe weight set is reduced to a range inclusion problem of the BNN on the system domain w.r.t. the safe control set, which can be solved incrementally and the set of safe weights can be extracted. Compared with the existing method based on invariant learning and mixed-integer linear programming, we could compute safe weight sets with larger radii on a series of linear benchmarks. Moreover, experiments on a series of widely used nonlinear control tasks show that our method can synthesize large safe weight sets with probability measure as high as 95% even for a large-scale system of dimension 7.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, Xia and Yang, Zhengfeng and Zhang, Li and Tang, Xiaochao and Zeng, Zhenbing and Liu, Zhiming}, year={2023}, month={Jun.}, pages={15278-15286} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26782/26554", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26782", + "pdf_size": 575537, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14712058124166420954&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 2, + "aff_domain": "swu.edu.cn;sei.ecnu.edu.cn; ; ;shu.edu.cn;swu.edu.cn", + "email": "swu.edu.cn;sei.ecnu.edu.cn; ; ;shu.edu.cn;swu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;2;0", + "aff_unique_norm": "Southwest University;East China Normal University;Shanghai University", + "aff_unique_dep": "School of Computer and Information Science;Shanghai Key Lab of Trustworthy Computing;Department of Mathematics", + "aff_unique_url": "http://www.swu.edu.cn;http://www.ecnu.edu.cn;https://www.shu.edu.cn", + "aff_unique_abbr": ";ECNU;SHU", + 
"aff_campus_unique_index": "0;1;1;1;1;0", + "aff_campus_unique": "Chongqing;Shanghai", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26742", + "title": "Sample-Dependent Adaptive Temperature Scaling for Improved Calibration", + "track": "aaai special track", + "status": "Technical", + "abstract": "It is now well known that neural networks can be wrong with high confidence in their predictions, leading to poor calibration. The most common post-hoc approach to compensate for this is to perform temperature scaling, which adjusts the confidences of the predictions on any input by scaling the logits by a fixed value. Whilst this approach typically improves the average calibration across the whole test dataset, this improvement typically reduces the individual confidences of the predictions irrespective of whether the classification of a given input is correct or incorrect. With this insight, we base our method on the observation that different samples contribute to the calibration error by varying amounts, with some needing to increase their confidence and others needing to decrease it. Therefore, for each input, we propose to predict a different temperature value, allowing us to adjust the mismatch between confidence and accuracy at a finer granularity. Our method is applied post-hoc, enabling it to be very fast with a negligible memory footprint and is applied to off-the-shelf pre-trained classifiers. We test our method on the ResNet50 and WideResNet28-10 architectures using the CIFAR10/100 and Tiny-ImageNet datasets, showing that producing per-data-point temperatures improves the expected calibration error across the whole test set.", + "primary_area": "safe and robust ai", + "author": "Tom Joy; Francesco Pinto; Ser-Nam Lim; Philip H.S. Torr; Puneet K. 
Dokania", + "authorids": "", + "aff": "University of Oxford + Five AI; University of Oxford; Meta AI; University of Oxford + Five AI; University of Oxford + Five AI", + "bibtex": "@article{Joy_Pinto_Lim_Torr_Dokania_2023, title={Sample-Dependent Adaptive Temperature Scaling for Improved Calibration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26742}, DOI={10.1609/aaai.v37i12.26742}, abstractNote={It is now well known that neural networks can be wrong with high confidence in their predictions, leading to poor calibration. The most common post-hoc approach to compensate for this is to perform temperature scaling, which adjusts the confidences of the predictions on any input by scaling the logits by a fixed value. Whilst this approach typically improves the average calibration across the whole test dataset, this improvement typically reduces the individual confidences of the predictions irrespective of whether the classification of a given input is correct or incorrect. With this insight, we base our method on the observation that different samples contribute to the calibration error by varying amounts, with some needing to increase their confidence and others needing to decrease it. Therefore, for each input, we propose to predict a different temperature value, allowing us to adjust the mismatch between confidence and accuracy at a finer granularity. Our method is applied post-hoc, enabling it to be very fast with a negligible memory footprint and is applied to off-the-shelf pre-trained classifiers. We test our method on the ResNet50 and WideResNet28-10 architectures using the CIFAR10/100 and Tiny-ImageNet datasets, showing that producing per-data-point temperatures improves the expected calibration error across the whole test set.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Joy, Tom and Pinto, Francesco and Lim, Ser-Nam and Torr, Philip H.S. 
and Dokania, Puneet K.}, year={2023}, month={Jun.}, pages={14919-14926} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26742/26514", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26742", + "pdf_size": 390634, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4903288440267494293&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "five.ai; ; ; ; ", + "email": "five.ai; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;2;0+1;0+1", + "aff_unique_norm": "University of Oxford;Five AI;Meta Platforms, Inc.", + "aff_unique_dep": ";;Meta AI", + "aff_unique_url": "https://www.ox.ac.uk;https://www.five.ai;https://meta.com", + "aff_unique_abbr": "Oxford;Five AI;Meta", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;1;0+0;0+0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "article-25918", + "title": "Scalable Attributed-Graph Subspace Clustering", + "track": "main", + "status": "Technical", + "abstract": "Over recent years, graph convolutional networks emerged as powerful node clustering methods and have set state of the art results for this task. In this paper, we argue that some of these methods are unnecessarily complex and propose a node clustering model that is more scalable while being more effective. The proposed model uses Laplacian smoothing to learn an initial representation of the graph before applying an efficient self-expressive subspace clustering procedure.\nThis is performed via learning a factored coefficient matrix. These factors are then embedded into a new feature space in such a way as to generate a valid affinity matrix (symmetric and non-negative) on which an implicit spectral clustering algorithm is performed. 
\nExperiments on several real-world attributed datasets demonstrate the cost-effective nature of our method with respect to the state of the art.", + "primary_area": "machine learning i", + "author": "Chakib Fettal; Lazhar Labiod; Mohamed Nadif", + "authorids": "", + "aff": "Centre Borelli UMR 9010, Universit \u00b4e Paris Cit \u00b4e, 75006 Paris, France+Informatique Caisse des D \u00b4ep\u02c6ots et Consignations; Centre Borelli UMR 9010, Universit \u00b4e Paris Cit \u00b4e, 75006 Paris, France; Centre Borelli UMR 9010, Universit \u00b4e Paris Cit \u00b4e, 75006 Paris, France", + "bibtex": "@article{Fettal_Labiod_Nadif_2023, title={Scalable Attributed-Graph Subspace Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25918}, DOI={10.1609/aaai.v37i6.25918}, abstractNote={Over recent years, graph convolutional networks emerged as powerful node clustering methods and have set state of the art results for this task. In this paper, we argue that some of these methods are unnecessarily complex and propose a node clustering model that is more scalable while being more effective. The proposed model uses Laplacian smoothing to learn an initial representation of the graph before applying an efficient self-expressive subspace clustering procedure.\nThis is performed via learning a factored coefficient matrix. These factors are then embedded into a new feature space in such a way as to generate a valid affinity matrix (symmetric and non-negative) on which an implicit spectral clustering algorithm is performed. 
Experiments on several real-world attributed datasets demonstrate the cost-effective nature of our method with respect to the state of the art.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fettal, Chakib and Labiod, Lazhar and Nadif, Mohamed}, year={2023}, month={Jun.}, pages={7559-7567} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25918/25690", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25918", + "pdf_size": 379046, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13905714197442637352&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff_domain": "u-paris.fr;u-paris.fr;u-paris.fr", + "email": "u-paris.fr;u-paris.fr;u-paris.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "Universit\u00e9 Paris Cit\u00e9;Caisse des D\u00e9p\u00f4ts et Consignations", + "aff_unique_dep": "Centre Borelli UMR 9010;Informatique", + "aff_unique_url": "https://www.univ-paris13.fr;https://www.caisse-des-depots.fr", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Paris;", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-26337", + "title": "Scalable Bayesian Meta-Learning through Generalized Implicit Gradients", + "track": "main", + "status": "Technical", + "abstract": "Meta-learning owns unique effectiveness and swiftness in tackling emerging tasks with limited data. Its broad applicability is revealed by viewing it as a bi-level optimization problem. The resultant algorithmic viewpoint however, faces scalability issues when the inner-level optimization relies on gradient-based iterations. Implicit differentiation has been considered to alleviate this challenge, but it is restricted to an isotropic Gaussian prior, and only favors deterministic meta-learning approaches. 
This work markedly mitigates the scalability bottleneck by cross-fertilizing the benefits of implicit differentiation to probabilistic Bayesian meta-learning. The novel implicit Bayesian meta-learning (iBaML) method not only broadens the scope of learnable priors, but also quantifies the associated uncertainty. Furthermore, the ultimate complexity is well controlled regardless of the inner-level optimization trajectory. Analytical error bounds are established to demonstrate the precision and efficiency of the generalized implicit gradient over the explicit one. Extensive numerical tests are also carried out to empirically validate the performance of the proposed method.", + "primary_area": "machine learning iv", + "author": "Yilang Zhang; Bingcong Li; Shijian Gao; Georgios B. Giannakis", + "authorids": "", + "aff": "Dept. of ECE, University of Minnesota, Minneapolis, MN, USA; Dept. of ECE, University of Minnesota, Minneapolis, MN, USA; Dept. of ECE, University of Minnesota, Minneapolis, MN, USA; Dept. of ECE, University of Minnesota, Minneapolis, MN, USA", + "bibtex": "@article{Zhang_Li_Gao_Giannakis_2023, title={Scalable Bayesian Meta-Learning through Generalized Implicit Gradients}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26337}, DOI={10.1609/aaai.v37i9.26337}, abstractNote={Meta-learning owns unique effectiveness and swiftness in tackling emerging tasks with limited data. Its broad applicability is revealed by viewing it as a bi-level optimization problem. The resultant algorithmic viewpoint however, faces scalability issues when the inner-level optimization relies on gradient-based iterations. Implicit differentiation has been considered to alleviate this challenge, but it is restricted to an isotropic Gaussian prior, and only favors deterministic meta-learning approaches. This work markedly mitigates the scalability bottleneck by cross-fertilizing the benefits of implicit differentiation to probabilistic Bayesian meta-learning. 
The novel implicit Bayesian meta-learning (iBaML) method not only broadens the scope of learnable priors, but also quantifies the associated uncertainty. Furthermore, the ultimate complexity is well controlled regardless of the inner-level optimization trajectory. Analytical error bounds are established to demonstrate the precision and efficiency of the generalized implicit gradient over the explicit one. Extensive numerical tests are also carried out to empirically validate the performance of the proposed method.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yilang and Li, Bingcong and Gao, Shijian and Giannakis, Georgios B.}, year={2023}, month={Jun.}, pages={11298-11306} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26337/26109", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26337", + "pdf_size": 185291, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6784036320930040757&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "umn.edu;umn.edu;umn.edu;umn.edu", + "email": "umn.edu;umn.edu;umn.edu;umn.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Minnesota", + "aff_unique_dep": "Dept. of Electrical and Computer Engineering", + "aff_unique_url": "https://www.umn.edu", + "aff_unique_abbr": "UMN", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Minneapolis", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26431", + "title": "Scalable Decision-Focused Learning in Restless Multi-Armed Bandits with Application to Maternal and Child Health", + "track": "main", + "status": "Technical", + "abstract": "This paper studies restless multi-armed bandit (RMAB) problems with unknown arm transition dynamics but with known correlated arm features. 
The goal is to learn a model to predict transition dynamics given features, where the Whittle index policy solves the RMAB problems using predicted transitions. However, prior works often learn the model by maximizing the predictive accuracy instead of final RMAB solution quality, causing a mismatch between training and evaluation objectives. To address this shortcoming, we propose a novel approach for decision-focused learning in RMAB that directly trains the predictive model to maximize the Whittle index solution quality. We present three key contributions: (i) we establish differentiability of the Whittle index policy to support decision-focused learning; (ii) we significantly improve the scalability of decision-focused learning approaches in sequential problems, specifically RMAB problems; (iii) we apply our algorithm to a previously collected dataset of maternal and child health to demonstrate its performance. Indeed, our algorithm is the first for decision-focused learning in RMAB that scales to real-world problem sizes.", + "primary_area": "planning routing and scheduling", + "author": "Kai Wang; Shresth Verma; Aditya Mate; Sanket Shah; Aparna Taneja; Neha Madhiwalla; Aparna Hegde; Milind Tambe", + "authorids": "", + "aff": "Harvard University; Google Research; Harvard University; Harvard University; Google Research; ARMMAN; ARMMAN; Harvard University+Google Research", + "bibtex": "@article{Wang_Verma_Mate_Shah_Taneja_Madhiwalla_Hegde_Tambe_2023, title={Scalable Decision-Focused Learning in Restless Multi-Armed Bandits with Application to Maternal and Child Health}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26431}, DOI={10.1609/aaai.v37i10.26431}, abstractNote={This paper studies restless multi-armed bandit (RMAB) problems with unknown arm transition dynamics but with known correlated arm features. 
The goal is to learn a model to predict transition dynamics given features, where the Whittle index policy solves the RMAB problems using predicted transitions. However, prior works often learn the model by maximizing the predictive accuracy instead of final RMAB solution quality, causing a mismatch between training and evaluation objectives. To address this shortcoming, we propose a novel approach for decision-focused learning in RMAB that directly trains the predictive model to maximize the Whittle index solution quality. We present three key contributions: (i) we establish differentiability of the Whittle index policy to support decision-focused learning; (ii) we significantly improve the scalability of decision-focused learning approaches in sequential problems, specifically RMAB problems; (iii) we apply our algorithm to a previously collected dataset of maternal and child health to demonstrate its performance. Indeed, our algorithm is the first for decision-focused learning in RMAB that scales to real-world problem sizes.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Kai and Verma, Shresth and Mate, Aditya and Shah, Sanket and Taneja, Aparna and Madhiwalla, Neha and Hegde, Aparna and Tambe, Milind}, year={2023}, month={Jun.}, pages={12138-12146} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26431/26203", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26431", + "pdf_size": 356568, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17595746291336449656&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "g.harvard.edu;google.com;g.harvard.edu;g.harvard.edu;google.com;armman.org;armman.org;google.com", + "email": "g.harvard.edu;google.com;g.harvard.edu;g.harvard.edu;google.com;armman.org;armman.org;google.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;1;2;2;0+1", + "aff_unique_norm": 
"Harvard University;Google;ARMMAN", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://www.harvard.edu;https://research.google;", + "aff_unique_abbr": "Harvard;Google Research;", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "United States;" + }, + { + "id": "article-25701", + "title": "Scalable Edge Blocking Algorithms for Defending Active Directory Style Attack Graphs", + "track": "main", + "status": "Technical", + "abstract": "Active Directory (AD) is the default security management system for Windows domain networks. An AD environment naturally describes an attack graph where nodes represent computers/accounts/security groups, and edges represent existing accesses/known exploits that allow the attacker to gain access from one node to another. Motivated by practical AD use cases, we study a Stackelberg game between one attacker and one defender. There are multiple entry nodes for the attacker to choose from and there is a single target (Domain Admin). Every edge has a failure rate. The attacker chooses the attack path with the maximum success rate. The defender can block a limited number of edges (i.e., revoke accesses) from a set of blockable edges, limited by budget. The defender's aim is to minimize the attacker's success rate.\n\nWe exploit the tree-likeness of practical AD graphs to design scalable algorithms. We propose two novel methods that combine theoretical fixed parameter analysis and practical optimisation techniques.\n\nFor graphs with small tree widths, we propose a tree decomposition based dynamic program. 
We then propose a general method for converting tree decomposition based dynamic programs to reinforcement learning environments, which leads to an anytime algorithm that scales better, but loses the optimality guarantee.\n\nFor graphs with small numbers of non-splitting paths (a parameter we invent specifically for AD graphs), we propose a kernelization technique that significantly downsizes the model, which is then solved via mixed-integer programming.\n\nExperimentally, our algorithms scale to handle synthetic AD graphs with tens of thousands of nodes.", + "primary_area": "game theory and economic paradigms", + "author": "Mingyu Guo; Max Ward; Aneta Neumann; Frank Neumann; Hung Nguyen", + "authorids": "", + "aff": "School of Computer and Mathematical Sciences, University of Adelaide, Australia; School of Physics, Maths and Computing, Computer Science and Software Engineering, University of Western Australia + Department of Molecular and Cellular Biology, Harvard University, Cambridge, Massachusetts, USA; School of Computer and Mathematical Sciences, University of Adelaide, Australia; School of Computer and Mathematical Sciences, University of Adelaide, Australia; School of Computer and Mathematical Sciences, University of Adelaide, Australia", + "bibtex": "@article{Guo_Ward_Neumann_Neumann_Nguyen_2023, title={Scalable Edge Blocking Algorithms for Defending Active Directory Style Attack Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25701}, DOI={10.1609/aaai.v37i5.25701}, abstractNote={Active Directory (AD) is the default security management system for Windows domain networks. An AD environment naturally describes an attack graph where nodes represent computers/accounts/security groups, and edges represent existing accesses/known exploits that allow the attacker to gain access from one node to another. Motivated by practical AD use cases, we study a Stackelberg game between one attacker and one defender. 
There are multiple entry nodes for the attacker to choose from and there is a single target (Domain Admin). Every edge has a failure rate. The attacker chooses the attack path with the maximum success rate. The defender can block a limited number of edges (i.e., revoke accesses) from a set of blockable edges, limited by budget. The defender\u2019s aim is to minimize the attacker\u2019s success rate. We exploit the tree-likeness of practical AD graphs to design scalable algorithms. We propose two novel methods that combine theoretical fixed parameter analysis and practical optimisation techniques. For graphs with small tree widths, we propose a tree decomposition based dynamic program. We then propose a general method for converting tree decomposition based dynamic programs to reinforcement learning environments, which leads to an anytime algorithm that scales better, but loses the optimality guarantee. For graphs with small numbers of non-splitting paths (a parameter we invent specifically for AD graphs), we propose a kernelization technique that significantly downsizes the model, which is then solved via mixed-integer programming. 
Experimentally, our algorithms scale to handle synthetic AD graphs with tens of thousands of nodes.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Mingyu and Ward, Max and Neumann, Aneta and Neumann, Frank and Nguyen, Hung}, year={2023}, month={Jun.}, pages={5649-5656} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25701/25473", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25701", + "pdf_size": 171928, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10032056201679318&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "adelaide.edu.au;uwa.edu.au;adelaide.edu.au;adelaide.edu.au;adelaide.edu.au", + "email": "adelaide.edu.au;uwa.edu.au;adelaide.edu.au;adelaide.edu.au;adelaide.edu.au", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+2;0;0;0", + "aff_unique_norm": "University of Adelaide;University of Western Australia;Harvard University", + "aff_unique_dep": "School of Computer and Mathematical Sciences;School of Physics, Maths and Computing;Department of Molecular and Cellular Biology", + "aff_unique_url": "https://www.adelaide.edu.au;https://www.uwa.edu.au;https://www.harvard.edu", + "aff_unique_abbr": "Adelaide;UWA;Harvard", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;0+1;0;0;0", + "aff_country_unique": "Australia;United States" + }, + { + "id": "article-27023", + "title": "Scalable Negotiating Agent Strategy via Multi-Issue Policy Network (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Previous research on the comprehensive negotiation strategy using deep reinforcement learning (RL) has scalability issues of not performing effectively in the large-sized domains.\nWe improve negotiation strategy via deep RL by considering an issue-based represented deep policy network to 
deal with multi-issue negotiation.\nThe architecture of the proposed learning agent considers the characteristics of multi-issue negotiation domains and policy-based learning.\nWe demonstrate that proposed method achieve equivalent or higher utility than existing negotiation agents in the large-sized domains.", + "primary_area": "", + "author": "Takumu Shimizu; Ryota Higa; Toki Takahashi; Katsuhide Fujita; Shinji Nakadai", + "authorids": "", + "aff": "Tokyo University of Agriculture and Technology; National Institute of Advanced Industrial Science and Technology+NEC Data Science Research Laboratories; Tokyo University of Agriculture and Technology; Tokyo University of Agriculture and Technology; National Institute of Advanced Industrial Science and Technology+NEC Data Science Research Laboratories", + "bibtex": "@article{Shimizu_Higa_Takahashi_Fujita_Nakadai_2024, title={Scalable Negotiating Agent Strategy via Multi-Issue Policy Network (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27023}, DOI={10.1609/aaai.v37i13.27023}, abstractNote={Previous research on the comprehensive negotiation strategy using deep reinforcement learning (RL) has scalability issues of not performing effectively in the large-sized domains.\nWe improve negotiation strategy via deep RL by considering an issue-based represented deep policy network to deal with multi-issue negotiation.\nThe architecture of the proposed learning agent considers the characteristics of multi-issue negotiation domains and policy-based learning.\nWe demonstrate that proposed method achieve equivalent or higher utility than existing negotiation agents in the large-sized domains.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shimizu, Takumu and Higa, Ryota and Takahashi, Toki and Fujita, Katsuhide and Nakadai, Shinji}, year={2024}, month={Jul.}, pages={16326-16327} }", + "pdf": 
"https://ojs.aaai.org/index.php/AAAI/article/view/27023/26795", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27023", + "pdf_size": 501850, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4154818709536767897&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "katfuji.lab.tuat.ac.jp;nec.com;katfuji.lab.tuat.ac.jp;cc.tuat.ac.jp;nec.com", + "email": "katfuji.lab.tuat.ac.jp;nec.com;katfuji.lab.tuat.ac.jp;cc.tuat.ac.jp;nec.com", + "github": "", + "project": "http://web.tuat.ac.jp/~katfuji/ANAC2021/", + "author_num": 5, + "aff_unique_index": "0;1+2;0;0;1+2", + "aff_unique_norm": "Tokyo University of Agriculture and Technology;National Institute of Advanced Industrial Science and Technology;NEC", + "aff_unique_dep": ";;Data Science Research Laboratories", + "aff_unique_url": "https://www.tuat.ac.jp;https://www.aist.go.jp;https://www.nec.com", + "aff_unique_abbr": "TUAT;AIST;NEC", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26180", + "title": "Scalable Optimal Multiway-Split Decision Trees with Constraints", + "track": "main", + "status": "Technical", + "abstract": "There has been a surge of interest in learning optimal decision trees using mixed-integer programs (MIP) in recent years, as heuristic-based methods do not guarantee optimality and find it challenging to incorporate constraints that are critical for many practical applications. However, existing MIP methods that build on an arc-based formulation do not scale well as the number of binary variables is in the order of 2 to the power of the depth of the tree and the size of the dataset. Moreover, they can only handle sample-level constraints and linear metrics. In this paper, we propose a novel path-based MIP formulation where the number of decision variables is independent of dataset size. 
We present a scalable column generation framework to solve the MIP. Our framework produces a multiway-split tree which is more interpretable than the typical binary-split trees due to its shorter rules. Our framework is more general as it can handle nonlinear metrics such as F1 score, and incorporate a broader class of constraints. We demonstrate its efficacy with extensive experiments. We present results on datasets containing up to 1,008,372 samples while existing MIP-based decision tree models do not scale well on data beyond a few thousand points. We report superior or competitive results compared to the state-of-art MIP-based methods with up to a 24X reduction in runtime.", + "primary_area": "machine learning iii", + "author": "Shivaram Subramanian; Wei Sun", + "authorids": "", + "aff": "IBM Research, Yorktown Heights, New York, USA; IBM Research, Yorktown Heights, New York, USA", + "bibtex": "@article{Subramanian_Sun_2023, title={Scalable Optimal Multiway-Split Decision Trees with Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26180}, DOI={10.1609/aaai.v37i8.26180}, abstractNote={There has been a surge of interest in learning optimal decision trees using mixed-integer programs (MIP) in recent years, as heuristic-based methods do not guarantee optimality and find it challenging to incorporate constraints that are critical for many practical applications. However, existing MIP methods that build on an arc-based formulation do not scale well as the number of binary variables is in the order of 2 to the power of the depth of the tree and the size of the dataset. Moreover, they can only handle sample-level constraints and linear metrics. In this paper, we propose a novel path-based MIP formulation where the number of decision variables is independent of dataset size. We present a scalable column generation framework to solve the MIP. 
Our framework produces a multiway-split tree which is more interpretable than the typical binary-split trees due to its shorter rules. Our framework is more general as it can handle nonlinear metrics such as F1 score, and incorporate a broader class of constraints. We demonstrate its efficacy with extensive experiments. We present results on datasets containing up to 1,008,372 samples while existing MIP-based decision tree models do not scale well on data beyond a few thousand points. We report superior or competitive results compared to the state-of-art MIP-based methods with up to a 24X reduction in runtime.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Subramanian, Shivaram and Sun, Wei}, year={2023}, month={Jun.}, pages={9891-9899} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26180/25952", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26180", + "pdf_size": 659014, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1670573902944379507&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "us.ibm.com;us.ibm.com", + "email": "us.ibm.com;us.ibm.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Yorktown Heights", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25110", + "title": "Scalable Spatial Memory for Scene Rendering and Navigation", + "track": "main", + "status": "Technical", + "abstract": "Neural scene representation and rendering methods have shown promise in learning the implicit form of scene structure without supervision. 
However, the implicit representation learned in most existing methods is non-expandable and cannot be inferred online for novel scenes, which makes the learned representation difficult to be applied across different reinforcement learning (RL) tasks. In this work, we introduce Scene Memory Network (SMN) to achieve online spatial memory construction and expansion for view rendering in novel scenes. SMN models the camera projection and back-projection as spatially aware memory control processes, where the memory values store the information of the partial 3D area, and the memory keys indicate the position of that area. The memory controller can learn the geometry property from observations without the camera's intrinsic parameters and depth supervision. We further apply the memory constructed by SMN to exploration and navigation tasks. The experimental results reveal the generalization ability of our proposed SMN in large-scale scene synthesis and its potential to improve the performance of spatial RL tasks.", + "primary_area": "computer vision i", + "author": "Wen-Cheng Chen; Chu-Song Chen; Wei-Chen Chiu; Min-Chun Hu", + "authorids": "", + "aff": "National Cheng Kung University; National Taiwan University; National Yang Ming Chiao Tung University; National Tsing Hua University", + "bibtex": "@article{Chen_Chen_Chiu_Hu_2023, title={Scalable Spatial Memory for Scene Rendering and Navigation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25110}, DOI={10.1609/aaai.v37i1.25110}, abstractNote={Neural scene representation and rendering methods have shown promise in learning the implicit form of scene structure without supervision. However, the implicit representation learned in most existing methods is non-expandable and cannot be inferred online for novel scenes, which makes the learned representation difficult to be applied across different reinforcement learning (RL) tasks. 
In this work, we introduce Scene Memory Network (SMN) to achieve online spatial memory construction and expansion for view rendering in novel scenes. SMN models the camera projection and back-projection as spatially aware memory control processes, where the memory values store the information of the partial 3D area, and the memory keys indicate the position of that area. The memory controller can learn the geometry property from observations without the camera\u2019s intrinsic parameters and depth supervision. We further apply the memory constructed by SMN to exploration and navigation tasks. The experimental results reveal the generalization ability of our proposed SMN in large-scale scene synthesis and its potential to improve the performance of spatial RL tasks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Wen-Cheng and Chen, Chu-Song and Chiu, Wei-Chen and Hu, Min-Chun}, year={2023}, month={Jun.}, pages={369-377} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25110/24882", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25110", + "pdf_size": 5332416, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9137496292232395883&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;csie.ntu.edu.tw;cs.nctu.edu.tw;cs.nthu.edu.tw", + "email": "gmail.com;csie.ntu.edu.tw;cs.nctu.edu.tw;cs.nthu.edu.tw", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "National Cheng Kung University;National Taiwan University;National Yang Ming Chiao Tung University;National Tsing Hua University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ncku.edu.tw;https://www.ntu.edu.tw;https://www.nycu.edu.tw;https://www.nthu.edu.tw", + "aff_unique_abbr": "NCKU;NTU;NYCU;NTHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "Taiwan, China" + }, + { + "id": "article-25880", + "title": "Scalable Spatiotemporal Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Neural forecasting of spatiotemporal time series drives both research and industrial innovation in several relevant application domains. Graph neural networks (GNNs) are often the core component of the forecasting architecture. However, in most spatiotemporal GNNs, the computational complexity scales up to a quadratic factor with the length of the sequence times the number of links in the graph, hence hindering the application of these models to large graphs and long temporal sequences. While methods to improve scalability have been proposed in the context of static graphs, few research efforts have been devoted to the spatiotemporal case. To fill this gap, we propose a scalable architecture that exploits an efficient encoding of both temporal and spatial dynamics. In particular, we use a randomized recurrent neural network to embed the history of the input time series into high-dimensional state representations encompassing multi-scale temporal dynamics. Such representations are then propagated along the spatial dimension using different powers of the graph adjacency matrix to generate node embeddings characterized by a rich pool of spatiotemporal features. The resulting node embeddings can be efficiently pre-computed in an unsupervised manner, before being fed to a feed-forward decoder that learns to map the multi-scale spatiotemporal representations to predictions. The training procedure can then be parallelized node-wise by sampling the node embeddings without breaking any dependency, thus enabling scalability to large networks. 
Empirical results on relevant datasets show that our approach achieves results competitive with the state of the art, while dramatically reducing the computational burden.", + "primary_area": "machine learning i", + "author": "Andrea Cini; Ivan Marisca; Filippo Maria Bianchi; Cesare Alippi", + "authorids": "", + "aff": "The Swiss AI Lab IDSIA, Universit\u00e0 della Svizzera italiana; The Swiss AI Lab IDSIA, Universit\u00e0 della Svizzera italiana; UiT the Arctic University of Norway+NORCE Norwegian Research Centre; The Swiss AI Lab IDSIA, Universit\u00e0 della Svizzera italiana+Politecnico di Milano", + "bibtex": "@article{Cini_Marisca_Bianchi_Alippi_2023, title={Scalable Spatiotemporal Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25880}, DOI={10.1609/aaai.v37i6.25880}, abstractNote={Neural forecasting of spatiotemporal time series drives both research and industrial innovation in several relevant application domains. Graph neural networks (GNNs) are often the core component of the forecasting architecture. However, in most spatiotemporal GNNs, the computational complexity scales up to a quadratic factor with the length of the sequence times the number of links in the graph, hence hindering the application of these models to large graphs and long temporal sequences. While methods to improve scalability have been proposed in the context of static graphs, few research efforts have been devoted to the spatiotemporal case. To fill this gap, we propose a scalable architecture that exploits an efficient encoding of both temporal and spatial dynamics. In particular, we use a randomized recurrent neural network to embed the history of the input time series into high-dimensional state representations encompassing multi-scale temporal dynamics. 
Such representations are then propagated along the spatial dimension using different powers of the graph adjacency matrix to generate node embeddings characterized by a rich pool of spatiotemporal features. The resulting node embeddings can be efficiently pre-computed in an unsupervised manner, before being fed to a feed-forward decoder that learns to map the multi-scale spatiotemporal representations to predictions. The training procedure can then be parallelized node-wise by sampling the node embeddings without breaking any dependency, thus enabling scalability to large networks. Empirical results on relevant datasets show that our approach achieves results competitive with the state of the art, while dramatically reducing the computational burden.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cini, Andrea and Marisca, Ivan and Bianchi, Filippo Maria and Alippi, Cesare}, year={2023}, month={Jun.}, pages={7218-7226} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25880/25652", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25880", + "pdf_size": 498392, + "gs_citation": 68, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18330174375468927016&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "usi.ch;usi.ch;uit.no;usi.ch", + "email": "usi.ch;usi.ch;uit.no;usi.ch", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+2;0+3", + "aff_unique_norm": "Universit\u00e0 della Svizzera italiana;UiT the Arctic University of Norway;NORCE Norwegian Research Centre;Politecnico di Milano", + "aff_unique_dep": "Swiss AI Lab IDSIA;;;", + "aff_unique_url": "https://www.idsia.ch/;https://www.uit.no;https://www.norce.no;https://www.polimi.it", + "aff_unique_abbr": "IDSIA;UiT;NORCE;Polimi", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1+1;0+2", + "aff_country_unique": "Switzerland;Norway;Italy" + 
}, + { + "id": "article-25839", + "title": "Scalable Theory-Driven Regularization of Scene Graph Generation Models", + "track": "main", + "status": "Technical", + "abstract": "Several techniques have recently aimed to improve the performance of deep learning models for Scene Graph Generation (SGG) by incorporating background knowledge. State-of-the-art techniques can be divided into two families: one where the background knowledge is incorporated into the model in a subsymbolic fashion, and another in which the background knowledge is maintained in symbolic form. Despite promising results, both families of techniques face several shortcomings: the first one requires ad-hoc, more complex neural architectures increasing the training or inference cost; the second one suffers from limited scalability w.r.t. the size of the background knowledge. Our work introduces a regularization technique for injecting symbolic background knowledge into neural SGG models that overcomes the limitations of prior art. Our technique is model-agnostic, does not incur any cost at inference time, and scales to previously unmanageable background knowledge sizes. We demonstrate that our technique can improve the accuracy of state-of-the-art SGG models, by up to 33%.", + "primary_area": "machine learning i", + "author": "Davide Buffelli; Efthymia Tsamoura", + "authorids": "", + "aff": "University of Padova; Samsung AI", + "bibtex": "@article{Buffelli_Tsamoura_2023, title={Scalable Theory-Driven Regularization of Scene Graph Generation Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25839}, DOI={10.1609/aaai.v37i6.25839}, abstractNote={Several techniques have recently aimed to improve the performance of deep learning models for Scene Graph Generation (SGG) by incorporating background knowledge. 
State-of-the-art techniques can be divided into two families: one where the background knowledge is incorporated into the model in a subsymbolic fashion, and another in which the background knowledge is maintained in symbolic form. Despite promising results, both families of techniques face several shortcomings: the first one requires ad-hoc, more complex neural architectures increasing the training or inference cost; the second one suffers from limited scalability w.r.t. the size of the background knowledge. Our work introduces a regularization technique for injecting symbolic background knowledge into neural SGG models that overcomes the limitations of prior art. Our technique is model-agnostic, does not incur any cost at inference time, and scales to previously unmanageable background knowledge sizes. We demonstrate that our technique can improve the accuracy of state-of-the-art SGG models, by up to 33%.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Buffelli, Davide and Tsamoura, Efthymia}, year={2023}, month={Jun.}, pages={6850-6859} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25839/25611", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25839", + "pdf_size": 1081790, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15118603238605540643&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "phd.unipd.it;samsung.com", + "email": "phd.unipd.it;samsung.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Padova;Samsung", + "aff_unique_dep": ";Samsung AI", + "aff_unique_url": "https://www.unipd.it;https://www.samsung.com", + "aff_unique_abbr": "UNIPD;Samsung AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Italy;South Korea" + }, + { + "id": "article-25568", + "title": "Scalable and 
Effective Conductance-Based Graph Clustering", + "track": "main", + "status": "Technical", + "abstract": "Conductance-based graph clustering has been recognized as a fundamental operator in numerous graph analysis applications. Despite the significant success of conductance-based graph clustering, existing algorithms are either hard to obtain satisfactory clustering qualities, or have high time and space complexity to achieve provable clustering qualities. To overcome these limitations, we devise a powerful peeling-based graph clustering framework PCon. We show that many existing solutions can be reduced to our framework. Namely, they first define a score function for each vertex, then iteratively remove the vertex with the smallest score. Finally, they output the result with the smallest conductance during the peeling process. Based on our framework, we propose two novel algorithms PCon_core and PCon_de with linear time and space complexity, which can efficiently and effectively identify clusters from massive graphs with more than a few billion edges. Surprisingly, we prove that PCon_de can identify clusters with near-constant approximation ratio, resulting in an important theoretical improvement over the well-known quadratic Cheeger bound. 
Empirical results on real-life and synthetic datasets show that our algorithms can achieve 5~42 times speedup with a high clustering accuracy, while using 1.4~7.8 times less memory than the baseline algorithms.", + "primary_area": "data mining and knowledge management", + "author": "Longlong Lin; Ronghua Li; Tao Jia", + "authorids": "", + "aff": "College of Computer and Information Science, Southwest University, Chongqing 400715, China; Beijing Institute of Technology, China; College of Computer and Information Science, Southwest University, Chongqing 400715, China", + "bibtex": "@article{Lin_Li_Jia_2023, title={Scalable and Effective Conductance-Based Graph Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25568}, DOI={10.1609/aaai.v37i4.25568}, abstractNote={Conductance-based graph clustering has been recognized as a fundamental operator in numerous graph analysis applications. Despite the significant success of conductance-based graph clustering, existing algorithms are either hard to obtain satisfactory clustering qualities, or have high time and space complexity to achieve provable clustering qualities. To overcome these limitations, we devise a powerful peeling-based graph clustering framework PCon. We show that many existing solutions can be reduced to our framework. Namely, they first define a score function for each vertex, then iteratively remove the vertex with the smallest score. Finally, they output the result with the smallest conductance during the peeling process. Based on our framework, we propose two novel algorithms PCon_core and PCon_de with linear time and space complexity, which can efficiently and effectively identify clusters from massive graphs with more than a few billion edges. Surprisingly, we prove that PCon_de can identify clusters with near-constant approximation ratio, resulting in an important theoretical improvement over the well-known quadratic Cheeger bound. 
Empirical results on real-life and synthetic datasets show that our algorithms can achieve 5~42 times speedup with a high clustering accuracy, while using 1.4~7.8 times less memory than the baseline algorithms.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Longlong and Li, Ronghua and Jia, Tao}, year={2023}, month={Jun.}, pages={4471-4478} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25568/25340", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25568", + "pdf_size": 283217, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7281508408804657401&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 9, + "aff_domain": "swu.edu.cn;126.com;swu.edu.cn", + "email": "swu.edu.cn;126.com;swu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Southwest University;Beijing Institute of Technology", + "aff_unique_dep": "College of Computer and Information Science;", + "aff_unique_url": ";http://www.bit.edu.cn/", + "aff_unique_abbr": ";BIT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chongqing;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25857", + "title": "Scalable and Globally Optimal Generalized L\u2081 K-center Clustering via Constraint Generation in Mixed Integer Linear Programming", + "track": "main", + "status": "Technical", + "abstract": "The k-center clustering algorithm, introduced over 35 years ago, is known to be robust to class imbalance prevalent in many clustering problems and has various applications such as data summarization, document clustering, and facility location determination. Unfortunately, existing k-center algorithms provide highly suboptimal solutions that can limit their practical application, reproducibility, and clustering quality. 
In this paper, we provide a novel scalable and globally optimal solution to a popular variant of the k-center problem known as generalized L_1 k-center clustering that uses L_1 distance and allows the selection of arbitrary vectors as cluster centers. We show that this clustering objective can be reduced to a mixed-integer linear program (MILP) that facilitates globally optimal clustering solutions. However, solving such a MILP may be intractable for large datasets; to remedy this, we present a scalable algorithm that leverages constraint generation to efficiently and provably converge to its global optimum. We further enhance outlier handling through a simple but elegant extension to our MILP objective. We first evaluate our algorithm on a variety of synthetic datasets to better understand its properties and then validate on 20 real benchmark datasets where we compare its performance to both traditional L_1 distance k-center and k-medians baselines. Our results demonstrate significant suboptimality of existing algorithms in comparison to our approach and further demonstrate that we can find optimal generalized L_1 k-center clustering solutions up to an unprecedented 1,000,000 data points.", + "primary_area": "machine learning i", + "author": "Aravinth Chembu; Scott Sanner; Hassan Khurram; Akshat Kumar", + "authorids": "", + "aff": "University of Toronto, Toronto; University of Toronto, Toronto + Vector Institute, Toronto, Canada; University of Toronto, Toronto; Singapore Management University, Singapore", + "bibtex": "@article{Chembu_Sanner_Khurram_Kumar_2023, title={Scalable and Globally Optimal Generalized L\u2081 K-center Clustering via Constraint Generation in Mixed Integer Linear Programming}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25857}, DOI={10.1609/aaai.v37i6.25857}, abstractNote={The k-center clustering algorithm, introduced over 35 years ago, is known to be robust to class imbalance prevalent in many clustering problems and 
has various applications such as data summarization, document clustering, and facility location determination. Unfortunately, existing k-center algorithms provide highly suboptimal solutions that can limit their practical application, reproducibility, and clustering quality. In this paper, we provide a novel scalable and globally optimal solution to a popular variant of the k-center problem known as generalized L_1 k-center clustering that uses L_1 distance and allows the selection of arbitrary vectors as cluster centers. We show that this clustering objective can be reduced to a mixed-integer linear program (MILP) that facilitates globally optimal clustering solutions. However, solving such a MILP may be intractable for large datasets; to remedy this, we present a scalable algorithm that leverages constraint generation to efficiently and provably converge to its global optimum. We further enhance outlier handling through a simple but elegant extension to our MILP objective. We first evaluate our algorithm on a variety of synthetic datasets to better understand its properties and then validate on 20 real benchmark datasets where we compare its performance to both traditional L_1 distance k-center and k-medians baselines. 
Our results demonstrate significant suboptimality of existing algorithms in comparison to our approach and further demonstrate that we can find optimal generalized L_1 k-center clustering solutions up to an unprecedented 1,000,000 data points.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chembu, Aravinth and Sanner, Scott and Khurram, Hassan and Kumar, Akshat}, year={2023}, month={Jun.}, pages={7015-7023} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25857/25629", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25857", + "pdf_size": 210619, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17127827986574016084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.utoronto.ca;mie.utoronto.ca;mail.utoronto.ca;smu.edu.sg", + "email": "mail.utoronto.ca;mie.utoronto.ca;mail.utoronto.ca;smu.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;0;2", + "aff_unique_norm": "University of Toronto;Vector Institute;Singapore Management University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai;https://www.smu.edu.sg", + "aff_unique_abbr": "U of T;Vector Institute;SMU", + "aff_campus_unique_index": "0;0+0;0", + "aff_campus_unique": "Toronto;", + "aff_country_unique_index": "0;0+0;0;1", + "aff_country_unique": "Canada;Singapore" + }, + { + "id": "article-25582", + "title": "Scaling Law for Recommendation Models: Towards General-Purpose User Representations", + "track": "main", + "status": "Technical", + "abstract": "Recent advancement of large-scale pretrained models such as BERT, GPT-3, CLIP, and Gopher, has shown astonishing achievements across various task domains. Unlike vision recognition and language models, studies on general-purpose user representation at scale still remain underexplored. 
Here we explore the possibility of general-purpose user representation learning by training a universal user encoder at large scales. We demonstrate that the scaling law is present in user representation learning areas, where the training error scales as a power-law with the amount of computation. Our Contrastive Learning User Encoder (CLUE), optimizes task-agnostic objectives, and the resulting user embeddings stretch our expectation of what is possible to do in various downstream tasks. CLUE also shows great transferability to other domains and companies, as performances on an online experiment shows significant improvements in Click-Through-Rate (CTR). Furthermore, we also investigate how the model performance is influenced by the scale factors, such as training data size, model capacity, sequence length, and batch size. Finally, we discuss the broader impacts of CLUE in general.", + "primary_area": "data mining and knowledge management", + "author": "Kyuyong Shin; Hanock Kwak; Su Young Kim; Max Nihl\u00e9n Ramstr\u00f6m; Jisu Jeong; Jung-Woo Ha; Kyung-Min Kim", + "authorids": "", + "aff": "NAVER+NAVER AI Lab; NAVER+NAVER AI Lab; NAVER; NAVER; NAVER+NAVER AI Lab; NAVER+NAVER AI Lab; NAVER+NAVER AI Lab", + "bibtex": "@article{Shin_Kwak_Kim_Ramstr\u00f6m_Jeong_Ha_Kim_2023, title={Scaling Law for Recommendation Models: Towards General-Purpose User Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25582}, DOI={10.1609/aaai.v37i4.25582}, abstractNote={Recent advancement of large-scale pretrained models such as BERT, GPT-3, CLIP, and Gopher, has shown astonishing achievements across various task domains. Unlike vision recognition and language models, studies on general-purpose user representation at scale still remain underexplored. Here we explore the possibility of general-purpose user representation learning by training a universal user encoder at large scales. 
We demonstrate that the scaling law is present in user representation learning areas, where the training error scales as a power-law with the amount of computation. Our Contrastive Learning User Encoder (CLUE), optimizes task-agnostic objectives, and the resulting user embeddings stretch our expectation of what is possible to do in various downstream tasks. CLUE also shows great transferability to other domains and companies, as performances on an online experiment shows significant improvements in Click-Through-Rate (CTR). Furthermore, we also investigate how the model performance is influenced by the scale factors, such as training data size, model capacity, sequence length, and batch size. Finally, we discuss the broader impacts of CLUE in general.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shin, Kyuyong and Kwak, Hanock and Kim, Su Young and Ramstr\u00f6m, Max Nihl\u00e9n and Jeong, Jisu and Ha, Jung-Woo and Kim, Kyung-Min}, year={2023}, month={Jun.}, pages={4596-4604} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25582/25354", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25582", + "pdf_size": 271876, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10662802498819111884&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "navercorp.com;navercorp.com; ; ; ; ; ", + "email": "navercorp.com;navercorp.com; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;0+0;0;0;0+0;0+0;0+0", + "aff_unique_norm": "NAVER Corporation", + "aff_unique_dep": "", + "aff_unique_url": "https://www.naver.com", + "aff_unique_abbr": "NAVER", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0+0;0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26128", + "title": "Scaling Marginalized Importance Sampling to High-Dimensional State-Spaces via State Abstraction", + "track": "main", + "status": "Technical", + "abstract": "We consider the problem of off-policy evaluation (OPE) in reinforcement learning (RL), where the goal is to estimate the performance of an evaluation policy, pie, using a fixed dataset, D, collected by one or more policies that may be different from pie. Current OPE algorithms may produce poor OPE estimates under policy distribution shift i.e., when the probability of a particular state-action pair occurring under pie is very different from the probability of that same pair occurring in D. In this work, we propose to improve the accuracy of OPE estimators by projecting the high-dimensional state-space into a low-dimensional state-space using concepts from the state abstraction literature. Specifically, we consider marginalized importance sampling (MIS) OPE algorithms which compute state-action distribution correction ratios to produce their OPE estimate. In the original ground state-space, these ratios may have high variance which may lead to high variance OPE. However, we prove that in the lower-dimensional abstract state-space the ratios can have lower variance resulting in lower variance OPE. We then highlight the challenges that arise when estimating the abstract ratios from data, identify sufficient conditions to overcome these issues, and present a minimax optimization problem whose solution yields these abstract ratios. Finally, our empirical evaluation on difficult, high-dimensional state-space OPE tasks shows that the abstract ratios can make MIS OPE estimators achieve lower mean-squared error and more robust to hyperparameter tuning than the ground ratios.", + "primary_area": "machine learning iii", + "author": "Brahma S. Pavse; Josiah P. Hanna", + "authorids": "", + "aff": "University of Wisconsin \u2013 Madison; University of Wisconsin \u2013 Madison", + "bibtex": "@article{Pavse_Hanna_2023, title={Scaling Marginalized Importance Sampling to High-Dimensional State-Spaces via State Abstraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26128}, DOI={10.1609/aaai.v37i8.26128}, abstractNote={We consider the problem of off-policy evaluation (OPE) in reinforcement learning (RL), where the goal is to estimate the performance of an evaluation policy, pie, using a fixed dataset, D, collected by one or more policies that may be different from pie. Current OPE algorithms may produce poor OPE estimates under policy distribution shift i.e., when the probability of a particular state-action pair occurring under pie is very different from the probability of that same pair occurring in D. In this work, we propose to improve the accuracy of OPE estimators by projecting the high-dimensional state-space into a low-dimensional state-space using concepts from the state abstraction literature. Specifically, we consider marginalized importance sampling (MIS) OPE algorithms which compute state-action distribution correction ratios to produce their OPE estimate. In the original ground state-space, these ratios may have high variance which may lead to high variance OPE. However, we prove that in the lower-dimensional abstract state-space the ratios can have lower variance resulting in lower variance OPE. We then highlight the challenges that arise when estimating the abstract ratios from data, identify sufficient conditions to overcome these issues, and present a minimax optimization problem whose solution yields these abstract ratios. 
Finally, our empirical evaluation on difficult, high-dimensional state-space OPE tasks shows that the abstract ratios can make MIS OPE estimators achieve lower mean-squared error and more robust to hyperparameter tuning than the ground ratios.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pavse, Brahma S. and Hanna, Josiah P.}, year={2023}, month={Jun.}, pages={9417-9425} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26128/25900", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26128", + "pdf_size": 412655, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5238547508704400321&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "wisc.edu;cs.wisc.edu", + "email": "wisc.edu;cs.wisc.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Wisconsin-Madison", + "aff_unique_dep": "", + "aff_unique_url": "https://www.wisc.edu", + "aff_unique_abbr": "UW-Madison", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Madison", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26034", + "title": "Scaling Up Dynamic Graph Representation Learning via Spiking Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Recent years have seen a surge in research on dynamic graph representation learning, which aims to model temporal graphs that are dynamic and evolving constantly over time. However, current work typically models graph dynamics with recurrent neural networks (RNNs), making them suffer seriously from computation and memory overheads on large temporal graphs. So far, scalability of dynamic graph representation learning on large temporal graphs remains one of the major challenges. 
In this paper, we present a scalable framework, namely SpikeNet, to efficiently capture the temporal and structural patterns of temporal graphs. We explore a new direction in that we can capture the evolving dynamics of temporal graphs with spiking neural networks (SNNs) instead of RNNs. As a low-power alternative to RNNs, SNNs explicitly model graph dynamics as spike trains of neuron populations and enable spike-based propagation in an efficient way. Experiments on three large real-world temporal graph datasets demonstrate that SpikeNet outperforms strong baselines on the temporal node classification task with lower computational costs. Particularly, SpikeNet generalizes to a large temporal graph (2.7M nodes and 13.9M edges) with significantly fewer parameters and computation overheads.", + "primary_area": "machine learning ii", + "author": "Jintang Li; Zhouxin Yu; Zulun Zhu; Liang Chen; Qi Yu; Zibin Zheng; Sheng Tian; Ruofan Wu; Changhua Meng", + "authorids": "", + "aff": "Sun Yat-sen University; Sun Yat-sen University; Rochester Institute of Technology; Sun Yat-sen University; Rochester Institute of Technology; Sun Yat-sen University; Ant Group; Ant Group; Ant Group", + "bibtex": "@article{Li_Yu_Zhu_Chen_Yu_Zheng_Tian_Wu_Meng_2023, title={Scaling Up Dynamic Graph Representation Learning via Spiking Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26034}, DOI={10.1609/aaai.v37i7.26034}, abstractNote={Recent years have seen a surge in research on dynamic graph representation learning, which aims to model temporal graphs that are dynamic and evolving constantly over time. However, current work typically models graph dynamics with recurrent neural networks (RNNs), making them suffer seriously from computation and memory overheads on large temporal graphs. So far, scalability of dynamic graph representation learning on large temporal graphs remains one of the major challenges. 
In this paper, we present a scalable framework, namely SpikeNet, to efficiently capture the temporal and structural patterns of temporal graphs. We explore a new direction in that we can capture the evolving dynamics of temporal graphs with spiking neural networks (SNNs) instead of RNNs. As a low-power alternative to RNNs, SNNs explicitly model graph dynamics as spike trains of neuron populations and enable spike-based propagation in an efficient way. Experiments on three large real-world temporal graph datasets demonstrate that SpikeNet outperforms strong baselines on the temporal node classification task with lower computational costs. Particularly, SpikeNet generalizes to a large temporal graph (2.7M nodes and 13.9M edges) with significantly fewer parameters and computation overheads.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jintang and Yu, Zhouxin and Zhu, Zulun and Chen, Liang and Yu, Qi and Zheng, Zibin and Tian, Sheng and Wu, Ruofan and Meng, Changhua}, year={2023}, month={Jun.}, pages={8588-8596} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26034/25806", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26034", + "pdf_size": 805762, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=352555751277520886&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail2.sysu.edu;mail2.sysu.edu;g.rit.edu;mail.sysu.edu;rit.edu;mail.sysu.edu;antgroup.com;antgroup.com;antgroup.com", + "email": "mail2.sysu.edu;mail2.sysu.edu;g.rit.edu;mail.sysu.edu;rit.edu;mail.sysu.edu;antgroup.com;antgroup.com;antgroup.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0;1;0;2;2;2", + "aff_unique_norm": "Sun Yat-sen University;Rochester Institute of Technology;Ant Group", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.sysu.edu.cn/;https://www.rit.edu;https://www.antgroup.com", + "aff_unique_abbr": 
"SYSU;RIT;Ant Group", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;1;0;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25086", + "title": "ScatterFormer: Locally-Invariant Scattering Transformer for Patient-Independent Multispectral Detection of Epileptiform Discharges", + "track": "main", + "status": "Technical", + "abstract": "Patient-independent detection of epileptic activities based on visual spectral representation of continuous EEG (cEEG) has been widely used for diagnosing epilepsy. However, precise detection remains a considerable challenge due to subtle variabilities across subjects, channels and time points. Thus, capturing fine-grained, discriminative features of EEG patterns, which is associated with high-frequency textural information, is yet to be resolved. In this work, we propose Scattering Transformer (ScatterFormer), an invariant scattering transform-based hierarchical Transformer that specifically pays attention to subtle features. In particular, the disentangled frequency-aware attention (FAA) enables the Transformer to capture clinically informative high-frequency components, offering a novel clinical explainability based on visual encoding of multichannel EEG signals. Evaluations on two distinct tasks of epileptiform detection demonstrate\nthe effectiveness our method. Our proposed model achieves median AUCROC and accuracy of 98.14%, 96.39% in patients with Rolandic epilepsy. 
On a neonatal seizure detection benchmark, it outperforms the state-of-the-art by 9% in terms of average AUCROC.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Ruizhe Zheng; Jun Li; Yi Wang; Tian Luo; Yuguo Yu", + "authorids": "", + "aff": "Research Institute of Intelligent and Complex Systems, Fudan University+State Key Laboratory of Medical Neurobiology, Fudan University+MOE Frontiers Center for Brain Science, Fudan University+Institutes of Brain Science, Fudan University+Institute of Science and Technology for Brain-Inspired Intelligence, Fudan University+Shanghai Artificial Intelligence Laboratory; Research Institute of Intelligent and Complex Systems, Fudan University+State Key Laboratory of Medical Neurobiology, Fudan University+MOE Frontiers Center for Brain Science, Fudan University+Institutes of Brain Science, Fudan University+Institute of Science and Technology for Brain-Inspired Intelligence, Fudan University+Shanghai Artificial Intelligence Laboratory; Department of Neurology, Children\u2019s Hospital of Fudan University; Department of Neurology, Children\u2019s Hospital of Fudan University; Research Institute of Intelligent and Complex Systems, Fudan University+State Key Laboratory of Medical Neurobiology, Fudan University+MOE Frontiers Center for Brain Science, Fudan University+Institutes of Brain Science, Fudan University+Institute of Science and Technology for Brain-Inspired Intelligence, Fudan University+Shanghai Artificial Intelligence Laboratory", + "bibtex": "@article{Zheng_Li_Wang_Luo_Yu_2023, title={ScatterFormer: Locally-Invariant Scattering Transformer for Patient-Independent Multispectral Detection of Epileptiform Discharges}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25086}, DOI={10.1609/aaai.v37i1.25086}, abstractNote={Patient-independent detection of epileptic activities based on visual spectral representation of continuous EEG (cEEG) has been widely used for diagnosing epilepsy. However, precise detection remains a considerable challenge due to subtle variabilities across subjects, channels and time points. Thus, capturing fine-grained, discriminative features of EEG patterns, which is associated with high-frequency textural information, is yet to be resolved. In this work, we propose Scattering Transformer (ScatterFormer), an invariant scattering transform-based hierarchical Transformer that specifically pays attention to subtle features. In particular, the disentangled frequency-aware attention (FAA) enables the Transformer to capture clinically informative high-frequency components, offering a novel clinical explainability based on visual encoding of multichannel EEG signals. Evaluations on two distinct tasks of epileptiform detection demonstrate\nthe effectiveness our method. Our proposed model achieves median AUCROC and accuracy of 98.14%, 96.39% in patients with Rolandic epilepsy. On a neonatal seizure detection benchmark, it outperforms the state-of-the-art by 9% in terms of average AUCROC.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Ruizhe and Li, Jun and Wang, Yi and Luo, Tian and Yu, Yuguo}, year={2023}, month={Jun.}, pages={148-158} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25086/24858", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25086", + "pdf_size": 4716390, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10569278198454749736&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;shmu.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;shmu.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+0+0+0+1;0+0+0+0+0+1;0;0;0+0+0+0+0+1", + "aff_unique_norm": "Fudan University;Shanghai Artificial Intelligence Laboratory", + "aff_unique_dep": "Research Institute of Intelligent and 
Complex Systems;", + "aff_unique_url": "https://www.fudan.edu.cn;http://www.shailab.org/", + "aff_unique_abbr": "Fudan;Shanghai AI Lab", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0+0+0+0;0+0+0+0+0+0;0;0;0+0+0+0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25387", + "title": "Scene Graph to Image Synthesis via Knowledge Consensus", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we study graph-to-image generation conditioned exclusively on scene graphs, in which we seek to disentangle the veiled semantics between knowledge graphs and images. While most existing research resorts to laborious auxiliary information such as object layouts or segmentation masks, it is also of interest to unveil the generality of the model with limited supervision, moreover, avoiding extra cross-modal alignments. To tackle this challenge, we delve into the causality of the adversarial generation process, and reason out a new principle to realize a simultaneous semantic disentanglement with an alignment on target and model distributions. This principle is named knowledge consensus, which explicitly describes a triangle causal dependency among observed images, graph semantics and hidden visual representations. The consensus also determines a new graph-to-image generation framework, carried on several adversarial optimization objectives. 
Extensive experimental results demonstrate that, even conditioned only on scene graphs, our model surprisingly achieves superior performance on semantics-aware image generation, without losing the competence on manipulating the generation through knowledge graphs.", + "primary_area": "computer vision iii", + "author": "Yang Wu; Pengxu Wei; Liang Lin", + "authorids": "", + "aff": "School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University + Key Laboratory of Information Security Technology, GuangDong Province; School of Computer Science and Engineering, Sun Yat-sen University + Key Laboratory of Information Security Technology, GuangDong Province", + "bibtex": "@article{Wu_Wei_Lin_2023, title={Scene Graph to Image Synthesis via Knowledge Consensus}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25387}, DOI={10.1609/aaai.v37i3.25387}, abstractNote={In this paper, we study graph-to-image generation conditioned exclusively on scene graphs, in which we seek to disentangle the veiled semantics between knowledge graphs and images. While most existing research resorts to laborious auxiliary information such as object layouts or segmentation masks, it is also of interest to unveil the generality of the model with limited supervision, moreover, avoiding extra cross-modal alignments. To tackle this challenge, we delve into the causality of the adversarial generation process, and reason out a new principle to realize a simultaneous semantic disentanglement with an alignment on target and model distributions. This principle is named knowledge consensus, which explicitly describes a triangle causal dependency among observed images, graph semantics and hidden visual representations. The consensus also determines a new graph-to-image generation framework, carried on several adversarial optimization objectives. 
Extensive experimental results demonstrate that, even conditioned only on scene graphs, our model surprisingly achieves superior performance on semantics-aware image generation, without losing the competence on manipulating the generation through knowledge graphs.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yang and Wei, Pengxu and Lin, Liang}, year={2023}, month={Jun.}, pages={2856-2865} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25387/25159", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25387", + "pdf_size": 9627166, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4852928387209181378&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn;ieee.org", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn;ieee.org", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0+1", + "aff_unique_norm": "Sun Yat-sen University;Key Laboratory of Information Security Technology", + "aff_unique_dep": "School of Computer Science and Engineering;Information Security Technology", + "aff_unique_url": "http://www.sysu.edu.cn;", + "aff_unique_abbr": "SYSU;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25141", + "title": "Scene-Level Sketch-Based Image Retrieval with Minimal Pairwise Supervision", + "track": "main", + "status": "Technical", + "abstract": "The sketch-based image retrieval (SBIR) task has long been researched at the instance level, where both query sketches and candidate images are assumed to contain only one dominant object. This strong assumption constrains its application, especially with the increasingly popular intelligent terminals and human-computer interaction technology. 
In this work, a more general scene-level SBIR task is explored, where sketches and images can both contain multiple object instances. The new general task is extremely challenging due to several factors: (i) scene-level SBIR inherently shares sketch-specific difficulties with instance-level SBIR (e.g., sparsity, abstractness, and diversity), (ii) the cross-modal similarity is measured between two partially aligned domains (i.e., not all objects in images are drawn in scene sketches), and (iii) besides instance-level visual similarity, a more complex multi-dimensional scene-level feature matching problem is imposed (including appearance, semantics, layout, etc.). Addressing these challenges, a novel Conditional Graph Autoencoder model is proposed to deal with scene-level sketch-images retrieval. More importantly, the model can be trained with only pairwise supervision, which distinguishes our study from others in that elaborate instance-level annotations (for example, bounding boxes) are no longer required. 
Extensive experiments confirm the ability of our model to robustly retrieve multiple related objects at the scene level and exhibit superior performance beyond strong competitors.", + "primary_area": "computer vision i", + "author": "Ce Ge; Jingyu Wang; Qi Qi; Haifeng Sun; Tong Xu; Jianxin Liao", + "authorids": "", + "aff": "State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications", + "bibtex": "@article{Ge_Wang_Qi_Sun_Xu_Liao_2023, title={Scene-Level Sketch-Based Image Retrieval with Minimal Pairwise Supervision}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25141}, DOI={10.1609/aaai.v37i1.25141}, abstractNote={The sketch-based image retrieval (SBIR) task has long been researched at the instance level, where both query sketches and candidate images are assumed to contain only one dominant object. This strong assumption constrains its application, especially with the increasingly popular intelligent terminals and human-computer interaction technology. In this work, a more general scene-level SBIR task is explored, where sketches and images can both contain multiple object instances. 
The new general task is extremely challenging due to several factors: (i) scene-level SBIR inherently shares sketch-specific difficulties with instance-level SBIR (e.g., sparsity, abstractness, and diversity), (ii) the cross-modal similarity is measured between two partially aligned domains (i.e., not all objects in images are drawn in scene sketches), and (iii) besides instance-level visual similarity, a more complex multi-dimensional scene-level feature matching problem is imposed (including appearance, semantics, layout, etc.). Addressing these challenges, a novel Conditional Graph Autoencoder model is proposed to deal with scene-level sketch-images retrieval. More importantly, the model can be trained with only pairwise supervision, which distinguishes our study from others in that elaborate instance-level annotations (for example, bounding boxes) are no longer required. Extensive experiments confirm the ability of our model to robustly retrieve multiple related objects at the scene level and exhibit superior performance beyond strong competitors.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ge, Ce and Wang, Jingyu and Qi, Qi and Sun, Haifeng and Xu, Tong and Liao, Jianxin}, year={2023}, month={Jun.}, pages={650-657} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25141/24913", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25141", + "pdf_size": 3074709, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11229895646155197320&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;ebupt.com;gmail.com", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;ebupt.com;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "State Key Laboratory of 
Networking and Switching Technology", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26437", + "title": "Score-Based Learning of Graphical Event Models with Background Knowledge Augmentation", + "track": "main", + "status": "Technical", + "abstract": "Graphical event models (GEMs) are representations of temporal point process dynamics between different event types. Many real-world applications however involve limited event stream data, making it challenging to learn GEMs from data alone. In this paper, we introduce approaches that can work together in a score-based learning paradigm, to augment data with potentially different types of background knowledge. We propose novel scores for learning an important parametric class of GEMs; in particular, we propose a Bayesian score for leveraging prior information as well as a more practical simplification that involves fewer parameters, analogous to Bayesian networks. We also introduce a framework for incorporating easily assessed qualitative background knowledge from domain experts, in the form of statements such as `event X depends on event Y' or `event Y makes event X more likely'. The proposed framework has Bayesian interpretations and can be deployed by any score-based learner. Through an extensive empirical investigation, we demonstrate the practical benefits of background knowledge augmentation while learning GEMs for applications in the low-data regime.", + "primary_area": "reasoning under uncertainty", + "author": "Debarun Bhattacharjya; Tian Gao; Dharmashankar Subramanian; Xiao Shou", + "authorids": "", + "aff": "Research AI, IBM T. J. Watson Research Center, Yorktown Heights, NY, USA; Research AI, IBM T. J. Watson Research Center, Yorktown Heights, NY, USA; Research AI, IBM T. J. 
Watson Research Center, Yorktown Heights, NY, USA; Department of Mathematical Sciences, Rensselaer Polytechnic Institute, Troy, NY, USA+Department of Computer Science, Rensselaer Polytechnic Institute, Troy, NY, USA", + "bibtex": "@article{Bhattacharjya_Gao_Subramanian_Shou_2023, title={Score-Based Learning of Graphical Event Models with Background Knowledge Augmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26437}, DOI={10.1609/aaai.v37i10.26437}, abstractNote={Graphical event models (GEMs) are representations of temporal point process dynamics between different event types. Many real-world applications however involve limited event stream data, making it challenging to learn GEMs from data alone. In this paper, we introduce approaches that can work together in a score-based learning paradigm, to augment data with potentially different types of background knowledge. We propose novel scores for learning an important parametric class of GEMs; in particular, we propose a Bayesian score for leveraging prior information as well as a more practical simplification that involves fewer parameters, analogous to Bayesian networks. We also introduce a framework for incorporating easily assessed qualitative background knowledge from domain experts, in the form of statements such as `event X depends on event Y\u2019 or `event Y makes event X more likely\u2019. The proposed framework has Bayesian interpretations and can be deployed by any score-based learner. 
Through an extensive empirical investigation, we demonstrate the practical benefits of background knowledge augmentation while learning GEMs for applications in the low-data regime.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bhattacharjya, Debarun and Gao, Tian and Subramanian, Dharmashankar and Shou, Xiao}, year={2023}, month={Jun.}, pages={12189-12197} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26437/26209", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26437", + "pdf_size": 354495, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=727001794045338719&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "us.ibm.com;us.ibm.com;us.ibm.com;rpi.edu", + "email": "us.ibm.com;us.ibm.com;us.ibm.com;rpi.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1+1", + "aff_unique_norm": "IBM T. J. Watson Research Center;Rensselaer Polytechnic Institute", + "aff_unique_dep": "Research AI;Department of Mathematical Sciences", + "aff_unique_url": "https://www.ibm.com/research/watson;https://www.rpi.edu", + "aff_unique_abbr": "IBM Watson;RPI", + "aff_campus_unique_index": "0;0;0;1+1", + "aff_campus_unique": "Yorktown Heights;Troy", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-26901", + "title": "Scratch for Sports: Athletic Drills as a Platform for Experiencing, Understanding, and Developing AI-Driven Apps", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "Culturally relevant and sustaining implementations of computing education are increasingly leveraging young learners' passion for sports as a platform for building interest in different STEM (Science, Technology, Engineering, and Math) concepts. 
Numerous disciplines spanning physics, engineering, data science, and especially AI based computing are not only authentically used in professional sports in today's world, but can also be productively introduced to introduce young learnres to these disciplines and facilitate deep engagement with the same in the context of sports. In this work, we present a curriculum that includes a constellation of proprietary apps and tools we show student athletes learning sports like basketball and soccer that use AI methods like pose detection and IMU-based gesture detection to track activity and provide feedback. We also share Scratch extensions which enable rich access to sports related pose, object, and gesture detection algorithms that youth can then tinker around with and develop their own sports drill applications. We present early findings from pilot implementations of portions of these tools and curricula, which also fostered discussion relating to the failings, risks, and social harms associated with many of these different AI methods \u2013 noticeable in professional sports contexts, and relevant to youths' lives as active users of AI technologies as well as potential future creators of the same.", + "primary_area": "", + "author": "Vishesh Kumar; Marcelo Worsley", + "authorids": "", + "aff": "School of Education and Social Policy, Northwestern University; School of Education and Social Policy, Northwestern University", + "bibtex": "@article{Kumar_Worsley_2024, title={Scratch for Sports: Athletic Drills as a Platform for Experiencing, Understanding, and Developing AI-Driven Apps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26901}, DOI={10.1609/aaai.v37i13.26901}, abstractNote={Culturally relevant and sustaining implementations of computing education are increasingly leveraging young learners\u2019 passion for sports as a platform for building interest in different STEM (Science, Technology, Engineering, and Math) concepts. 
Numerous disciplines spanning physics, engineering, data science, and especially AI based computing are not only authentically used in professional sports in today\u2019s world, but can also be productively introduced to introduce young learnres to these disciplines and facilitate deep engagement with the same in the context of sports. In this work, we present a curriculum that includes a constellation of proprietary apps and tools we show student athletes learning sports like basketball and soccer that use AI methods like pose detection and IMU-based gesture detection to track activity and provide feedback. We also share Scratch extensions which enable rich access to sports related pose, object, and gesture detection algorithms that youth can then tinker around with and develop their own sports drill applications. We present early findings from pilot implementations of portions of these tools and curricula, which also fostered discussion relating to the failings, risks, and social harms associated with many of these different AI methods \u2013 noticeable in professional sports contexts, and relevant to youths\u2019 lives as active users of AI technologies as well as potential future creators of the same.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kumar, Vishesh and Worsley, Marcelo}, year={2024}, month={Jul.}, pages={16011-16016} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26901/26673", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26901", + "pdf_size": 1797666, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1512511454165750603&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "northwestern.edu;northwestern.edu", + "email": "northwestern.edu;northwestern.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Northwestern University", + "aff_unique_dep": "School of 
Education and Social Policy", + "aff_unique_url": "https://www.northwestern.edu", + "aff_unique_abbr": "Northwestern", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26528", + "title": "Script, Language, and Labels: Overcoming Three Discrepancies for Low-Resource Language Specialization", + "track": "main", + "status": "Technical", + "abstract": "Although multilingual pretrained models (mPLMs) enabled support of various natural language processing in diverse languages, its limited coverage of 100+ languages lets 6500+ languages remain \u2018unseen\u2019. One common approach for an unseen language is specializing the model for it as target, by performing additional masked language modeling (MLM) with the target language corpus. However, we argue that, due to the discrepancy from multilingual MLM pretraining, a naive specialization as such can be suboptimal. Specifically, we pose three discrepancies to overcome. Script and linguistic discrepancy of the target language from the related seen languages, hinder a positive transfer, for which we propose to maximize representation similarity, unlike existing approaches maximizing overlaps. In addition, label space for MLM prediction can vary across languages, for which we propose to reinitialize top layers for a more effective adaptation. 
Experiments over four different language families and three tasks shows that our method improves the task performance of unseen languages with statistical significance, while previous approach fails to.", + "primary_area": "speech natural language processing", + "author": "Jaeseong Lee; Dohyeon Lee; Seung-won Hwang", + "authorids": "", + "aff": "Computer Science and Engineering, Seoul National University; Computer Science and Engineering, Seoul National University; Computer Science and Engineering, Seoul National University", + "bibtex": "@article{Lee_Lee_Hwang_2023, title={Script, Language, and Labels: Overcoming Three Discrepancies for Low-Resource Language Specialization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26528}, DOI={10.1609/aaai.v37i11.26528}, abstractNote={Although multilingual pretrained models (mPLMs) enabled support of various natural language processing in diverse languages, its limited coverage of 100+ languages lets 6500+ languages remain \u2018unseen\u2019. One common approach for an unseen language is specializing the model for it as target, by performing additional masked language modeling (MLM) with the target language corpus. However, we argue that, due to the discrepancy from multilingual MLM pretraining, a naive specialization as such can be suboptimal. Specifically, we pose three discrepancies to overcome. Script and linguistic discrepancy of the target language from the related seen languages, hinder a positive transfer, for which we propose to maximize representation similarity, unlike existing approaches maximizing overlaps. In addition, label space for MLM prediction can vary across languages, for which we propose to reinitialize top layers for a more effective adaptation. 
Experiments over four different language families and three tasks shows that our method improves the task performance of unseen languages with statistical significance, while previous approach fails to.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Jaeseong and Lee, Dohyeon and Hwang, Seung-won}, year={2023}, month={Jun.}, pages={13004-13013} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26528/26300", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26528", + "pdf_size": 616326, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:rLygYpw2A_cJ:scholar.google.com/&scioq=Script,+Language,+and+Labels:+Overcoming+Three+Discrepancies+for+Low-Resource+Language+Specialization&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff_domain": "snu.ac.kr;snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr;snu.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Seoul National University", + "aff_unique_dep": "Computer Science and Engineering", + "aff_unique_url": "https://www.snu.ac.kr", + "aff_unique_abbr": "SNU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26523", + "title": "SeDepTTS: Enhancing the Naturalness via Semantic Dependency and Local Convolution for Text-to-Speech Synthesis", + "track": "main", + "status": "Technical", + "abstract": "Self-attention-based networks have obtained impressive performance in parallel training and global context modeling. However, it is weak in local dependency capturing, especially for data with strong local correlations such as utterances. 
Therefore, we will mine linguistic information of the original text based on a semantic dependency and the semantic relationship between nodes is regarded as prior knowledge to revise the distribution of self-attention. On the other hand, given the strong correlation between input characters, we introduce a one-dimensional (1-D) convolution neural network (CNN) producing query(Q) and value(V) in the self-attention mechanism for a better fusion of local contextual information. Then, we migrate this variant of the self-attention networks to speech synthesis tasks and propose a non-autoregressive (NAR) neural Text-to-Speech (TTS): SeDepTTS. Experimental results show that our model yields good performance in speech synthesis. Specifically, the proposed method yields significant improvement for the processing of pause, stress, and intonation in speech.", + "primary_area": "speech natural language processing", + "author": "Chenglong Jiang; Ying Gao; Wing W.Y. Ng; Jiyong Zhou; Jinghui Zhong; Hongzhong Zhen", + "authorids": "", + "aff": "School of Computer and Engineering, South China University of Technology, Guangzhou, China; School of Computer and Engineering, South China University of Technology, Guangzhou, China; School of Computer and Engineering, South China University of Technology, Guangzhou, China; School of Computer and Engineering, South China University of Technology, Guangzhou, China; School of Computer and Engineering, South China University of Technology, Guangzhou, China; School of Computer and Engineering, South China University of Technology, Guangzhou, China", + "bibtex": "@article{Jiang_Gao_Ng_Zhou_Zhong_Zhen_2023, title={SeDepTTS: Enhancing the Naturalness via Semantic Dependency and Local Convolution for Text-to-Speech Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26523}, DOI={10.1609/aaai.v37i11.26523}, abstractNote={Self-attention-based networks have obtained impressive performance in parallel training and global 
context modeling. However, it is weak in local dependency capturing, especially for data with strong local correlations such as utterances. Therefore, we will mine linguistic information of the original text based on a semantic dependency and the semantic relationship between nodes is regarded as prior knowledge to revise the distribution of self-attention. On the other hand, given the strong correlation between input characters, we introduce a one-dimensional (1-D) convolution neural network (CNN) producing query(Q) and value(V) in the self-attention mechanism for a better fusion of local contextual information. Then, we migrate this variant of the self-attention networks to speech synthesis tasks and propose a non-autoregressive (NAR) neural Text-to-Speech (TTS): SeDepTTS. Experimental results show that our model yields good performance in speech synthesis. Specifically, the proposed method yields significant improvement for the processing of pause, stress, and intonation in speech.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Chenglong and Gao, Ying and Ng, Wing W.Y. 
and Zhou, Jiyong and Zhong, Jinghui and Zhen, Hongzhong}, year={2023}, month={Jun.}, pages={12959-12967} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26523/26295", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26523", + "pdf_size": 1956122, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6896316339190224867&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.scut.edu.cn;mail.scut.edu.cn;mail.scut.edu.cn;scut.edu.cn;scut.edu.cn;ieee.org", + "email": "mail.scut.edu.cn;mail.scut.edu.cn;mail.scut.edu.cn;scut.edu.cn;scut.edu.cn;ieee.org", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School of Computer and Engineering", + "aff_unique_url": "https://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Guangzhou", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25515", + "title": "Second-Order Quantified Boolean Logic", + "track": "main", + "status": "Technical", + "abstract": "Second-order quantified Boolean formulas (SOQBFs) generalize quantified Boolean formulas (QBFs) by admitting second-order quantifiers on function variables in addition to first-order quantifiers on atomic variables. Recent endeavors establish that the complexity of SOQBF satisfiability corresponds to the exponential-time hierarchy (EXPH), similar to that of QBF satisfiability corresponding to the polynomial-time hierarchy (PH). This fact reveals the succinct expression power of SOQBFs in encoding decision problems not efficiently doable by QBFs. 
In this paper, we investigate the second-order quantified Boolean logic with the following main results: First, we present a procedure of quantifier elimination converting SOQBFs to QBFs and a game interpretation of SOQBF semantics. Second, we devise a sound and complete refutation-proof system for SOQBF. Third, we develop an algorithm for countermodel extraction from a refutation proof. Finally, we show potential applications of SOQBFs in system design and multi-agent planning. With these advances, we anticipate practical tools for development.", + "primary_area": "constraint satisfaction and optimization", + "author": "Jie-Hong R. Jiang", + "authorids": "", + "aff": "Graduate Institute of Electronics Engineering, National Taiwan University, Taipei, Taiwan+Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan", + "bibtex": "@article{Jiang_2023, title={Second-Order Quantified Boolean Logic}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25515}, DOI={10.1609/aaai.v37i4.25515}, abstractNote={Second-order quantified Boolean formulas (SOQBFs) generalize quantified Boolean formulas (QBFs) by admitting second-order quantifiers on function variables in addition to first-order quantifiers on atomic variables. Recent endeavors establish that the complexity of SOQBF satisfiability corresponds to the exponential-time hierarchy (EXPH), similar to that of QBF satisfiability corresponding to the polynomial-time hierarchy (PH). This fact reveals the succinct expression power of SOQBFs in encoding decision problems not efficiently doable by QBFs. In this paper, we investigate the second-order quantified Boolean logic with the following main results: First, we present a procedure of quantifier elimination converting SOQBFs to QBFs and a game interpretation of SOQBF semantics. Second, we devise a sound and complete refutation-proof system for SOQBF. Third, we develop an algorithm for countermodel extraction from a refutation proof. 
Finally, we show potential applications of SOQBFs in system design and multi-agent planning. With these advances, we anticipate practical tools for development.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Jie-Hong R.}, year={2023}, month={Jun.}, pages={4007-4015} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25515/25287", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25515", + "pdf_size": 223679, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11147808529922057874&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "ntu.edu.tw", + "email": "ntu.edu.tw", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+0", + "aff_unique_norm": "National Taiwan University", + "aff_unique_dep": "Graduate Institute of Electronics Engineering", + "aff_unique_url": "https://www.ntu.edu.tw", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "0+0", + "aff_campus_unique": "Taipei", + "aff_country_unique_index": "0+0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-25720", + "title": "Securing Lifelines: Safe Delivery of Critical Services in Areas with Volatile Security Situation via a Stackelberg Game Approach", + "track": "main", + "status": "Technical", + "abstract": "Vaccine delivery in under-resourced locations with security risks is not just challenging but also life threatening. The COVID pandemic and the need to vaccinate added even more urgency to this issue. Motivated by this problem, we propose a general framework to set-up limited temporary (vaccination) centers that balance physical security and desired (vaccine) service coverage with limited resources. We set-up the problem as a Stackelberg game between the centers operator (defender) and an adversary, where the set of centers is not fixed a priori but is part of the decision output. 
This results in a mixed combinatorial and continuous optimization problem. As part of our scalable approximation solution, we provide a fundamental contribution by identifying general duality conditions of switching max and min when both discrete and continuous variables are involved. Via detailed experiments, we show that the solution proposed is scalable in practice.", + "primary_area": "game theory and economic paradigms", + "author": "Tien Mai; Arunesh Sinha", + "authorids": "", + "aff": "Singapore Management University; Rutgers University", + "bibtex": "@article{Mai_Sinha_2023, title={Securing Lifelines: Safe Delivery of Critical Services in Areas with Volatile Security Situation via a Stackelberg Game Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25720}, DOI={10.1609/aaai.v37i5.25720}, abstractNote={Vaccine delivery in under-resourced locations with security risks is not just challenging but also life threatening. The COVID pandemic and the need to vaccinate added even more urgency to this issue. Motivated by this problem, we propose a general framework to set-up limited temporary (vaccination) centers that balance physical security and desired (vaccine) service coverage with limited resources. We set-up the problem as a Stackelberg game between the centers operator (defender) and an adversary, where the set of centers is not fixed a priori but is part of the decision output. This results in a mixed combinatorial and continuous optimization problem. As part of our scalable approximation solution, we provide a fundamental contribution by identifying general duality conditions of switching max and min when both discrete and continuous variables are involved. 
Via detailed experiments, we show that the solution proposed is scalable in practice.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mai, Tien and Sinha, Arunesh}, year={2023}, month={Jun.}, pages={5805-5813} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25720/25492", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25720", + "pdf_size": 338235, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9808387284046113067&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "smu.edu.sg;rutgers.edu", + "email": "smu.edu.sg;rutgers.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Singapore Management University;Rutgers University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.smu.edu.sg;https://www.rutgers.edu", + "aff_unique_abbr": "SMU;Rutgers", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "article-26177", + "title": "Securing Secure Aggregation: Mitigating Multi-Round Privacy Leakage in Federated Learning", + "track": "main", + "status": "Technical", + "abstract": "Secure aggregation is a critical component in federated learning (FL), which enables the server to learn the aggregate model of the users without observing their local models. Conventionally, secure aggregation algorithms focus only on ensuring the privacy of individual users in a single training round. We contend that such designs can lead to significant privacy leakages over multiple training rounds, due to partial user selection/participation at each round of FL. 
In fact, we show that the conventional random user selection strategies in FL lead to leaking users' individual models within number of rounds that is linear in the number of users.\nTo address this challenge, we introduce a secure aggregation framework, Multi-RoundSecAgg, with multi-round privacy guarantees.\nIn particular, we introduce a new metric to quantify the privacy guarantees of FL over multiple training rounds, and develop a structured user selection strategy that guarantees the long-term privacy of each user (over any number of training rounds). \nOur framework also carefully accounts for the fairness and the average number of participating users at each round.\nOur experiments on MNIST, CIFAR-10 and CIFAR-100 datasets in the IID and the non-IID settings demonstrate the performance improvement over the baselines, both in terms of privacy protection and test accuracy.", + "primary_area": "machine learning iii", + "author": "Jinhyun So; Ramy E. Ali; Ba\u015fak G\u00fcler; Jiantao Jiao; A. Salman Avestimehr", + "authorids": "", + "aff": "University of Southern California (USC); University of Southern California (USC); University of California, Riverside; University of California, Berkeley; University of Southern California (USC)", + "bibtex": "@article{So_E. Ali_G\u00fcler_Jiao_Avestimehr_2023, title={Securing Secure Aggregation: Mitigating Multi-Round Privacy Leakage in Federated Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26177}, DOI={10.1609/aaai.v37i8.26177}, abstractNote={Secure aggregation is a critical component in federated learning (FL), which enables the server to learn the aggregate model of the users without observing their local models. Conventionally, secure aggregation algorithms focus only on ensuring the privacy of individual users in a single training round. 
We contend that such designs can lead to significant privacy leakages over multiple training rounds, due to partial user selection/participation at each round of FL. In fact, we show that the conventional random user selection strategies in FL lead to leaking users\u2019 individual models within number of rounds that is linear in the number of users.\nTo address this challenge, we introduce a secure aggregation framework, Multi-RoundSecAgg, with multi-round privacy guarantees.\nIn particular, we introduce a new metric to quantify the privacy guarantees of FL over multiple training rounds, and develop a structured user selection strategy that guarantees the long-term privacy of each user (over any number of training rounds). Our framework also carefully accounts for the fairness and the average number of participating users at each round.\nOur experiments on MNIST, CIFAR-10 and CIFAR-100 datasets in the IID and the non-IID settings demonstrate the performance improvement over the baselines, both in terms of privacy protection and test accuracy.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={So, Jinhyun and E. Ali, Ramy and G\u00fcler, Ba\u015fak and Jiao, Jiantao and Avestimehr, A. 
Salman}, year={2023}, month={Jun.}, pages={9864-9873} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26177/25949", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26177", + "pdf_size": 1357936, + "gs_citation": 101, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9343118248491669405&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "samsung.com;samsung.com;ece.ucr.edu;eecs.berkeley.edu;usc.edu", + "email": "samsung.com;samsung.com;ece.ucr.edu;eecs.berkeley.edu;usc.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "University of Southern California;University of California, Riverside;University of California, Berkeley", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.usc.edu;https://www.ucr.edu;https://www.berkeley.edu", + "aff_unique_abbr": "USC;UCR;UC Berkeley", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Riverside;Berkeley", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26609", + "title": "See How You Read? Multi-Reading Habits Fusion Reasoning for Multi-Modal Fake News Detection", + "track": "main", + "status": "Technical", + "abstract": "The existing approaches based on different neural networks automatically capture and fuse the multimodal semantics of news, which have achieved great success for fake news detection. However, they still suffer from the limitations of both shallow fusion of multimodal features and less attention to the inconsistency between different modalities. To overcome them, we propose multi-reading habits fusion reasoning networks (MRHFR) for multi-modal fake news detection. 
In MRHFR, inspired by people's different reading habits for multimodal news, we summarize three basic cognitive reading habits and put forward cognition-aware fusion layer to learn the dependencies between multimodal features of news, so as to deepen their semantic-level integration. To explore the inconsistency of different modalities of news, we develop coherence constraint reasoning layer from two perspectives, which first measures the semantic consistency between the comments and different modal features of the news, and then probes the semantic deviation caused by unimodal features to the multimodal news content through constraint strategy. Experiments on two public datasets not only demonstrate that MRHFR not only achieves the excellent performance but also provides a new paradigm for capturing inconsistencies between multi-modal news.", + "primary_area": "speech natural language processing", + "author": "Lianwei Wu; Pusheng Liu; Yanning Zhang", + "authorids": "", + "aff": "National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, School of Computer Science, Northwestern Polytechnical University, China+Research & Development Institute of Northwestern Polytechnical University in Shenzhen, China+Chongqing Science and Technology Innovation Center of Northwestern Polytechnical University, China; National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, School of Computer Science, Northwestern Polytechnical University, China; National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, School of Computer Science, Northwestern Polytechnical University, China", + "bibtex": "@article{Wu_Liu_Zhang_2023, title={See How You Read? 
Multi-Reading Habits Fusion Reasoning for Multi-Modal Fake News Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26609}, DOI={10.1609/aaai.v37i11.26609}, abstractNote={The existing approaches based on different neural networks automatically capture and fuse the multimodal semantics of news, which have achieved great success for fake news detection. However, they still suffer from the limitations of both shallow fusion of multimodal features and less attention to the inconsistency between different modalities. To overcome them, we propose multi-reading habits fusion reasoning networks (MRHFR) for multi-modal fake news detection. In MRHFR, inspired by people\u2019s different reading habits for multimodal news, we summarize three basic cognitive reading habits and put forward cognition-aware fusion layer to learn the dependencies between multimodal features of news, so as to deepen their semantic-level integration. To explore the inconsistency of different modalities of news, we develop coherence constraint reasoning layer from two perspectives, which first measures the semantic consistency between the comments and different modal features of the news, and then probes the semantic deviation caused by unimodal features to the multimodal news content through constraint strategy. 
Experiments on two public datasets not only demonstrate that MRHFR not only achieves the excellent performance but also provides a new paradigm for capturing inconsistencies between multi-modal news.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Lianwei and Liu, Pusheng and Zhang, Yanning}, year={2023}, month={Jun.}, pages={13736-13744} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26609/26381", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26609", + "pdf_size": 478812, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9919317640452729959&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn", + "email": "nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0+0;0;0", + "aff_unique_norm": "Northwestern Polytechnical University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.nwpu.edu.cn", + "aff_unique_abbr": "NWPU", + "aff_campus_unique_index": "1+2", + "aff_campus_unique": ";Shenzhen;Chongqing", + "aff_country_unique_index": "0+0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25272", + "title": "See Your Emotion from Gait Using Unlabeled Skeleton Data", + "track": "main", + "status": "Technical", + "abstract": "This paper focuses on contrastive learning for gait-based emotion recognition. The existing contrastive learning approaches are rarely suitable for learning skeleton-based gait representations, which suffer from limited gait diversity and inconsistent semantics. In this paper, we propose a Cross-coordinate contrastive learning framework utilizing Ambiguity samples for self-supervised Gait-based Emotion representation (CAGE). First, we propose ambiguity transform to push positive samples into ambiguous semantic space. 
By learning similarities between ambiguity samples and positive samples, our model can learn higher-level semantics of the gait sequences and maintain semantic diversity. Second, to encourage learning the semantic invariance, we uniquely propose cross-coordinate contrastive learning between the Cartesian coordinate and the Spherical coordinate, which brings rich supervisory signals to learn the intrinsic semantic consistency information. Exhaustive experiments show that CAGE improves existing self-supervised methods by 5%\u201310% accuracy, and it achieves comparable or even superior performance to supervised methods.", + "primary_area": "computer vision ii", + "author": "Haifeng Lu; Xiping Hu; Bin Hu", + "authorids": "", + "aff": "School of Information Science and Engineering, Lanzhou University + School of Medical Technology, Beijing Institute of Technology; School of Information Science and Engineering, Lanzhou University + School of Medical Technology, Beijing Institute of Technology; School of Information Science and Engineering, Lanzhou University + School of Medical Technology, Beijing Institute of Technology", + "bibtex": "@article{Lu_Hu_Hu_2023, title={See Your Emotion from Gait Using Unlabeled Skeleton Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25272}, DOI={10.1609/aaai.v37i2.25272}, abstractNote={This paper focuses on contrastive learning for gait-based emotion recognition. The existing contrastive learning approaches are rarely suitable for learning skeleton-based gait representations, which suffer from limited gait diversity and inconsistent semantics. In this paper, we propose a Cross-coordinate contrastive learning framework utilizing Ambiguity samples for self-supervised Gait-based Emotion representation (CAGE). First, we propose ambiguity transform to push positive samples into ambiguous semantic space. 
By learning similarities between ambiguity samples and positive samples, our model can learn higher-level semantics of the gait sequences and maintain semantic diversity. Second, to encourage learning the semantic invariance, we uniquely propose cross-coordinate contrastive learning between the Cartesian coordinate and the Spherical coordinate, which brings rich supervisory signals to learn the intrinsic semantic consistency information. Exhaustive experiments show that CAGE improves existing self-supervised methods by 5%\u201310% accuracy, and it achieves comparable or even superior performance to supervised methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Haifeng and Hu, Xiping and Hu, Bin}, year={2023}, month={Jun.}, pages={1826-1834} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25272/25044", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25272", + "pdf_size": 1514203, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4910624234182403004&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "lzu.edu.cn;bit.edu.cn;bit.edu.cn", + "email": "lzu.edu.cn;bit.edu.cn;bit.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "Lanzhou University;Beijing Institute of Technology", + "aff_unique_dep": "School of Information Science and Engineering;School of Medical Technology", + "aff_unique_url": "http://www.lzu.edu.cn;http://www.bit.edu.cn/", + "aff_unique_abbr": ";BIT", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26477", + "title": "SegFormer: A Topic Segmentation Model with Controllable Range of Attention", + "track": "main", + "status": "Technical", + "abstract": "Topic segmentation aims to reveal the latent structure of a document and divide it 
into multiple parts. However, current neural solutions are limited in the context modeling of sentences and feature representation of candidate boundaries. This causes the model to suffer from inefficient sentence context encoding and noise information interference. In this paper, we design a new text segmentation model SegFormer with unidirectional attention blocks to better model sentence representations. To alleviate the problem of noise information interference, SegFormer uses a novel additional context aggregator and a topic classification loss to guide the model to aggregate the information within the appropriate range. In addition, SegFormer applies an iterative prediction algorithm to search for optimal boundaries progressively. We evaluate SegFormer's generalization ability, multilingual ability, and application ability on multiple challenging real-world datasets. Experiments show that our model significantly improves the performance by 7.5% on the benchmark WIKI-SECTION compared to several strong baselines. 
The application of SegFormer to a real-world dataset to separate normal and advertisement segments in product marketing essays also achieves superior performance in the evaluation with other cutting-edge models.", + "primary_area": "speech natural language processing", + "author": "Haitao Bai; Pinghui Wang; Ruofei Zhang; Zhou Su", + "authorids": "", + "aff": "MOE Key Laboratory for Intelligent Networks and Network Security, Xi\u2019an Jiaotong University; MOE Key Laboratory for Intelligent Networks and Network Security, Xi\u2019an Jiaotong University; MOE Key Laboratory for Intelligent Networks and Network Security, Xi\u2019an Jiaotong University; MOE Key Laboratory for Intelligent Networks and Network Security, Xi\u2019an Jiaotong University", + "bibtex": "@article{Bai_Wang_Zhang_Su_2023, title={SegFormer: A Topic Segmentation Model with Controllable Range of Attention}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26477}, DOI={10.1609/aaai.v37i11.26477}, abstractNote={Topic segmentation aims to reveal the latent structure of a document and divide it into multiple parts. However, current neural solutions are limited in the context modeling of sentences and feature representation of candidate boundaries. This causes the model to suffer from inefficient sentence context encoding and noise information interference. In this paper, we design a new text segmentation model SegFormer with unidirectional attention blocks to better model sentence representations. To alleviate the problem of noise information interference, SegFormer uses a novel additional context aggregator and a topic classification loss to guide the model to aggregate the information within the appropriate range. In addition, SegFormer applies an iterative prediction algorithm to search for optimal boundaries progressively. We evaluate SegFormer\u2019s generalization ability, multilingual ability, and application ability on multiple challenging real-world datasets. 
Experiments show that our model significantly improves the performance by 7.5% on the benchmark WIKI-SECTION compared to several strong baselines. The application of SegFormer to a real-world dataset to separate normal and advertisement segments in product marketing essays also achieves superior performance in the evaluation with other cutting-edge models.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bai, Haitao and Wang, Pinghui and Zhang, Ruofei and Su, Zhou}, year={2023}, month={Jun.}, pages={12545-12552} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26477/26249", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26477", + "pdf_size": 592055, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=861877859831460578&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;gmail.com;ieee.org", + "email": "stu.xjtu.edu.cn;mail.xjtu.edu.cn;gmail.com;ieee.org", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Xi'an Jiaotong University", + "aff_unique_dep": "MOE Key Laboratory for Intelligent Networks and Network Security", + "aff_unique_url": "http://www.xjtu.edu.cn", + "aff_unique_abbr": "XJTU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Xi'an", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25247", + "title": "SelectAugment: Hierarchical Deterministic Sample Selection for Data Augmentation", + "track": "main", + "status": "Technical", + "abstract": "Data augmentation (DA) has been extensively studied to facilitate model optimization in many tasks. Prior DA works focus on designing augmentation operations themselves, while leaving selecting suitable samples for augmentation out of consideration. This might incur visual ambiguities and further induce training biases. 
In this paper, we propose an effective approach, dubbed SelectAugment, to select samples for augmentation in a deterministic and online manner based on the sample contents and the network training status. To facilitate the policy learning, in each batch, we exploit the hierarchy of this task by first determining the augmentation ratio and then deciding whether to augment each training sample under this ratio. We model this process as two-step decision-making and adopt Hierarchical Reinforcement Learning (HRL) to learn the selection policy. In this way, the negative effects of the randomness in selecting samples to augment can be effectively alleviated and the effectiveness of DA is improved. Extensive experiments demonstrate that our proposed SelectAugment significantly improves various off-the-shelf DA methods on image classification and fine-grained image recognition.", + "primary_area": "computer vision ii", + "author": "Shiqi Lin; Zhizheng Zhang; Xin Li; Zhibo Chen", + "authorids": "", + "aff": "University of Science and Technology of China; Microsoft Research Asia; University of Science and Technology of China; University of Science and Technology of China", + "bibtex": "@article{Lin_Zhang_Li_Chen_2023, title={SelectAugment: Hierarchical Deterministic Sample Selection for Data Augmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25247}, DOI={10.1609/aaai.v37i2.25247}, abstractNote={Data augmentation (DA) has been extensively studied to facilitate model optimization in many tasks. Prior DA works focus on designing augmentation operations themselves, while leaving selecting suitable samples for augmentation out of consideration. This might incur visual ambiguities and further induce training biases. In this paper, we propose an effective approach, dubbed SelectAugment, to select samples for augmentation in a deterministic and online manner based on the sample contents and the network training status. 
To facilitate the policy learning, in each batch, we exploit the hierarchy of this task by first determining the augmentation ratio and then deciding whether to augment each training sample under this ratio. We model this process as two-step decision-making and adopt Hierarchical Reinforcement Learning (HRL) to learn the selection policy. In this way, the negative effects of the randomness in selecting samples to augment can be effectively alleviated and the effectiveness of DA is improved. Extensive experiments demonstrate that our proposed SelectAugment significantly improves various off-the-shelf DA methods on image classification and fine-grained image recognition.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Shiqi and Zhang, Zhizheng and Li, Xin and Chen, Zhibo}, year={2023}, month={Jun.}, pages={1604-1612} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25247/25019", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25247", + "pdf_size": 803925, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=361403127805764631&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.ustc.edu.cn;microsoft.com;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;microsoft.com;mail.ustc.edu.cn;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research", + "aff_unique_dep": ";Research", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "USTC;MSR Asia", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26555", + "title": "Selective Knowledge Distillation for Non-Autoregressive Neural Machine Translation", + "track": "main", + 
"status": "Technical", + "abstract": "Benefiting from the sequence-level knowledge distillation, the Non-Autoregressive Transformer (NAT) achieves great success in neural machine translation tasks. \nHowever, existing knowledge distillation has side effects, such as propagating errors from the teacher to NAT students, which may limit further improvements of NAT models and are rarely discussed in existing research.\nIn this paper, we introduce selective knowledge distillation by introducing an NAT evaluator to select NAT-friendly targets that are of high quality and easy to learn.\nIn addition, we introduce a simple yet effective progressive distillation method to boost NAT performance. \nExperiment results on multiple WMT language directions and several representative NAT models show that our approach can realize a flexible trade-off between the quality and complexity of training data for NAT models, achieving strong performances.\nFurther analysis shows that distilling only 5% of the raw translations can help an NAT outperform its counterpart trained on raw data by about 2.4 BLEU.", + "primary_area": "speech natural language processing", + "author": "Min Liu; Yu Bao; Chengqi Zhao; Shujian Huang", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University+ByteDance AI Lab; ByteDance AI Lab; ByteDance AI Lab; National Key Laboratory for Novel Software Technology, Nanjing University+Collaborative Innovation Center of Novel Software Technology and Industrialization", + "bibtex": "@article{Liu_Bao_Zhao_Huang_2023, title={Selective Knowledge Distillation for Non-Autoregressive Neural Machine Translation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26555}, DOI={10.1609/aaai.v37i11.26555}, abstractNote={Benefiting from the sequence-level knowledge distillation, the Non-Autoregressive Transformer (NAT) achieves great success in neural machine translation tasks. 
However, existing knowledge distillation has side effects, such as propagating errors from the teacher to NAT students, which may limit further improvements of NAT models and are rarely discussed in existing research.\nIn this paper, we introduce selective knowledge distillation by introducing an NAT evaluator to select NAT-friendly targets that are of high quality and easy to learn.\nIn addition, we introduce a simple yet effective progressive distillation method to boost NAT performance. Experiment results on multiple WMT language directions and several representative NAT models show that our approach can realize a flexible trade-off between the quality and complexity of training data for NAT models, achieving strong performances.\nFurther analysis shows that distilling only 5% of the raw translations can help an NAT outperform its counterpart trained on raw data by about 2.4 BLEU.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Min and Bao, Yu and Zhao, Chengqi and Huang, Shujian}, year={2023}, month={Jun.}, pages={13246-13254} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26555/26327", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26555", + "pdf_size": 699433, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12368507691609042760&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "smail.nju.edu.cn;bytedance.com;bytedance.com;nju.edu.cn", + "email": "smail.nju.edu.cn;bytedance.com;bytedance.com;nju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0+2", + "aff_unique_norm": "Nanjing University;ByteDance;Collaborative Innovation Center of Novel Software Technology and Industrialization", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;AI Lab;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.bytedance.com;", + "aff_unique_abbr": "Nanjing 
University;ByteDance;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-26622", + "title": "Selector-Enhancer: Learning Dynamic Selection of Local and Non-local Attention Operation for Speech Enhancement", + "track": "main", + "status": "Technical", + "abstract": "Attention mechanisms, such as local and non-local attention, play a fundamental role in recent deep learning based speech enhancement (SE) systems. However, a natural speech contains many fast-changing and relatively briefly acoustic events, therefore, capturing the most informative speech features by indiscriminately using local and non-local attention is challenged. We observe that the noise type and speech feature vary within a sequence of speech and the local and non-local can respectively process different types of corrupted speech regions. To leverage this, we propose Selector-Enhancer, a dual-attention based convolution neural network (CNN) with a feature-filter that can dynamically select regions from low-resolution speech features and feed them to local or non-local attention operations. In particular, the proposed feature-filter is trained by using reinforcement learning (RL) with a developed difficulty-regulated reward that related to network performance, model complexity and \u201cthe difficulty of the SE task\u201d. The results show that our method achieves comparable or superior performance to existing approaches. 
In particular, Selector-Enhancer is effective for real-world denoising, where the number and types of noise are varies on a single noisy mixture.", + "primary_area": "speech natural language processing", + "author": "Xinmeng Xu; Weiping Tu; Yuhong Yang", + "authorids": "", + "aff": "National Engineering Research Center for Multimedia Software, School of Computer Science, Wuhan University, China+Hubei Luojia Laboratory, China; National Engineering Research Center for Multimedia Software, School of Computer Science, Wuhan University, China+Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China; National Engineering Research Center for Multimedia Software, School of Computer Science, Wuhan University, China+Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China", + "bibtex": "@article{Xu_Tu_Yang_2023, title={Selector-Enhancer: Learning Dynamic Selection of Local and Non-local Attention Operation for Speech Enhancement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26622}, DOI={10.1609/aaai.v37i11.26622}, abstractNote={Attention mechanisms, such as local and non-local attention, play a fundamental role in recent deep learning based speech enhancement (SE) systems. However, a natural speech contains many fast-changing and relatively briefly acoustic events, therefore, capturing the most informative speech features by indiscriminately using local and non-local attention is challenged. We observe that the noise type and speech feature vary within a sequence of speech and the local and non-local can respectively process different types of corrupted speech regions. To leverage this, we propose Selector-Enhancer, a dual-attention based convolution neural network (CNN) with a feature-filter that can dynamically select regions from low-resolution speech features and feed them to local or non-local attention operations. 
In particular, the proposed feature-filter is trained by using reinforcement learning (RL) with a developed difficulty-regulated reward that related to network performance, model complexity and \u201cthe difficulty of the SE task\u201d. The results show that our method achieves comparable or superior performance to existing approaches. In particular, Selector-Enhancer is effective for real-world denoising, where the number and types of noise are varies on a single noisy mixture.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Xinmeng and Tu, Weiping and Yang, Yuhong}, year={2023}, month={Jun.}, pages={13853-13860} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26622/26394", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26622", + "pdf_size": 14848687, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3379515186173502467&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "whu.edu.cn;whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn;whu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+0;0+0", + "aff_unique_norm": "Wuhan University;Hubei Luojia Laboratory", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "http://www.whu.edu.cn;", + "aff_unique_abbr": "WHU;", + "aff_campus_unique_index": "0;0+0;0+0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25408", + "title": "Self Correspondence Distillation for End-to-End Weakly-Supervised Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Efficiently training accurate deep models for weakly supervised semantic segmentation (WSSS) with image-level labels is challenging and important. Recently, end-to-end WSSS methods have become the focus of research due to their high training efficiency. 
However, current methods suffer from insufficient extraction of comprehensive semantic information, resulting in low-quality pseudo-labels and sub-optimal solutions for end-to-end WSSS. To this end, we propose a simple and novel Self Correspondence Distillation (SCD) method to refine pseudo-labels without introducing external supervision. Our SCD enables the network to utilize feature correspondence derived from itself as a distillation target, which can enhance the network's feature learning process by complementing semantic information. In addition, to further improve the segmentation accuracy, we design a Variation-aware Refine Module to enhance the local consistency of pseudo-labels by computing pixel-level variation. Finally, we present an efficient end-to-end Transformer-based framework (TSCD) via SCD and Variation-aware Refine Module for the accurate WSSS task. Extensive experiments on the PASCAL VOC 2012 and MS COCO 2014 datasets demonstrate that our method significantly outperforms other state-of-the-art methods. 
Our code is available at https://github.com/Rongtao-Xu/RepresentationLearning/tree/main/SCD-AAAI2023.", + "primary_area": "computer vision iii", + "author": "Rongtao Xu; Changwei Wang; Jiaxi Sun; Shibiao Xu; Weiliang Meng; Xiaopeng Zhang", + "authorids": "", + "aff": "NLPR, Institute of Automation, Chinese Academy of Sciences+School of Artificial Intelligence, University of Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences+School of Artificial Intelligence, University of Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences+School of Artificial Intelligence, University of Chinese Academy of Sciences; School of Artificial Intelligence, Beijing University of Posts and Telecommunications; NLPR, Institute of Automation, Chinese Academy of Sciences+School of Artificial Intelligence, University of Chinese Academy of Sciences; NLPR, Institute of Automation, Chinese Academy of Sciences+School of Artificial Intelligence, University of Chinese Academy of Sciences", + "bibtex": "@article{Xu_Wang_Sun_Xu_Meng_Zhang_2023, title={Self Correspondence Distillation for End-to-End Weakly-Supervised Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25408}, DOI={10.1609/aaai.v37i3.25408}, abstractNote={Efficiently training accurate deep models for weakly supervised semantic segmentation (WSSS) with image-level labels is challenging and important. Recently, end-to-end WSSS methods have become the focus of research due to their high training efficiency. However, current methods suffer from insufficient extraction of comprehensive semantic information, resulting in low-quality pseudo-labels and sub-optimal solutions for end-to-end WSSS. To this end, we propose a simple and novel Self Correspondence Distillation (SCD) method to refine pseudo-labels without introducing external supervision. 
Our SCD enables the network to utilize feature correspondence derived from itself as a distillation target, which can enhance the network\u2019s feature learning process by complementing semantic information. In addition, to further improve the segmentation accuracy, we design a Variation-aware Refine Module to enhance the local consistency of pseudo-labels by computing pixel-level variation. Finally, we present an efficient end-to-end Transformer-based framework (TSCD) via SCD and Variation-aware Refine Module for the accurate WSSS task. Extensive experiments on the PASCAL VOC 2012 and MS COCO 2014 datasets demonstrate that our method significantly outperforms other state-of-the-art methods. Our code is available at https://github.com/Rongtao-Xu/RepresentationLearning/tree/main/SCD-AAAI2023.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Rongtao and Wang, Changwei and Sun, Jiaxi and Xu, Shibiao and Meng, Weiliang and Zhang, Xiaopeng}, year={2023}, month={Jun.}, pages={3045-3053} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25408/25180", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25408", + "pdf_size": 1771348, + "gs_citation": 59, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11500107972877290969&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ia.ac.cn;ia.ac.cn;ia.ac.cn;bupt.edu.cn;ia.ac.cn;ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;ia.ac.cn;bupt.edu.cn;ia.ac.cn;ia.ac.cn", + "github": "https://github.com/Rongtao-Xu/RepresentationLearning/tree/main/SCD-AAAI2023", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;2;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Beijing University of Posts and Telecommunications", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;School of Artificial Intelligence", + "aff_unique_url": 
"http://www.ia.cas.cn;http://www.ucas.ac.cn;http://www.bupt.edu.cn/", + "aff_unique_abbr": "CAS;UCAS;BUPT", + "aff_campus_unique_index": ";;;1;;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25420", + "title": "Self-Asymmetric Invertible Network for Compression-Aware Image Rescaling", + "track": "main", + "status": "Technical", + "abstract": "High-resolution (HR) images are usually downscaled to low-resolution (LR) ones for better display and afterward upscaled back to the original size to recover details. Recent work in image rescaling formulates downscaling and upscaling as a unified task and learns a bijective mapping between HR and LR via invertible networks. However, in real-world applications (e.g., social media), most images are compressed for transmission. Lossy compression will lead to irreversible information loss on LR images, hence damaging the inverse upscaling procedure and degrading the reconstruction accuracy. In this paper, we propose the Self-Asymmetric Invertible Network (SAIN) for compression-aware image rescaling. To tackle the distribution shift, we first develop an end-to-end asymmetric framework with two separate bijective mappings for high-quality and compressed LR images, respectively. Then, based on empirical analysis of this framework, we model the distribution of the lost information (including downscaling and compression) using isotropic Gaussian mixtures and propose the Enhanced Invertible Block to derive high-quality/compressed LR images in one forward pass. Besides, we design a set of losses to regularize the learned LR images and enhance the invertibility. Extensive experiments demonstrate the consistent improvements of SAIN across various image rescaling datasets in terms of both quantitative and qualitative evaluation under standard image compression formats (i.e., JPEG and WebP). 
Code is available at https://github.com/yang-jin-hai/SAIN.", + "primary_area": "computer vision iii", + "author": "Jinhai Yang; Mengxi Guo; Shijie Zhao; Junlin Li; Li Zhang", + "authorids": "", + "aff": "Bytedance Inc., Shenzhen, China; Bytedance Inc., Shenzhen, China; Bytedance Inc., Shenzhen, China; Bytedance Inc., San Diego, CA, 92122 USA; Bytedance Inc., San Diego, CA, 92122 USA", + "bibtex": "@article{Yang_Guo_Zhao_Li_Zhang_2023, title={Self-Asymmetric Invertible Network for Compression-Aware Image Rescaling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25420}, DOI={10.1609/aaai.v37i3.25420}, abstractNote={High-resolution (HR) images are usually downscaled to low-resolution (LR) ones for better display and afterward upscaled back to the original size to recover details. Recent work in image rescaling formulates downscaling and upscaling as a unified task and learns a bijective mapping between HR and LR via invertible networks. However, in real-world applications (e.g., social media), most images are compressed for transmission. Lossy compression will lead to irreversible information loss on LR images, hence damaging the inverse upscaling procedure and degrading the reconstruction accuracy. In this paper, we propose the Self-Asymmetric Invertible Network (SAIN) for compression-aware image rescaling. To tackle the distribution shift, we first develop an end-to-end asymmetric framework with two separate bijective mappings for high-quality and compressed LR images, respectively. Then, based on empirical analysis of this framework, we model the distribution of the lost information (including downscaling and compression) using isotropic Gaussian mixtures and propose the Enhanced Invertible Block to derive high-quality/compressed LR images in one forward pass. Besides, we design a set of losses to regularize the learned LR images and enhance the invertibility. 
Extensive experiments demonstrate the consistent improvements of SAIN across various image rescaling datasets in terms of both quantitative and qualitative evaluation under standard image compression formats (i.e., JPEG and WebP). Code is available at https://github.com/yang-jin-hai/SAIN.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Jinhai and Guo, Mengxi and Zhao, Shijie and Li, Junlin and Zhang, Li}, year={2023}, month={Jun.}, pages={3155-3163} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25420/25192", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25420", + "pdf_size": 10285216, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=600477293911327770&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com", + "email": "bytedance.com;bytedance.com;bytedance.com;bytedance.com;bytedance.com", + "github": "https://github.com/yang-jin-hai/SAIN", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Bytedance Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.bytedance.com", + "aff_unique_abbr": "Bytedance", + "aff_campus_unique_index": "0;0;0;1;1", + "aff_campus_unique": "Shenzhen;San Diego", + "aff_country_unique_index": "0;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25091", + "title": "Self-Contrastive Learning: Single-Viewed Supervised Contrastive Framework Using Sub-network", + "track": "main", + "status": "Technical", + "abstract": "Contrastive loss has significantly improved performance in supervised classification tasks by using a multi-viewed framework that leverages augmentation and label information. The augmentation enables contrast with another view of a single image but enlarges training time and memory usage. 
To exploit the strength of multi-views while avoiding the high computation cost, we introduce a multi-exit architecture that outputs multiple features of a single image in a single-viewed framework. To this end, we propose Self-Contrastive (SelfCon) learning, which self-contrasts within multiple outputs from the different levels of a single network. The multi-exit architecture efficiently replaces multi-augmented images and leverages various information from different layers of a network. We demonstrate that SelfCon learning improves the classification performance of the encoder network, and empirically analyze its advantages in terms of the single-view and the sub-network. Furthermore, we provide theoretical evidence of the performance increase based on the mutual information bound. For ImageNet classification on ResNet-50, SelfCon improves accuracy by +0.6% with 59% memory and 48% time of Supervised Contrastive learning, and a simple ensemble of multi-exit outputs boosts performance up to +1.5%. Our code is available at https://github.com/raymin0223/self-contrastive-learning.", + "primary_area": "computer vision i", + "author": "Sangmin Bae; Sungnyun Kim; Jongwoo Ko; Gihun Lee; Seungjong Noh; Se-Young Yun", + "authorids": "", + "aff": "Graduate School of Artificial Intelligence, KAIST; Graduate School of Artificial Intelligence, KAIST; Graduate School of Artificial Intelligence, KAIST; Graduate School of Artificial Intelligence, KAIST; SK Hynix; Graduate School of Artificial Intelligence, KAIST", + "bibtex": "@article{Bae_Kim_Ko_Lee_Noh_Yun_2023, title={Self-Contrastive Learning: Single-Viewed Supervised Contrastive Framework Using Sub-network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25091}, DOI={10.1609/aaai.v37i1.25091}, abstractNote={Contrastive loss has significantly improved performance in supervised classification tasks by using a multi-viewed framework that leverages augmentation and label information. 
The augmentation enables contrast with another view of a single image but enlarges training time and memory usage. To exploit the strength of multi-views while avoiding the high computation cost, we introduce a multi-exit architecture that outputs multiple features of a single image in a single-viewed framework. To this end, we propose Self-Contrastive (SelfCon) learning, which self-contrasts within multiple outputs from the different levels of a single network. The multi-exit architecture efficiently replaces multi-augmented images and leverages various information from different layers of a network. We demonstrate that SelfCon learning improves the classification performance of the encoder network, and empirically analyze its advantages in terms of the single-view and the sub-network. Furthermore, we provide theoretical evidence of the performance increase based on the mutual information bound. For ImageNet classification on ResNet-50, SelfCon improves accuracy by +0.6% with 59% memory and 48% time of Supervised Contrastive learning, and a simple ensemble of multi-exit outputs boosts performance up to +1.5%. 
Our code is available at https://github.com/raymin0223/self-contrastive-learning.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bae, Sangmin and Kim, Sungnyun and Ko, Jongwoo and Lee, Gihun and Noh, Seungjong and Yun, Se-Young}, year={2023}, month={Jun.}, pages={197-205} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25091/24863", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25091", + "pdf_size": 1273130, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17807807236980783676&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 5, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;sk.com;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;sk.com;kaist.ac.kr", + "github": "https://github.com/raymin0223/self-contrastive-learning", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "KAIST;SK Hynix Inc.", + "aff_unique_dep": "Graduate School of Artificial Intelligence;", + "aff_unique_url": "https://www.kaist.edu;https://www.skhynix.com", + "aff_unique_abbr": "KAIST;SK Hynix", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25266", + "title": "Self-Decoupling and Ensemble Distillation for Efficient Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Knowledge distillation (KD) is a promising teacher-student learning paradigm that transfers information from a cumbersome teacher to a student network. To avoid the training cost of a large teacher network, the recent studies propose to distill knowledge from the student itself, called Self-KD. However, due to the limitations of the performance and capacity of the student, the soft-labels or features distilled by the student barely provide reliable guidance. 
Moreover, most of the Self-KD algorithms are specific to classification tasks based on soft-labels, and not suitable for semantic segmentation. To alleviate these contradictions, we revisit the label and feature distillation problem in segmentation, and propose Self-Decoupling and Ensemble Distillation for Efficient Segmentation (SDES). Specifically, we design a decoupled prediction ensemble distillation (DPED) algorithm that generates reliable soft-labels with multiple expert decoders, and a decoupled feature ensemble distillation (DFED) mechanism to utilize more important channel-wise feature maps for encoder learning. The extensive experiments on three public segmentation datasets demonstrate the superiority of our approach and the efficacy of each component in the framework through the ablation study.", + "primary_area": "computer vision ii", + "author": "Yuang Liu; Wei Zhang; Jun Wang", + "authorids": "", + "aff": "School of Computer Science and Technology, East China Normal University; School of Computer Science and Technology, East China Normal University; School of Computer Science and Technology, East China Normal University", + "bibtex": "@article{Liu_Zhang_Wang_2023, title={Self-Decoupling and Ensemble Distillation for Efficient Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25266}, DOI={10.1609/aaai.v37i2.25266}, abstractNote={Knowledge distillation (KD) is a promising teacher-student learning paradigm that transfers information from a cumbersome teacher to a student network. To avoid the training cost of a large teacher network, the recent studies propose to distill knowledge from the student itself, called Self-KD. However, due to the limitations of the performance and capacity of the student, the soft-labels or features distilled by the student barely provide reliable guidance. 
Moreover, most of the Self-KD algorithms are specific to classification tasks based on soft-labels, and not suitable for semantic segmentation. To alleviate these contradictions, we revisit the label and feature distillation problem in segmentation, and propose Self-Decoupling and Ensemble Distillation for Efficient Segmentation (SDES). Specifically, we design a decoupled prediction ensemble distillation (DPED) algorithm that generates reliable soft-labels with multiple expert decoders, and a decoupled feature ensemble distillation (DFED) mechanism to utilize more important channel-wise feature maps for encoder learning. The extensive experiments on three public segmentation datasets demonstrate the superiority of our approach and the efficacy of each component in the framework through the ablation study.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yuang and Zhang, Wei and Wang, Jun}, year={2023}, month={Jun.}, pages={1772-1780} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25266/25038", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25266", + "pdf_size": 2002624, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=371170181752951135&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25164", + "title": "Self-Emphasizing Network for Continuous Sign Language Recognition", + "track": "main", + "status": "Technical", + "abstract": 
"Hand and face play an important role in expressing sign language. Their features are usually especially leveraged to improve system performance. However, to effectively extract visual representations and capture trajectories for hands and face, previous methods always come at high computations with increased training complexity. They usually employ extra heavy pose-estimation networks to locate human body keypoints or rely on additional pre-extracted heatmaps for supervision. To relieve this problem, we propose a self-emphasizing network (SEN) to emphasize informative spatial regions in a self-motivated way, with few extra computations and without additional expensive supervision. Specifically, SEN first employs a lightweight subnetwork to incorporate local spatial-temporal features to identify informative regions, and then dynamically augment original features via attention maps. It's also observed that not all frames contribute equally to recognition. We present a temporal self-emphasizing module to adaptively emphasize those discriminative frames and suppress redundant ones. A comprehensive comparison with previous methods equipped with hand and face features demonstrates the superiority of our method, even though they always require huge computations and rely on expensive extra supervision. Remarkably, with few extra computations, SEN achieves new state-of-the-art accuracy on four large-scale datasets, PHOENIX14, PHOENIX14-T, CSL-Daily, and CSL. Visualizations verify the effects of SEN on emphasizing informative spatial and temporal features. 
Code is available at https://github.com/hulianyuyy/SEN_CSLR", + "primary_area": "computer vision i", + "author": "Lianyu Hu; Liqing Gao; Zekang Liu; Wei Feng", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin 300350, China; College of Intelligence and Computing, Tianjin University, Tianjin 300350, China; College of Intelligence and Computing, Tianjin University, Tianjin 300350, China; College of Intelligence and Computing, Tianjin University, Tianjin 300350, China", + "bibtex": "@article{Hu_Gao_Liu_Feng_2023, title={Self-Emphasizing Network for Continuous Sign Language Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25164}, DOI={10.1609/aaai.v37i1.25164}, abstractNote={Hand and face play an important role in expressing sign language. Their features are usually especially leveraged to improve system performance. However, to effectively extract visual representations and capture trajectories for hands and face, previous methods always come at high computations with increased training complexity. They usually employ extra heavy pose-estimation networks to locate human body keypoints or rely on additional pre-extracted heatmaps for supervision. To relieve this problem, we propose a self-emphasizing network (SEN) to emphasize informative spatial regions in a self-motivated way, with few extra computations and without additional expensive supervision. Specifically, SEN first employs a lightweight subnetwork to incorporate local spatial-temporal features to identify informative regions, and then dynamically augment original features via attention maps. It\u2019s also observed that not all frames contribute equally to recognition. We present a temporal self-emphasizing module to adaptively emphasize those discriminative frames and suppress redundant ones. 
"A comprehensive comparison with previous methods equipped with hand and face features demonstrates the superiority of our method, even though they always require huge computations and rely on expensive extra supervision. Remarkably, with few extra computations, SEN achieves new state-of-the-art accuracy on four large-scale datasets, PHOENIX14, PHOENIX14-T, CSL-Daily, and CSL. Visualizations verify the effects of SEN on emphasizing informative spatial and temporal features. Code is available at https://github.com/hulianyuyy/SEN_CSLR}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Lianyu and Gao, Liqing and Liu, Zekang and Feng, Wei}, year={2023}, month={Jun.}, pages={854-862} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25164/24936", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25164", + "pdf_size": 1087691, + "gs_citation": 76, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8371475249043403030&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;ieee.org", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;ieee.org", + "github": "https://github.com/hulianyuyy/SEN_CSLR", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Tianjin University", + "aff_unique_dep": "College of Intelligence and Computing", + "aff_unique_url": "http://www.tju.edu.cn", + "aff_unique_abbr": "Tianjin University", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Tianjin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25587", + "title": "Self-Organization Preserved Graph Structure Learning with Principle of Relevant Information", + "track": "main", + "status": "Technical", + "abstract": "Most Graph Neural Networks follow the message-passing paradigm, assuming the observed structure depicts the ground-truth node relationships. 
However, this fundamental assumption cannot always be satisfied, as real-world graphs are always incomplete, noisy, or redundant. How to reveal the inherent graph structure in a unified way remains under-explored. \nWe proposed PRI-GSL, a Graph Structure Learning framework guided by the Principle of Relevant Information, providing a simple and unified framework for identifying the self-organization and revealing the hidden structure. PRI-GSL learns a structure that contains the most relevant yet least redundant information quantified by von Neumann entropy and Quantum Jensen Shannon divergence. PRI-GSL incorporates the evolution of quantum continuous walk with graph wavelets to encode node structural roles, showing in which way the nodes interplay and self-organize with the graph structure. Extensive experiments demonstrate the superior effectiveness and robustness of PRI-GSL.", + "primary_area": "data mining and knowledge management", + "author": "Qingyun Sun; Jianxin Li; Beining Yang; Xingcheng Fu; Hao Peng; Philip S. 
Yu", + "authorids": "", + "aff": "Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China+School of Computer Science and Engineering, Beihang University, Beijing 100191, China; Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China+School of Computer Science and Engineering, Beihang University, Beijing 100191, China; Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China+School of Computer Science and Engineering, Beihang University, Beijing 100191, China; Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China+School of Computer Science and Engineering, Beihang University, Beijing 100191, China; Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China; Department of Computer Science, University of Illinois at Chicago, Chicago, USA", + "bibtex": "@article{Sun_Li_Yang_Fu_Peng_Yu_2023, title={Self-Organization Preserved Graph Structure Learning with Principle of Relevant Information}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25587}, DOI={10.1609/aaai.v37i4.25587}, abstractNote={Most Graph Neural Networks follow the message-passing paradigm, assuming the observed structure depicts the ground-truth node relationships. However, this fundamental assumption cannot always be satisfied, as real-world graphs are always incomplete, noisy, or redundant. How to reveal the inherent graph structure in a unified way remains under-explored. We proposed PRI-GSL, a Graph Structure Learning framework guided by the Principle of Relevant Information, providing a simple and unified framework for identifying the self-organization and revealing the hidden structure. 
PRI-GSL learns a structure that contains the most relevant yet least redundant information quantified by von Neumann entropy and Quantum Jensen Shannon divergence. PRI-GSL incorporates the evolution of quantum continuous walk with graph wavelets to encode node structural roles, showing in which way the nodes interplay and self-organize with the graph structure. Extensive experiments demonstrate the superior effectiveness and robustness of PRI-GSL.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Qingyun and Li, Jianxin and Yang, Beining and Fu, Xingcheng and Peng, Hao and Yu, Philip S.}, year={2023}, month={Jun.}, pages={4643-4651} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25587/25359", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25587", + "pdf_size": 6261015, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9000070346740452485&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;uic.edu", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;uic.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;0+0;0;1", + "aff_unique_norm": "Beihang University;University of Illinois at Chicago", + "aff_unique_dep": "Beijing Advanced Innovation Center for Big Data and Brain Computing;Department of Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.uic.edu", + "aff_unique_abbr": "Beihang;UIC", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0;1", + "aff_campus_unique": "Beijing;Chicago", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26954", + "title": "Self-Paced Learning Based Graph Convolutional Neural Network for Mixed Integer Programming (Student Abstract)", + "track": "aaai student abstract and poster program", + 
"status": "Technical", + "abstract": "Graph convolutional neural network (GCN) based methods have achieved noticeable performance in solving mixed integer programming problems (MIPs). However, the generalization of existing work is limited due to the problem structure. This paper proposes a self-paced learning (SPL) based GCN network (SPGCN) with curriculum learning (CL) to make the utmost of samples. SPGCN employs a GCN model to imitate the branching variable selection during the branch and bound process, while the training process is conducted in a self-paced fashion. Specifically, SPGCN contains a loss-based automatic difficulty measurer, where the training loss of the sample represents the difficulty level. In each iteration, a dynamic training dataset is constructed according to the difficulty level for GCN model training. Experiments on four NP-hard datasets verify that CL can lead to generalization improvement and convergence speedup in solving MIPs, where SPL performs better than predefined CL methods.", + "primary_area": "", + "author": "Li Chen; Hua Xu; Ziteng Wang; Chengming Wang; Yu Jiang", + "authorids": "", + "aff": "State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China; State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China; Meituan Inc., Block F&G, Wangjing International R&D Park, No.6 Wang Jing East Rd, Chaoyang District, Beijing, 100102, China; Meituan Inc., Block F&G, Wangjing International R&D Park, No.6 Wang Jing East Rd, Chaoyang District, Beijing, 100102, China", + "bibtex": "@article{Chen_Xu_Wang_Wang_Jiang_2024, title={Self-Paced Learning Based Graph Convolutional Neural Network for Mixed Integer 
Programming (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26954}, DOI={10.1609/aaai.v37i13.26954}, abstractNote={Graph convolutional neural network (GCN) based methods have achieved noticeable performance in solving mixed integer programming problems (MIPs). However, the generalization of existing work is limited due to the problem structure. This paper proposes a self-paced learning (SPL) based GCN network (SPGCN) with curriculum learning (CL) to make the utmost of samples. SPGCN employs a GCN model to imitate the branching variable selection during the branch and bound process, while the training process is conducted in a self-paced fashion. Specifically, SPGCN contains a loss-based automatic difficulty measurer, where the training loss of the sample represents the difficulty level. In each iteration, a dynamic training dataset is constructed according to the difficulty level for GCN model training. Experiments on four NP-hard datasets verify that CL can lead to generalization improvement and convergence speedup in solving MIPs, where SPL performs better than predefined CL methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Li and Xu, Hua and Wang, Ziteng and Wang, Chengming and Jiang, Yu}, year={2024}, month={Jul.}, pages={16188-16189} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26954/26726", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26954", + "pdf_size": 460574, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10899015540196248539&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;tsinghua.edu.cn;mails.tsinghua.edu.cn;meituan.com;meituan.com", + "email": "mails.tsinghua.edu.cn;tsinghua.edu.cn;mails.tsinghua.edu.cn;meituan.com;meituan.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;1", + "aff_unique_norm": 
"Tsinghua University;Meituan Inc.", + "aff_unique_dep": "Department of Computer Science and Technology;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.meituan.com", + "aff_unique_abbr": "Tsinghua;Meituan", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25495", + "title": "Self-Supervised Action Representation Learning from Partial Spatio-Temporal Skeleton Sequences", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised learning has demonstrated remarkable capability in representation learning for skeleton-based action recognition. Existing methods mainly focus on applying global data augmentation to generate different views of the skeleton sequence for contrastive learning. However, due to the rich action clues in the skeleton sequences, existing methods may only take a global perspective to learn to discriminate different skeletons without thoroughly leveraging the local relationship between different skeleton joints and video frames, which is essential for real-world applications. In this work, we propose a Partial Spatio-Temporal Learning (PSTL) framework to exploit the local relationship from a partial skeleton sequences built by a unique spatio-temporal masking strategy. Specifically, we construct a negative-sample-free triplet steam structure that is composed of an anchor stream without any masking, a spatial masking stream with Central Spatial Masking (CSM), and a temporal masking stream with Motion Attention Temporal Masking (MATM). The feature cross-correlation matrix is measured between the anchor stream and the other two masking streams, respectively. (1) Central Spatial Masking discards selected joints from the feature calculation process, where the joints with a higher degree of centrality have a higher possibility of being selected. 
(2) Motion Attention Temporal Masking leverages the motion of action and remove frames that move faster with a higher possibility. Our method achieves state-of-the-art performance on NTURGB+D 60, NTURGB+D 120 and PKU-MMD under various downstream tasks. Furthermore, to simulate the real-world scenarios, a practical evaluation is performed where some skeleton joints are lost in downstream tasks. In contrast to previous methods that suffer from large performance drops, our PSTL can still achieve remarkable results under this challenging setting, validating the robustness of our method.", + "primary_area": "computer vision iii", + "author": "Yujie Zhou; Haodong Duan; Anyi Rao; Bing Su; Jiaqi Wang", + "authorids": "", + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China + Shanghai AI Laboratory; Chinese University of Hong Kong; Chinese University of Hong Kong; Gaoling School of Artificial Intelligence, Renmin University of China + Beijing Key Laboratory of Big Data Management and Analysis Methods; Shanghai AI Laboratory", + "bibtex": "@article{Zhou_Duan_Rao_Su_Wang_2023, title={Self-Supervised Action Representation Learning from Partial Spatio-Temporal Skeleton Sequences}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25495}, DOI={10.1609/aaai.v37i3.25495}, abstractNote={Self-supervised learning has demonstrated remarkable capability in representation learning for skeleton-based action recognition. Existing methods mainly focus on applying global data augmentation to generate different views of the skeleton sequence for contrastive learning. However, due to the rich action clues in the skeleton sequences, existing methods may only take a global perspective to learn to discriminate different skeletons without thoroughly leveraging the local relationship between different skeleton joints and video frames, which is essential for real-world applications. 
In this work, we propose a Partial Spatio-Temporal Learning (PSTL) framework to exploit the local relationship from a partial skeleton sequences built by a unique spatio-temporal masking strategy. Specifically, we construct a negative-sample-free triplet steam structure that is composed of an anchor stream without any masking, a spatial masking stream with Central Spatial Masking (CSM), and a temporal masking stream with Motion Attention Temporal Masking (MATM). The feature cross-correlation matrix is measured between the anchor stream and the other two masking streams, respectively. (1) Central Spatial Masking discards selected joints from the feature calculation process, where the joints with a higher degree of centrality have a higher possibility of being selected. (2) Motion Attention Temporal Masking leverages the motion of action and remove frames that move faster with a higher possibility. Our method achieves state-of-the-art performance on NTURGB+D 60, NTURGB+D 120 and PKU-MMD under various downstream tasks. 
Furthermore, to simulate the real-world scenarios, a practical evaluation is performed where some skeleton joints are lost in downstream tasks. In contrast to previous methods that suffer from large performance drops, our PSTL can still achieve remarkable results under this challenging setting, validating the robustness of our method.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Yujie and Duan, Haodong and Rao, Anyi and Su, Bing and Wang, Jiaqi}, year={2023}, month={Jun.}, pages={3825-3833} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25495/25267", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25495", + "pdf_size": 332601, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1894372346635626695&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff_domain": "ruc.edu.cn;gmail.com;link.cuhk.edu.hk;gmail.com;gmail.com", + "email": "ruc.edu.cn;gmail.com;link.cuhk.edu.hk;gmail.com;gmail.com", + "github": "https://github.com/YujieOuO/PSTL.git", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;2;0+3;1", + "aff_unique_norm": "Renmin University of China;Shanghai AI Laboratory;Chinese University of Hong Kong;Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;;;Big Data Management and Analysis", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.shanghai-ai-lab.com;https://www.cuhk.edu.hk;", + "aff_unique_abbr": "RUC;SAIL;CUHK;", + "aff_campus_unique_index": "0;2;2;0", + "aff_campus_unique": "Beijing;;Shatin", + "aff_country_unique_index": "0+0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26162", + "title": "Self-Supervised Audio-Visual Representation Learning with Relaxed Cross-Modal Synchronicity", + "track": "main", + "status": "Technical", + "abstract": "We present CrissCross, a self-supervised framework for learning 
audio-visual representations. A novel notion is introduced in our framework whereby in addition to learning the intra-modal and standard 'synchronous' cross-modal relations, CrissCross also learns 'asynchronous' cross-modal relationships. We perform in-depth studies showing that by relaxing the temporal synchronicity between the audio and visual modalities, the network learns strong generalized representations useful for a variety of downstream tasks. To pretrain our proposed solution, we use 3 different datasets with varying sizes, Kinetics-Sound, Kinetics400, and AudioSet. The learned representations are evaluated on a number of downstream tasks namely action recognition, sound classification, and action retrieval. Our experiments show that CrissCross either outperforms or achieves performances on par with the current state-of-the-art self-supervised methods on action recognition and action retrieval with UCF101 and HMDB51, as well as sound classification with ESC50 and DCASE. Moreover, CrissCross outperforms fully-supervised pretraining while pretrained on Kinetics-Sound.", + "primary_area": "machine learning iii", + "author": "Pritam Sarkar; Ali Etemad", + "authorids": "", + "aff": "Queen\u2019s University, Canada + Vector Institute; Queen\u2019s University, Canada", + "bibtex": "@article{Sarkar_Etemad_2023, title={Self-Supervised Audio-Visual Representation Learning with Relaxed Cross-Modal Synchronicity}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26162}, DOI={10.1609/aaai.v37i8.26162}, abstractNote={We present CrissCross, a self-supervised framework for learning audio-visual representations. A novel notion is introduced in our framework whereby in addition to learning the intra-modal and standard \u2019synchronous\u2019 cross-modal relations, CrissCross also learns \u2019asynchronous\u2019 cross-modal relationships. 
We perform in-depth studies showing that by relaxing the temporal synchronicity between the audio and visual modalities, the network learns strong generalized representations useful for a variety of downstream tasks. To pretrain our proposed solution, we use 3 different datasets with varying sizes, Kinetics-Sound, Kinetics400, and AudioSet. The learned representations are evaluated on a number of downstream tasks namely action recognition, sound classification, and action retrieval. Our experiments show that CrissCross either outperforms or achieves performances on par with the current state-of-the-art self-supervised methods on action recognition and action retrieval with UCF101 and HMDB51, as well as sound classification with ESC50 and DCASE. Moreover, CrissCross outperforms fully-supervised pretraining while pretrained on Kinetics-Sound.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sarkar, Pritam and Etemad, Ali}, year={2023}, month={Jun.}, pages={9723-9732} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26162/25934", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26162", + "pdf_size": 2286282, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18148474084287775335&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "queensu.ca;queensu.ca", + "email": "queensu.ca;queensu.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0", + "aff_unique_norm": "Queen's University;Vector Institute", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.queensu.ca;https://vectorinstitute.ai/", + "aff_unique_abbr": "Queen's U;Vector Institute", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25943", + "title": "Self-Supervised Bidirectional Learning for Graph Matching", + "track": "main", + "status": 
"Technical", + "abstract": "Deep learning methods have demonstrated promising performance on the NP-hard Graph Matching (GM) problems. However, the state-of-the-art methods usually require the ground-truth labels, which may take extensive human efforts or be impractical to collect. In this paper, we present a robust self-supervised bidirectional learning method (IA-SSGM) to tackle GM in an unsupervised manner. It involves an affinity learning component and a classic GM solver. Specifically, we adopt the Hungarian solver to generate pseudo correspondence labels for the simple probabilistic relaxation of the affinity matrix. In addition, a bidirectional recycling consistency module is proposed to generate pseudo samples by recycling the pseudo correspondence back to permute the input. It imposes a consistency constraint between the pseudo affinity and the original one, which is theoretically supported to help reduce the matching error. Our method further develops a graph contrastive learning jointly with the affinity learning to enhance its robustness against the noise and outliers in real applications. 
Experiments deliver superior performance over the previous state-of-the-arts on five real-world benchmarks, especially under the more difficult outlier scenarios, demonstrating the effectiveness of our method.", + "primary_area": "machine learning i", + "author": "Wenqi Guo; Lin Zhang; Shikui Tu; Lei Xu", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University", + "bibtex": "@article{Guo_Zhang_Tu_Xu_2023, title={Self-Supervised Bidirectional Learning for Graph Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25943}, DOI={10.1609/aaai.v37i6.25943}, abstractNote={Deep learning methods have demonstrated promising performance on the NP-hard Graph Matching (GM) problems. However, the state-of-the-art methods usually require the ground-truth labels, which may take extensive human efforts or be impractical to collect. In this paper, we present a robust self-supervised bidirectional learning method (IA-SSGM) to tackle GM in an unsupervised manner. It involves an affinity learning component and a classic GM solver. Specifically, we adopt the Hungarian solver to generate pseudo correspondence labels for the simple probabilistic relaxation of the affinity matrix. In addition, a bidirectional recycling consistency module is proposed to generate pseudo samples by recycling the pseudo correspondence back to permute the input. It imposes a consistency constraint between the pseudo affinity and the original one, which is theoretically supported to help reduce the matching error. Our method further develops a graph contrastive learning jointly with the affinity learning to enhance its robustness against the noise and outliers in real applications. 
Experiments deliver superior performance over the previous state-of-the-arts on five real-world benchmarks, especially under the more difficult outlier scenarios, demonstrating the effectiveness of our method.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Wenqi and Zhang, Lin and Tu, Shikui and Xu, Lei}, year={2023}, month={Jun.}, pages={7784-7792} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25943/25715", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25943", + "pdf_size": 316008, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15436291728494892028&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25586", + "title": "Self-Supervised Continual Graph Learning in Adaptive Riemannian Spaces", + "track": "main", + "status": "Technical", + "abstract": "Continual graph learning routinely finds its role in a variety of real-world applications where the graph data with different tasks come sequentially. Despite the success of prior works, it still faces great challenges. On the one hand, existing methods work with the zero-curvature Euclidean space, and largely ignore the fact that curvature varies over the coming graph sequence. 
On the other hand, continual learners in the literature rely on abundant labels, but labeling graph in practice is particularly hard especially for the continuously emerging graphs on-the-fly. To address the aforementioned challenges, we propose to explore a challenging yet practical problem, the self-supervised continual graph learning in adaptive Riemannian spaces. In this paper, we propose a novel self-supervised Riemannian Graph Continual Learner (RieGrace). In RieGrace, we first design an Adaptive Riemannian GCN (AdaRGCN), a unified GCN coupled with a neural curvature adapter, so that Riemannian space is shaped by the learnt curvature adaptive to each graph. Then, we present a Label-free Lorentz Distillation approach, in which we create teacher-student AdaRGCN for the graph sequence. The student successively performs intra-distillation from itself and inter-distillation from the teacher so as to consolidate knowledge without catastrophic forgetting. In particular, we propose a theoretically grounded Generalized Lorentz Projection for the contrastive distillation in Riemannian space. Extensive experiments on the benchmark datasets show the superiority of RieGrace, and additionally, we investigate on how curvature changes over the graph sequence.", + "primary_area": "data mining and knowledge management", + "author": "Li Sun; Junda Ye; Hao Peng; Feiyang Wang; Philip S. 
Yu", + "authorids": "", + "aff": "School of Control and Computer Engineering, North China Electric Power University, Beijing 102206, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing 100876, China; Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing 100876, China; Department of Computer Science, University of Illinois at Chicago, IL, USA", + "bibtex": "@article{Sun_Ye_Peng_Wang_Yu_2023, title={Self-Supervised Continual Graph Learning in Adaptive Riemannian Spaces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25586}, DOI={10.1609/aaai.v37i4.25586}, abstractNote={Continual graph learning routinely finds its role in a variety of real-world applications where the graph data with different tasks come sequentially. Despite the success of prior works, it still faces great challenges. On the one hand, existing methods work with the zero-curvature Euclidean space, and largely ignore the fact that curvature varies over the coming graph sequence. On the other hand, continual learners in the literature rely on abundant labels, but labeling graph in practice is particularly hard especially for the continuously emerging graphs on-the-fly. To address the aforementioned challenges, we propose to explore a challenging yet practical problem, the self-supervised continual graph learning in adaptive Riemannian spaces. In this paper, we propose a novel self-supervised Riemannian Graph Continual Learner (RieGrace). In RieGrace, we first design an Adaptive Riemannian GCN (AdaRGCN), a unified GCN coupled with a neural curvature adapter, so that Riemannian space is shaped by the learnt curvature adaptive to each graph. Then, we present a Label-free Lorentz Distillation approach, in which we create teacher-student AdaRGCN for the graph sequence. 
The student successively performs intra-distillation from itself and inter-distillation from the teacher so as to consolidate knowledge without catastrophic forgetting. In particular, we propose a theoretically grounded Generalized Lorentz Projection for the contrastive distillation in Riemannian space. Extensive experiments on the benchmark datasets show the superiority of RieGrace, and additionally, we investigate on how curvature changes over the graph sequence.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Li and Ye, Junda and Peng, Hao and Wang, Feiyang and Yu, Philip S.}, year={2023}, month={Jun.}, pages={4633-4642} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25586/25358", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25586", + "pdf_size": 991119, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10279049356562260699&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "ncepu.edu.cn;bupt.edu.cn;bupt.edu.cn;buaa.edu.cn;uic.edu", + "email": "ncepu.edu.cn;bupt.edu.cn;bupt.edu.cn;buaa.edu.cn;uic.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;3", + "aff_unique_norm": "North China Electric Power University;Beijing University of Posts and Telecommunications;Beihang University;University of Illinois at Chicago", + "aff_unique_dep": "School of Control and Computer Engineering;School of Computer Science;Beijing Advanced Innovation Center for Big Data and Brain Computing;Department of Computer Science", + "aff_unique_url": ";http://www.bupt.edu.cn/;http://www.buaa.edu.cn;https://www.uic.edu", + "aff_unique_abbr": ";BUPT;Beihang;UIC", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Beijing;Chicago", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25960", + "title": "Self-Supervised Graph Attention Networks 
for Deep Weighted Multi-View Clustering", + "track": "main", + "status": "Technical", + "abstract": "As one of the most important research topics in the unsupervised learning field, Multi-View Clustering (MVC) has been widely studied in the past decade and numerous MVC methods have been developed. Among these methods, the recently emerged Graph Neural Networks (GNN) shine a light on modeling both topological structure and node attributes in the form of graphs, to guide unified embedding learning and clustering. However, the effectiveness of existing GNN-based MVC methods is still limited due to the insufficient consideration in utilizing the self-supervised information and graph information, which can be reflected from the following two aspects: 1) most of these models merely use the self-supervised information to guide the feature learning and fail to realize that such information can be also applied in graph learning and sample weighting; 2) the usage of graph information is generally limited to the feature aggregation in these models, yet it also provides valuable evidence in detecting noisy samples. To this end, in this paper we propose Self-Supervised Graph Attention Networks for Deep Weighted Multi-View Clustering (SGDMC), which promotes the performance of GNN-based deep MVC models by making full use of the self-supervised information and graph information. Specifically, a novel attention-allocating approach that considers both the similarity of node attributes and the self-supervised information is developed to comprehensively evaluate the relevance among different nodes. Meanwhile, to alleviate the negative impact caused by noisy samples and the discrepancy of cluster structures, we further design a sample-weighting strategy based on the attention graph as well as the discrepancy between the global pseudo-labels and the local cluster assignment. 
Experimental results on multiple real-world datasets demonstrate the effectiveness of our method over existing approaches.", + "primary_area": "machine learning ii", + "author": "Zongmo Huang; Yazhou Ren; Xiaorong Pu; Shudong Huang; Zenglin Xu; Lifang He", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China+Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China+Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China+Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China; College of Computer Science, Sichuan University, Chengdu, China; School of Computer Science and Technology, Harbin Institute of Technology Shenzhen, Shenzhen, China; Department of Computer Science and Engineering, Lehigh University, Bethlehem, USA", + "bibtex": "@article{Huang_Ren_Pu_Huang_Xu_He_2023, title={Self-Supervised Graph Attention Networks for Deep Weighted Multi-View Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25960}, DOI={10.1609/aaai.v37i7.25960}, abstractNote={As one of the most important research topics in the unsupervised learning field, Multi-View Clustering (MVC) has been widely studied in the past decade and numerous MVC methods have been developed. Among these methods, the recently emerged Graph Neural Networks (GNN) shine a light on modeling both topological structure and node attributes in the form of graphs, to guide unified embedding learning and clustering. 
However, the effectiveness of existing GNN-based MVC methods is still limited due to the insufficient consideration in utilizing the self-supervised information and graph information, which can be reflected from the following two aspects: 1) most of these models merely use the self-supervised information to guide the feature learning and fail to realize that such information can be also applied in graph learning and sample weighting; 2) the usage of graph information is generally limited to the feature aggregation in these models, yet it also provides valuable evidence in detecting noisy samples. To this end, in this paper we propose Self-Supervised Graph Attention Networks for Deep Weighted Multi-View Clustering (SGDMC), which promotes the performance of GNN-based deep MVC models by making full use of the self-supervised information and graph information. Specifically, a novel attention-allocating approach that considers both the similarity of node attributes and the self-supervised information is developed to comprehensively evaluate the relevance among different nodes. Meanwhile, to alleviate the negative impact caused by noisy samples and the discrepancy of cluster structures, we further design a sample-weighting strategy based on the attention graph as well as the discrepancy between the global pseudo-labels and the local cluster assignment. 
Experimental results on multiple real-world datasets demonstrate the effectiveness of our method over existing approaches.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Zongmo and Ren, Yazhou and Pu, Xiaorong and Huang, Shudong and Xu, Zenglin and He, Lifang}, year={2023}, month={Jun.}, pages={7936-7943} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25960/25732", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25960", + "pdf_size": 2137132, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2049548315794010560&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;uestc.edu.cn;uestc.edu.cn;scu.edu.cn;gmail.com;lehigh.edu", + "email": "gmail.com;uestc.edu.cn;uestc.edu.cn;scu.edu.cn;gmail.com;lehigh.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;1;2;3", + "aff_unique_norm": "University of Electronic Science and Technology of China;Sichuan University;Harbin Institute of Technology;Lehigh University", + "aff_unique_dep": "School of Computer Science and Engineering;College of Computer Science;School of Computer Science and Technology;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.uestc.edu.cn;https://www.scu.edu.cn;http://en.hust.edu.cn/;https://www.lehigh.edu", + "aff_unique_abbr": "UESTC;SCU;HIT;Lehigh", + "aff_campus_unique_index": "0+1;0+1;0+1;0;1;2", + "aff_campus_unique": "Chengdu;Shenzhen;Bethlehem", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25082", + "title": "Self-Supervised Graph Learning for Long-Tailed Cognitive Diagnosis", + "track": "main", + "status": "Technical", + "abstract": "Cognitive diagnosis is a fundamental yet critical research task in the field of intelligent education, which aims to discover the proficiency level of different students on 
specific knowledge concepts. Despite the effectiveness of existing efforts, previous methods always considered the mastery level on the whole students, so they still suffer from the Long Tail Effect. A large number of students who have sparse interaction records are usually wrongly diagnosed during inference. To relieve the situation, we proposed a Self-supervised Cognitive Diagnosis (SCD) framework which leverages the self-supervised manner to assist the graph-based cognitive diagnosis, then the performance on those students with sparse data can be improved. Specifically, we came up with a graph confusion method that drops edges under some special rules to generate different sparse views of the graph. By maximizing the cross-view consistency of node representations, our model could pay more attention on long-tailed students. Additionally, we proposed an importance-based view generation rule to improve the influence of long-tailed students. Extensive experiments on real-world datasets show the effectiveness of our approach, especially on the students with much sparser interaction records. 
Our code is available at https://github.com/zeng-zhen/SCD.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Shanshan Wang; Zhen Zeng; Xun Yang; Xingyi Zhang", + "authorids": "", + "aff": "Anhui University, HeFei, China+Institute of Artificial Intelligence, Hefei Comprehensive National Science Center, HeFei, China; Anhui University, HeFei, China; University of Science and Technology of China, HeFei, China; Anhui University, HeFei, China", + "bibtex": "@article{Wang_Zeng_Yang_Zhang_2023, title={Self-Supervised Graph Learning for Long-Tailed Cognitive Diagnosis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25082}, DOI={10.1609/aaai.v37i1.25082}, abstractNote={Cognitive diagnosis is a fundamental yet critical research task in the field of intelligent education, which aims to discover the proficiency level of different students on specific knowledge concepts. Despite the effectiveness of existing efforts, previous methods always considered the mastery level on the whole students, so they still suffer from the Long Tail Effect. A large number of students who have sparse interaction records are usually wrongly diagnosed during inference. To relieve the situation, we proposed a Self-supervised Cognitive Diagnosis (SCD) framework which leverages the self-supervised manner to assist the graph-based cognitive diagnosis, then the performance on those students with sparse data can be improved. Specifically, we came up with a graph confusion method that drops edges under some special rules to generate different sparse views of the graph. By maximizing the cross-view consistency of node representations, our model could pay more attention on long-tailed students. Additionally, we proposed an importance-based view generation rule to improve the influence of long-tailed students. Extensive experiments on real-world datasets show the effectiveness of our approach, especially on the students with much sparser interaction records. 
Our code is available at https://github.com/zeng-zhen/SCD.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Shanshan and Zeng, Zhen and Yang, Xun and Zhang, Xingyi}, year={2023}, month={Jun.}, pages={110-118} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25082/24854", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25082", + "pdf_size": 293932, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6141721035319018173&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ahu.edu.cn;stu.ahu.edu.cn;ustc.edu.cn;gmail.com", + "email": "ahu.edu.cn;stu.ahu.edu.cn;ustc.edu.cn;gmail.com", + "github": "https://github.com/zeng-zhen/SCD", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;2;0", + "aff_unique_norm": "Anhui University;Hefei Comprehensive National Science Center;University of Science and Technology of China", + "aff_unique_dep": ";Institute of Artificial Intelligence;", + "aff_unique_url": "http://www.ahu.edu.cn/;;http://www.ustc.edu.cn", + "aff_unique_abbr": ";;USTC", + "aff_campus_unique_index": "0+0;0;0;0", + "aff_campus_unique": "HeFei", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25245", + "title": "Self-Supervised Image Denoising Using Implicit Deep Denoiser Prior", + "track": "main", + "status": "Technical", + "abstract": "We devise a new regularization for denoising with self-supervised learning. The regularization uses a deep image prior learned by the network, rather than a traditional predefined prior. Specifically, we treat the output of the network as a ``prior'' that we again denoise after ``re-noising.'' The network is updated to minimize the discrepancy between the twice-denoised image and its prior. We demonstrate that this regularization enables the network to learn to denoise even if it has not seen any clean images. 
The effectiveness of our method is based on the fact that CNNs naturally tend to capture low-level image statistics. Since our method utilizes the image prior implicitly captured by the deep denoising CNN to guide denoising, we refer to this training strategy as an Implicit Deep Denoiser Prior (IDDP). IDDP can be seen as a mixture of learning-based methods and traditional model-based denoising methods, in which regularization is adaptively formulated using the output of the network. We apply IDDP to various denoising tasks using only observed corrupted data and show that it achieves better denoising results than other self-supervised denoising methods.", + "primary_area": "computer vision ii", + "author": "Huangxing Lin; Yihong Zhuang; Xinghao Ding; Delu Zeng; Yue Huang; Xiaotong Tu; John Paisley", + "authorids": "", + "aff": "School of Informatics, Xiamen University, China; School of Informatics, Xiamen University, China; School of Informatics, Xiamen University, China; School of Mathematics, South China University of Technology, China; School of Informatics, Xiamen University, China; School of Informatics, Xiamen University, China; Department of Electrical Engineering, Columbia University, USA", + "bibtex": "@article{Lin_Zhuang_Ding_Zeng_Huang_Tu_Paisley_2023, title={Self-Supervised Image Denoising Using Implicit Deep Denoiser Prior}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25245}, DOI={10.1609/aaai.v37i2.25245}, abstractNote={We devise a new regularization for denoising with self-supervised learning. The regularization uses a deep image prior learned by the network, rather than a traditional predefined prior. Specifically, we treat the output of the network as a ``prior\u2019\u2019 that we again denoise after ``re-noising.\u2019\u2019 The network is updated to minimize the discrepancy between the twice-denoised image and its prior. 
We demonstrate that this regularization enables the network to learn to denoise even if it has not seen any clean images. The effectiveness of our method is based on the fact that CNNs naturally tend to capture low-level image statistics. Since our method utilizes the image prior implicitly captured by the deep denoising CNN to guide denoising, we refer to this training strategy as an Implicit Deep Denoiser Prior (IDDP). IDDP can be seen as a mixture of learning-based methods and traditional model-based denoising methods, in which regularization is adaptively formulated using the output of the network. We apply IDDP to various denoising tasks using only observed corrupted data and show that it achieves better denoising results than other self-supervised denoising methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Huangxing and Zhuang, Yihong and Ding, Xinghao and Zeng, Delu and Huang, Yue and Tu, Xiaotong and Paisley, John}, year={2023}, month={Jun.}, pages={1586-1594} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25245/25017", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25245", + "pdf_size": 16455650, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11387866947753266644&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn;scut.edu.cn;xmu.edu.cn;xmu.edu.cn;columbia.edu", + "email": "stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn;scut.edu.cn;xmu.edu.cn;xmu.edu.cn;columbia.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;0;0;2", + "aff_unique_norm": "Xiamen University;South China University of Technology;Columbia University", + "aff_unique_dep": "School of Informatics;School of Mathematics;Department of Electrical Engineering", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.scut.edu.cn/en/;https://www.columbia.edu", + "aff_unique_abbr": 
"XMU;SCUT;Columbia", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25095", + "title": "Self-Supervised Image Local Forgery Detection by JPEG Compression Trace", + "track": "main", + "status": "Technical", + "abstract": "For image local forgery detection, the existing methods require a large amount of labeled data for training, and most of them cannot detect multiple types of forgery simultaneously. In this paper, we firstly analyzed the JPEG compression traces which are mainly caused by different JPEG compression chains, and designed a trace extractor to learn such traces. Then, we utilized the trace extractor as the backbone and trained self-supervised to strengthen the discrimination ability of learned traces. With its benefits, regions with different JPEG compression chains can easily be distinguished within a forged image. Furthermore, our method does not rely on a large amount of training data, and even does not require any forged images for training. 
Experiments show that the proposed method can detect image local forgery on different datasets without re-training, and keep stable performance over various types of image local forgery.", + "primary_area": "computer vision i", + "author": "Xiuli Bi; Wuqing Yan; Bo Liu; Bin Xiao; Weisheng Li; Xinbo Gao", + "authorids": "", + "aff": "Department of Computer Science and Technology, Chongqing University of Posts and Telecommunications; Department of Computer Science and Technology, Chongqing University of Posts and Telecommunications; Department of Computer Science and Technology, Chongqing University of Posts and Telecommunications; Department of Computer Science and Technology, Chongqing University of Posts and Telecommunications; Department of Computer Science and Technology, Chongqing University of Posts and Telecommunications; Department of Computer Science and Technology, Chongqing University of Posts and Telecommunications", + "bibtex": "@article{Bi_Yan_Liu_Xiao_Li_Gao_2023, title={Self-Supervised Image Local Forgery Detection by JPEG Compression Trace}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25095}, DOI={10.1609/aaai.v37i1.25095}, abstractNote={For image local forgery detection, the existing methods require a large amount of labeled data for training, and most of them cannot detect multiple types of forgery simultaneously. In this paper, we firstly analyzed the JPEG compression traces which are mainly caused by different JPEG compression chains, and designed a trace extractor to learn such traces. Then, we utilized the trace extractor as the backbone and trained self-supervised to strengthen the discrimination ability of learned traces. With its benefits, regions with different JPEG compression chains can easily be distinguished within a forged image. Furthermore, our method does not rely on a large amount of training data, and even does not require any forged images for training. 
Experiments show that the proposed method can detect image local forgery on different datasets without re-training, and keep stable performance over various types of image local forgery.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bi, Xiuli and Yan, Wuqing and Liu, Bo and Xiao, Bin and Li, Weisheng and Gao, Xinbo}, year={2023}, month={Jun.}, pages={232-240} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25095/24867", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25095", + "pdf_size": 5691987, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4750901986517967555&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "cqupt.edu.cn;cqupt.edu.cn;cqupt.edu.cn;cqupt.edu.cn;cqupt.edu.cn;stu.cqupt.edu.cn", + "email": "cqupt.edu.cn;cqupt.edu.cn;cqupt.edu.cn;cqupt.edu.cn;cqupt.edu.cn;stu.cqupt.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Chongqing University of Posts and Telecommunications", + "aff_unique_dep": "Department of Computer Science and Technology", + "aff_unique_url": "http://www.cqupt.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25584", + "title": "Self-Supervised Interest Transfer Network via Prototypical Contrastive Learning for Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Cross-domain recommendation has attracted increasing attention from industry and academia recently. However, most existing methods do not exploit the interest invariance between domains, which would yield sub-optimal solutions. 
In this paper, we propose a cross-domain recommendation method: Self-supervised Interest Transfer Network (SITN), which can effectively transfer invariant knowledge between domains via prototypical contrastive learning. Specifically, we perform two levels of cross-domain contrastive learning: 1) instance-to-instance contrastive learning, 2) instance-to-cluster contrastive learning. Not only that, we also take into account users' multi-granularity and multi-view interests. With this paradigm, SITN can explicitly learn the invariant knowledge of interest clusters between domains and accurately capture users' intents and preferences. We conducted extensive experiments on a public dataset and a large-scale industrial dataset collected from one of the world's leading e-commerce corporations. The experimental results indicate that SITN achieves significant improvements over state-of-the-art recommendation methods. Additionally, SITN has been deployed on a micro-video recommendation platform, and the online A/B testing results further demonstrate its practical value. 
Supplement is available at: https://github.com/fanqieCoffee/SITN-Supplement.", + "primary_area": "data mining and knowledge management", + "author": "Guoqiang Sun; Yibin Shen; Sijin Zhou; Xiang Chen; Hongyan Liu; Chunming Wu; Chenyi Lei; Xianhui Wei; Fei Fang", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University, China+Alibaba Group, China; Alibaba Group, China; Alibaba Group, China; College of Computer Science and Technology, Zhejiang University, China; College of Computer Science and Technology, Zhejiang University, China; College of Computer Science and Technology, Zhejiang University, China; Alibaba Group, China; Alibaba Group, China; Alibaba Group, China", + "bibtex": "@article{Sun_Shen_Zhou_Chen_Liu_Wu_Lei_Wei_Fang_2023, title={Self-Supervised Interest Transfer Network via Prototypical Contrastive Learning for Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25584}, DOI={10.1609/aaai.v37i4.25584}, abstractNote={Cross-domain recommendation has attracted increasing attention from industry and academia recently. However, most existing methods do not exploit the interest invariance between domains, which would yield sub-optimal solutions. In this paper, we propose a cross-domain recommendation method: Self-supervised Interest Transfer Network (SITN), which can effectively transfer invariant knowledge between domains via prototypical contrastive learning. Specifically, we perform two levels of cross-domain contrastive learning: 1) instance-to-instance contrastive learning, 2) instance-to-cluster contrastive learning. Not only that, we also take into account users\u2019 multi-granularity and multi-view interests. With this paradigm, SITN can explicitly learn the invariant knowledge of interest clusters between domains and accurately capture users\u2019 intents and preferences. 
We conducted extensive experiments on a public dataset and a large-scale industrial dataset collected from one of the world\u2019s leading e-commerce corporations. The experimental results indicate that SITN achieves significant improvements over state-of-the-art recommendation methods. Additionally, SITN has been deployed on a micro-video recommendation platform, and the online A/B testing results further demonstrate its practical value. Supplement is available at: https://github.com/fanqieCoffee/SITN-Supplement.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Guoqiang and Shen, Yibin and Zhou, Sijin and Chen, Xiang and Liu, Hongyan and Wu, Chunming and Lei, Chenyi and Wei, Xianhui and Fang, Fei}, year={2023}, month={Jun.}, pages={4614-4622} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25584/25356", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25584", + "pdf_size": 810553, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15809345584700259272&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "zju.edu.cn;alibaba-inc.com;alibaba-inc.com;zju.edu.cn;zju.edu.cn;zju.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "zju.edu.cn;alibaba-inc.com;alibaba-inc.com;zju.edu.cn;zju.edu.cn;zju.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "https://github.com/fanqieCoffee/SITN-Supplement", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;1;1;0;0;0;1;1;1", + "aff_unique_norm": "Zhejiang University;Alibaba Group", + "aff_unique_dep": "College of Computer Science and Technology;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ZJU;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25108", + "title": "Self-Supervised 
Joint Dynamic Scene Reconstruction and Optical Flow Estimation for Spiking Camera", + "track": "main", + "status": "Technical", + "abstract": "Spiking camera, a novel retina-inspired vision sensor, has shown its great potential for capturing high-speed dynamic scenes with a sampling rate of 40,000 Hz. The spiking camera abandons the concept of exposure window, with each of its photosensitive units continuously capturing photons and firing spikes asynchronously. However, the special sampling mechanism prevents the frame-based algorithm from being used to spiking camera. It remains to be a challenge to reconstruct dynamic scenes and perform common computer vision tasks for spiking camera. In this paper, we propose a self-supervised joint learning framework for optical flow estimation and reconstruction of spiking camera. The framework reconstructs clean frame-based spiking representations in a self-supervised manner, and then uses them to train the optical flow networks. We also propose an optical flow based inverse rendering process to achieve self-supervision by minimizing the difference with respect to the original spiking temporal aggregation image. The experimental results demonstrate that our method bridges the gap between synthetic and real-world scenes and achieves desired results in real-world scenarios. 
To the best of our knowledge, this is the first attempt to jointly reconstruct dynamic scenes and estimate optical flow for spiking camera from a self-supervised learning perspective.", + "primary_area": "computer vision i", + "author": "Shiyan Chen; Zhaofei Yu; Tiejun Huang", + "authorids": "", + "aff": "School of Electronic and Computer Engineering, Peking University + Institute for Artificial Intelligence, Peking University + School of Computer Science, Peking University; School of Electronic and Computer Engineering, Peking University + Institute for Artificial Intelligence, Peking University + School of Computer Science, Peking University; School of Electronic and Computer Engineering, Peking University + Institute for Artificial Intelligence, Peking University + School of Computer Science, Peking University", + "bibtex": "@article{Chen_Yu_Huang_2023, title={Self-Supervised Joint Dynamic Scene Reconstruction and Optical Flow Estimation for Spiking Camera}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25108}, DOI={10.1609/aaai.v37i1.25108}, abstractNote={Spiking camera, a novel retina-inspired vision sensor, has shown its great potential for capturing high-speed dynamic scenes with a sampling rate of 40,000 Hz. The spiking camera abandons the concept of exposure window, with each of its photosensitive units continuously capturing photons and firing spikes asynchronously. However, the special sampling mechanism prevents the frame-based algorithm from being used to spiking camera. It remains to be a challenge to reconstruct dynamic scenes and perform common computer vision tasks for spiking camera. In this paper, we propose a self-supervised joint learning framework for optical flow estimation and reconstruction of spiking camera. The framework reconstructs clean frame-based spiking representations in a self-supervised manner, and then uses them to train the optical flow networks. 
We also propose an optical flow based inverse rendering process to achieve self-supervision by minimizing the difference with respect to the original spiking temporal aggregation image. The experimental results demonstrate that our method bridges the gap between synthetic and real-world scenes and achieves desired results in real-world scenarios. To the best of our knowledge, this is the first attempt to jointly reconstruct dynamic scenes and estimate optical flow for spiking camera from a self-supervised learning perspective.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Shiyan and Yu, Zhaofei and Huang, Tiejun}, year={2023}, month={Jun.}, pages={350-358} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25108/24880", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25108", + "pdf_size": 3723914, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13084013215040413015&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0+0;0+0+0;0+0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Electronic and Computer Engineering", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25952", + "title": "Self-Supervised Learning for Anomalous Channel Detection in EEG Graphs: Application to Seizure Analysis", + "track": "main", + "status": "Technical", + "abstract": "Electroencephalogram (EEG) signals are effective tools towards seizure analysis where one of the most important challenges is accurate detection of seizure events and brain regions in which 
seizure happens or initiates. However, all existing machine learning-based algorithms for seizure analysis require access to the labeled seizure data while acquiring labeled data is very labor intensive, expensive, as well as clinicians dependent given the subjective nature of the visual qualitative interpretation of EEG signals. In this paper, we propose to detect seizure channels and clips in a self-supervised manner where no access to the seizure data is needed. The proposed method considers local structural and contextual information embedded in EEG graphs by employing positive and negative sub-graphs. We train our method through minimizing contrastive and generative losses. The employ of local EEG sub-graphs makes the algorithm an appropriate choice when accessing to the all EEG channels is impossible due to complications such as skull fractures. We conduct an extensive set of experiments on the largest seizure dataset and demonstrate that our proposed framework outperforms the state-of-the-art methods in the EEG-based seizure study. 
The proposed method is the only study that requires no access to the seizure data in its training phase, yet establishes a new state-of-the-art to the field, and outperforms all related supervised methods.", + "primary_area": "machine learning ii", + "author": "Thi Kieu Khanh Ho; Narges Armanfard", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, McGill University + Mila - Quebec AI Institute, Montreal, QC, Canada; Department of Electrical and Computer Engineering, McGill University + Mila - Quebec AI Institute, Montreal, QC, Canada", + "bibtex": "@article{Ho_Armanfard_2023, title={Self-Supervised Learning for Anomalous Channel Detection in EEG Graphs: Application to Seizure Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25952}, DOI={10.1609/aaai.v37i7.25952}, abstractNote={Electroencephalogram (EEG) signals are effective tools towards seizure analysis where one of the most important challenges is accurate detection of seizure events and brain regions in which seizure happens or initiates. However, all existing machine learning-based algorithms for seizure analysis require access to the labeled seizure data while acquiring labeled data is very labor intensive, expensive, as well as clinicians dependent given the subjective nature of the visual qualitative interpretation of EEG signals. In this paper, we propose to detect seizure channels and clips in a self-supervised manner where no access to the seizure data is needed. The proposed method considers local structural and contextual information embedded in EEG graphs by employing positive and negative sub-graphs. We train our method through minimizing contrastive and generative losses. The employ of local EEG sub-graphs makes the algorithm an appropriate choice when accessing to the all EEG channels is impossible due to complications such as skull fractures. 
We conduct an extensive set of experiments on the largest seizure dataset and demonstrate that our proposed framework outperforms the state-of-the-art methods in the EEG-based seizure study. The proposed method is the only study that requires no access to the seizure data in its training phase, yet establishes a new state-of-the-art to the field, and outperforms all related supervised methods.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ho, Thi Kieu Khanh and Armanfard, Narges}, year={2023}, month={Jun.}, pages={7866-7874} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25952/25724", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25952", + "pdf_size": 18548274, + "gs_citation": 53, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2965664552071336157&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mcgill.ca;mcgill.ca", + "email": "mcgill.ca;mcgill.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "McGill University;Quebec AI Institute", + "aff_unique_dep": "Department of Electrical and Computer Engineering;AI Institute", + "aff_unique_url": "https://www.mcgill.ca;https://mila.quebec", + "aff_unique_abbr": "McGill;Mila", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Montreal", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "Canada" + }, + { + "id": "article-25163", + "title": "Self-Supervised Learning for Multilevel Skeleton-Based Forgery Detection via Temporal-Causal Consistency of Actions", + "track": "main", + "status": "Technical", + "abstract": "Skeleton-based human action recognition and analysis have become increasingly attainable in many areas, such as security surveillance and anomaly detection. Given the prevalence of skeleton-based applications, tampering attacks on human skeletal features have emerged very recently. 
In particular, checking the temporal inconsistency and/or incoherence (TII) in the skeletal sequence of human action is a principle of forgery detection. To this end, we propose an approach to self-supervised learning of the temporal causality behind human action, which can effectively check TII in skeletal sequences. Especially, we design a multilevel skeleton-based forgery detection framework to recognize the forgery on frame level, clip level, and action level in terms of learning the corresponding temporal-causal skeleton representations for each level. Specifically, a hierarchical graph convolution network architecture is designed to learn low-level skeleton representations based on physical skeleton connections and high-level action representations based on temporal-causal dependencies for specific actions. Extensive experiments consistently show state-of-the-art results on multilevel forgery detection tasks and superior performance of our framework compared to current competing methods.", + "primary_area": "computer vision i", + "author": "Liang Hu; Dora D. Liu; Qi Zhang; Usman Naseem; Zhong Yuan Lai", + "authorids": "", + "aff": "Tongji University+DeepBlue Academy of Sciences; DeepBlue Academy of Sciences+BirenTech Research; University of Technology Sydney+DeepBlue Academy of Sciences; University of Sydney; DeepBlue Academy of Sciences", + "bibtex": "@article{Hu_Liu_Zhang_Naseem_Lai_2023, title={Self-Supervised Learning for Multilevel Skeleton-Based Forgery Detection via Temporal-Causal Consistency of Actions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25163}, DOI={10.1609/aaai.v37i1.25163}, abstractNote={Skeleton-based human action recognition and analysis have become increasingly attainable in many areas, such as security surveillance and anomaly detection. Given the prevalence of skeleton-based applications, tampering attacks on human skeletal features have emerged very recently. 
In particular, checking the temporal inconsistency and/or incoherence (TII) in the skeletal sequence of human action is a principle of forgery detection. To this end, we propose an approach to self-supervised learning of the temporal causality behind human action, which can effectively check TII in skeletal sequences. Especially, we design a multilevel skeleton-based forgery detection framework to recognize the forgery on frame level, clip level, and action level in terms of learning the corresponding temporal-causal skeleton representations for each level. Specifically, a hierarchical graph convolution network architecture is designed to learn low-level skeleton representations based on physical skeleton connections and high-level action representations based on temporal-causal dependencies for specific actions. Extensive experiments consistently show state-of-the-art results on multilevel forgery detection tasks and superior performance of our framework compared to current competing methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Liang and Liu, Dora D. 
and Zhang, Qi and Naseem, Usman and Lai, Zhong Yuan}, year={2023}, month={Jun.}, pages={844-853} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25163/24935", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25163", + "pdf_size": 642840, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1419524897333165511&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "tongji.edu.cn;163.com;student.uts.edu.au;sydney.edu.au;yahoo.com", + "email": "tongji.edu.cn;163.com;student.uts.edu.au;sydney.edu.au;yahoo.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1+2;3+1;4;1", + "aff_unique_norm": "Tongji University;DeepBlue Academy of Sciences;BirenTech Research;University of Technology Sydney;University of Sydney", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.tongji.edu.cn;http://www deepblue.ac.cn;;https://www.uts.edu.au;https://www.sydney.edu.au", + "aff_unique_abbr": "Tongji;;;UTS;USYD", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;2+0;2;0", + "aff_country_unique": "China;;Australia" + }, + { + "id": "article-26481", + "title": "Self-Supervised Logic Induction for Explainable Fuzzy Temporal Commonsense Reasoning", + "track": "main", + "status": "Technical", + "abstract": "Understanding temporal commonsense concepts, such as times of occurrence and durations is crucial for event-centric language understanding. Reasoning about such temporal concepts in a complex context requires reasoning over both the stated context and the world knowledge that underlines it. A recent study shows massive pre-trained LM still struggle with such temporal reasoning under complex contexts (e.g., dialog) because they only implicitly encode the relevant contexts and fail to explicitly uncover the underlying logical compositions for complex inference, thus may not be robust enough. 
In this work, we propose to augment LMs with the temporal logic induction ability, which frames the temporal reasoning by defining three modular components: temporal dependency inducer and temporal concept defuzzifier and logic validator. The former two components disentangle the explicit/implicit dependency between temporal concepts across context (before, after, ...) and the specific meaning of fuzzy temporal concepts, respectively, while the validator combines the intermediate reasoning clues for robust contextual reasoning about the temporal concepts. Extensive experimental results on TIMEDIAL, a challenging dataset for temporal reasoning over dialog, show that our method, Logic Induction Enhanced Contextualized TEmporal Reasoning (LECTER), can yield great improvements over the traditional language model for temporal reasoning.", + "primary_area": "speech natural language processing", + "author": "Bibo Cai; Xiao Ding; Zhouhao Sun; Bing Qin; Ting Liu; Baojun wang; Lifeng Shang", + "authorids": "", + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab", + "bibtex": "@article{Cai_Ding_Sun_Qin_Liu_wang_Shang_2023, title={Self-Supervised Logic Induction for Explainable Fuzzy Temporal Commonsense Reasoning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26481}, DOI={10.1609/aaai.v37i11.26481}, abstractNote={Understanding temporal commonsense concepts, such as times of occurrence and durations is crucial for event-centric language 
understanding. Reasoning about such temporal concepts in a complex context requires reasoning over both the stated context and the world knowledge that underlines it. A recent study shows massive pre-trained LM still struggle with such temporal reasoning under complex contexts (e.g., dialog) because they only implicitly encode the relevant contexts and fail to explicitly uncover the underlying logical compositions for complex inference, thus may not be robust enough. In this work, we propose to augment LMs with the temporal logic induction ability, which frames the temporal reasoning by defining three modular components: temporal dependency inducer and temporal concept defuzzifier and logic validator. The former two components disentangle the explicit/implicit dependency between temporal concepts across context (before, after, ...) and the specific meaning of fuzzy temporal concepts, respectively, while the validator combines the intermediate reasoning clues for robust contextual reasoning about the temporal concepts. 
Extensive experimental results on TIMEDIAL, a challenging dataset for temporal reasoning over dialog, show that our method, Logic Induction Enhanced Contextualized TEmporal Reasoning (LECTER), can yield great improvements over the traditional language model for temporal reasoning.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cai, Bibo and Ding, Xiao and Sun, Zhouhao and Qin, Bing and Liu, Ting and wang, Baojun and Shang, Lifeng}, year={2023}, month={Jun.}, pages={12580-12588} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26481/26253", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26481", + "pdf_size": 437372, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=556332069333887211&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;huawei.com;huawei.com", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;1", + "aff_unique_norm": "Harbin Institute of Technology;Huawei", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval;Noah\u2019s Ark Lab", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.huawei.com", + "aff_unique_abbr": "HIT;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25520", + "title": "Self-Supervised Primal-Dual Learning for Constrained Optimization", + "track": "main", + "status": "Technical", + "abstract": "This paper studies how to train machine-learning models that directly approximate the optimal solutions of constrained optimization problems. 
This is an empirical risk minimization under constraints, which is challenging as training must balance optimality and feasibility conditions. Supervised learning methods often approach this challenge by training the model on a large collection of pre-solved instances. This paper takes a different route and proposes the idea of Primal-Dual Learning (PDL), a self-supervised training method that does not require a set of pre-solved instances or an optimization solver for training and inference. Instead, PDL mimics the trajectory of an Augmented Lagrangian Method (ALM) and jointly trains primal and dual neural networks. Being a primal-dual method, PDL uses instance-specific penalties of the constraint terms in the loss function used to train the primal network. Experiments show that, on a set of nonlinear optimization benchmarks, PDL typically exhibits negligible constraint violations and minor optimality gaps, and is remarkably close to the ALM optimization. PDL also demonstrated improved or similar performance in terms of the optimality gaps, constraint violations, and training times compared to existing approaches.", + "primary_area": "constraint satisfaction and optimization", + "author": "Seonho Park; Pascal Van Hentenryck", + "authorids": "", + "aff": "H. Milton Stewart School of Industrial & Systems Engineering, Georgia Institute of Technology; H. Milton Stewart School of Industrial & Systems Engineering, Georgia Institute of Technology", + "bibtex": "@article{Park_Van Hentenryck_2023, title={Self-Supervised Primal-Dual Learning for Constrained Optimization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25520}, DOI={10.1609/aaai.v37i4.25520}, abstractNote={This paper studies how to train machine-learning models that directly approximate the optimal solutions of constrained optimization problems. This is an empirical risk minimization under constraints, which is challenging as training must balance optimality and feasibility conditions. 
Supervised learning methods often approach this challenge by training the model on a large collection of pre-solved instances. This paper takes a different route and proposes the idea of Primal-Dual Learning (PDL), a self-supervised training method that does not require a set of pre-solved instances or an optimization solver for training and inference. Instead, PDL mimics the trajectory of an Augmented Lagrangian Method (ALM) and jointly trains primal and dual neural networks. Being a primal-dual method, PDL uses instance-specific penalties of the constraint terms in the loss function used to train the primal network. Experiments show that, on a set of nonlinear optimization benchmarks, PDL typically exhibits negligible constraint violations and minor optimality gaps, and is remarkably close to the ALM optimization. PDL also demonstrated improved or similar performance in terms of the optimality gaps, constraint violations, and training times compared to existing approaches.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Park, Seonho and Van Hentenryck, Pascal}, year={2023}, month={Jun.}, pages={4052-4060} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25520/25292", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25520", + "pdf_size": 222846, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9093476122965091787&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "gatech.edu;isye_gatech.edu", + "email": "gatech.edu;isye_gatech.edu", + "github": "", + "project": "https://arxiv.org/pdf/2208.09046", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "H. 
Milton Stewart School of Industrial & Systems Engineering", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Atlanta", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25416", + "title": "Self-Supervised Video Representation Learning via Latent Time Navigation", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised video representation learning aimed at maximizing similarity between different temporal segments of one video, in order to enforce feature persistence over time. This leads to loss of pertinent information related to temporal relationships, rendering actions such as `enter' and `leave' to be indistinguishable. To mitigate this limitation, we propose Latent Time Navigation (LTN), a time parameterized contrastive learning strategy that is streamlined to capture fine-grained motions. Specifically, we maximize the representation similarity between different video segments from one video, while maintaining their representations time-aware along a subspace of the latent representation code including an orthogonal basis to represent temporal changes. Our extensive experimental analysis suggests that learning video representations by LTN consistently improves performance of action classification in fine-grained and human-oriented tasks (e.g., on Toyota Smarthome dataset). 
In addition, we demonstrate that our proposed model, when pre-trained on Kinetics-400, generalizes well onto the unseen real world video benchmark datasets UCF101 and HMDB51, achieving state-of-the-art performance in action recognition.", + "primary_area": "computer vision iii", + "author": "Di Yang; Yaohui Wang; Quan Kong; Antitza Dantcheva; Lorenzo Garattoni; Gianpiero Francesca; Fran\u00e7ois Br\u00e9mond", + "authorids": "", + "aff": "Inria+Universit\u00e9 C\u00f4te d\u2019Azur; Inria+Universit\u00e9 C\u00f4te d\u2019Azur+Shanghai AI Laboratory; Woven Planet Holdings; Inria+Universit\u00e9 C\u00f4te d\u2019Azur; Toyota Motor Europe; Toyota Motor Europe; Inria+Universit\u00e9 C\u00f4te d\u2019Azur", + "bibtex": "@article{Yang_Wang_Kong_Dantcheva_Garattoni_Francesca_Br\u00e9mond_2023, title={Self-Supervised Video Representation Learning via Latent Time Navigation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25416}, DOI={10.1609/aaai.v37i3.25416}, abstractNote={Self-supervised video representation learning aimed at maximizing similarity between different temporal segments of one video, in order to enforce feature persistence over time. This leads to loss of pertinent information related to temporal relationships, rendering actions such as `enter\u2019 and `leave\u2019 to be indistinguishable. To mitigate this limitation, we propose Latent Time Navigation (LTN), a time parameterized contrastive learning strategy that is streamlined to capture fine-grained motions. Specifically, we maximize the representation similarity between different video segments from one video, while maintaining their representations time-aware along a subspace of the latent representation code including an orthogonal basis to represent temporal changes. 
Our extensive experimental analysis suggests that learning video representations by LTN consistently improves performance of action classification in fine-grained and human-oriented tasks (e.g., on Toyota Smarthome dataset). In addition, we demonstrate that our proposed model, when pre-trained on Kinetics-400, generalizes well onto the unseen real world video benchmark datasets UCF101 and HMDB51, achieving state-of-the-art performance in action recognition.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Di and Wang, Yaohui and Kong, Quan and Dantcheva, Antitza and Garattoni, Lorenzo and Francesca, Gianpiero and Br\u00e9mond, Fran\u00e7ois}, year={2023}, month={Jun.}, pages={3118-3126} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25416/25188", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25416", + "pdf_size": 1990312, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4834823557986436633&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "inria.fr;inria.fr;woven-planet.global;inria.fr;toyota-europe.com;toyota-europe.com;inria.fr", + "email": "inria.fr;inria.fr;woven-planet.global;inria.fr;toyota-europe.com;toyota-europe.com;inria.fr", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1+2;3;0+1;4;4;0+1", + "aff_unique_norm": "Inria;Universit\u00e9 C\u00f4te d\u2019Azur;Shanghai AI Laboratory;Woven Planet Holdings;Toyota Motor Corporation", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.inria.fr;https://www.univ-cotedazur.fr;https://www.shanghai-ai-lab.com;https://www.wovenplanet.honda.com;https://www.toyota-europe.com", + "aff_unique_abbr": "Inria;UCA;SAIL;WPH;TME", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0+1;2;0+0;3;3;0+0", + "aff_country_unique": "France;China;Japan;Europe" + }, + { + "id": "article-25278", + "title": 
"Semantic 3D-Aware Portrait Synthesis and Manipulation Based on Compositional Neural Radiance Field", + "track": "main", + "status": "Technical", + "abstract": "Recently 3D-aware GAN methods with neural radiance field have developed rapidly. However, current methods model the whole image as an overall neural radiance field, which limits the partial semantic editability of synthetic results. Since NeRF renders an image pixel by pixel, it is possible to split NeRF in the spatial dimension. We propose a Compositional Neural Radiance Field (CNeRF) for semantic 3D-aware portrait synthesis and manipulation. CNeRF divides the image by semantic regions and learns an independent neural radiance field for each region, and finally fuses them and renders the complete image. Thus we can manipulate the synthesized semantic regions independently, while fixing the other parts unchanged. Furthermore, CNeRF is also designed to decouple shape and texture within each semantic region. Compared to state-of-the-art 3D-aware GAN methods, our approach enables fine-grained semantic region manipulation, while maintaining high-quality 3D-consistent synthesis. The ablation studies show the effectiveness of the structure and loss function used by our method. 
In addition real image inversion and cartoon portrait 3D editing experiments demonstrate the application potential of our method.", + "primary_area": "computer vision ii", + "author": "Tianxiang Ma; Bingchuan Li; Qian He; Jing Dong; Tieniu Tan", + "authorids": "", + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences+CRIPAC & NLPR, Institute of Automation, Chinese Academy of Sciences; ByteDance Ltd, Beijing, China; ByteDance Ltd, Beijing, China; CRIPAC & NLPR, Institute of Automation, Chinese Academy of Sciences; CRIPAC & NLPR, Institute of Automation, Chinese Academy of Sciences+Nanjing University", + "bibtex": "@article{Ma_Li_He_Dong_Tan_2023, title={Semantic 3D-Aware Portrait Synthesis and Manipulation Based on Compositional Neural Radiance Field}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25278}, DOI={10.1609/aaai.v37i2.25278}, abstractNote={Recently 3D-aware GAN methods with neural radiance field have developed rapidly. However, current methods model the whole image as an overall neural radiance field, which limits the partial semantic editability of synthetic results. Since NeRF renders an image pixel by pixel, it is possible to split NeRF in the spatial dimension. We propose a Compositional Neural Radiance Field (CNeRF) for semantic 3D-aware portrait synthesis and manipulation. CNeRF divides the image by semantic regions and learns an independent neural radiance field for each region, and finally fuses them and renders the complete image. Thus we can manipulate the synthesized semantic regions independently, while fixing the other parts unchanged. Furthermore, CNeRF is also designed to decouple shape and texture within each semantic region. Compared to state-of-the-art 3D-aware GAN methods, our approach enables fine-grained semantic region manipulation, while maintaining high-quality 3D-consistent synthesis. 
The ablation studies show the effectiveness of the structure and loss function used by our method. In addition real image inversion and cartoon portrait 3D editing experiments demonstrate the application potential of our method.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Tianxiang and Li, Bingchuan and He, Qian and Dong, Jing and Tan, Tieniu}, year={2023}, month={Jun.}, pages={1878-1886} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25278/25050", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25278", + "pdf_size": 4256511, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6092376849069606622&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cripac.ia.ac.cn;bytedance.com;bytedance.com;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "cripac.ia.ac.cn;bytedance.com;bytedance.com;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;2;1;1+3", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;ByteDance Ltd;Nanjing University", + "aff_unique_dep": "School of Artificial Intelligence;Institute of Automation;;", + "aff_unique_url": "http://www.ucas.ac.cn;http://www.ia.cas.cn;https://www.bytedance.com;https://www.nju.edu.cn", + "aff_unique_abbr": "UCAS;CAS;ByteDance;Nanjing U", + "aff_campus_unique_index": ";1;1;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25196", + "title": "Semantic-Aware Superpixel for Weakly Supervised Semantic Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Weakly-supervised semantic segmentation aims to train a semantic segmentation network using weak labels. Among weak labels, image-level label has been the most popular choice due to its simplicity. 
However, since image-level labels lack accurate object region information, additional modules such as saliency detector have been exploited in weakly supervised semantic segmentation, which requires pixel-level label for training. In this paper, we explore a self-supervised vision transformer to mitigate the heavy efforts on generation of pixel-level annotations. By exploiting the features obtained from self-supervised vision transformer, our superpixel discovery method finds out the semantic-aware superpixels based on the feature similarity in an unsupervised manner. Once we obtain the superpixels, we train the semantic segmentation network using superpixel-guided seeded region growing method. Despite its simplicity, our approach achieves the competitive result with the state-of-the-arts on PASCAL VOC 2012 and MS-COCO 2014 semantic segmentation datasets for weakly supervised semantic segmentation. Our code is available at https://github.com/st17kim/semantic-aware-superpixel.", + "primary_area": "computer vision i", + "author": "Sangtae Kim; Daeyoung Park; Byonghyo Shim", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, Seoul National University, Seoul, Korea; Department of Information and Communication, Inha University, Incheon, Korea; Department of Electrical and Computer Engineering, Seoul National University, Seoul, Korea", + "bibtex": "@article{Kim_Park_Shim_2023, title={Semantic-Aware Superpixel for Weakly Supervised Semantic Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25196}, DOI={10.1609/aaai.v37i1.25196}, abstractNote={Weakly-supervised semantic segmentation aims to train a semantic segmentation network using weak labels. Among weak labels, image-level label has been the most popular choice due to its simplicity. 
However, since image-level labels lack accurate object region information, additional modules such as saliency detector have been exploited in weakly supervised semantic segmentation, which requires pixel-level label for training. In this paper, we explore a self-supervised vision transformer to mitigate the heavy efforts on generation of pixel-level annotations. By exploiting the features obtained from self-supervised vision transformer, our superpixel discovery method finds out the semantic-aware superpixels based on the feature similarity in an unsupervised manner. Once we obtain the superpixels, we train the semantic segmentation network using superpixel-guided seeded region growing method. Despite its simplicity, our approach achieves the competitive result with the state-of-the-arts on PASCAL VOC 2012 and MS-COCO 2014 semantic segmentation datasets for weakly supervised semantic segmentation. Our code is available at https://github.com/st17kim/semantic-aware-superpixel.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Sangtae and Park, Daeyoung and Shim, Byonghyo}, year={2023}, month={Jun.}, pages={1142-1150} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25196/24968", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25196", + "pdf_size": 7983041, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16509634917641101541&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "islab.snu.ac.kr;inha.ac.kr;islab.snu.ac.kr", + "email": "islab.snu.ac.kr;inha.ac.kr;islab.snu.ac.kr", + "github": "https://github.com/st17kim/semantic-aware-superpixel", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Seoul National University;Inha University", + "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Information and Communication", + "aff_unique_url": 
"https://www.snu.ac.kr;http://www.inha.ac.kr", + "aff_unique_abbr": "SNU;Inha U", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Seoul;Incheon", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Korea" + }, + { + "id": "article-25841", + "title": "Semantic-Enhanced Image Clustering", + "track": "main", + "status": "Technical", + "abstract": "Image clustering is an important and open challenging task in computer vision. Although many methods have been proposed to solve the image clustering task, they only explore images and uncover clusters according to the image features, thus being unable to distinguish visually similar but semantically different images. In this paper, we propose to investigate the task of image clustering with the help of visual-language pre-training model. Different from the zero-shot setting, in which the class names are known, we only know the number of clusters in this setting. Therefore, how to map images to a proper semantic space and how to cluster images from both image and semantic spaces are two key problems. To solve the above problems, we propose a novel image clustering method guided by the visual-language pre-training model CLIP, named Semantic-Enhanced Image Clustering (SIC). In this new method, we propose a method to map the given images to a proper semantic space first and efficient methods to generate pseudo-labels according to the relationships between images and semantics. Finally, we propose to perform clustering with consistency learning in both image space and semantic space, in a self-supervised learning fashion. The theoretical result of convergence analysis shows that our proposed method can converge at a sublinear speed. Theoretical analysis of expectation risk also shows that we can reduce the expectation risk by improving neighborhood consistency, increasing prediction confidence, or reducing neighborhood imbalance. 
Experimental results on five benchmark datasets clearly show the superiority of our new method.", + "primary_area": "machine learning i", + "author": "Shaotian Cai; Liping Qiu; Xiaojun Chen; Qin Zhang; Longteng Chen", + "authorids": "", + "aff": "Shenzhen University, Shenzhen, China; Shenzhen University, Shenzhen, China; Shenzhen University, Shenzhen, China; Shenzhen University, Shenzhen, China; Shenzhen University, Shenzhen, China", + "bibtex": "@article{Cai_Qiu_Chen_Zhang_Chen_2023, title={Semantic-Enhanced Image Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25841}, DOI={10.1609/aaai.v37i6.25841}, abstractNote={Image clustering is an important and open challenging task in computer vision. Although many methods have been proposed to solve the image clustering task, they only explore images and uncover clusters according to the image features, thus being unable to distinguish visually similar but semantically different images. In this paper, we propose to investigate the task of image clustering with the help of visual-language pre-training model. Different from the zero-shot setting, in which the class names are known, we only know the number of clusters in this setting. Therefore, how to map images to a proper semantic space and how to cluster images from both image and semantic spaces are two key problems. To solve the above problems, we propose a novel image clustering method guided by the visual-language pre-training model CLIP, named Semantic-Enhanced Image Clustering (SIC). In this new method, we propose a method to map the given images to a proper semantic space first and efficient methods to generate pseudo-labels according to the relationships between images and semantics. Finally, we propose to perform clustering with consistency learning in both image space and semantic space, in a self-supervised learning fashion. 
The theoretical result of convergence analysis shows that our proposed method can converge at a sublinear speed. Theoretical analysis of expectation risk also shows that we can reduce the expectation risk by improving neighborhood consistency, increasing prediction confidence, or reducing neighborhood imbalance. Experimental results on five benchmark datasets clearly show the superiority of our new method.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cai, Shaotian and Qiu, Liping and Chen, Xiaojun and Zhang, Qin and Chen, Longteng}, year={2023}, month={Jun.}, pages={6869-6878} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25841/25613", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25841", + "pdf_size": 1772998, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13677303287498670110&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "foxmail.com;email.szu.edu.cn;szu.edu.cn;szu.edu.cn;163.com", + "email": "foxmail.com;email.szu.edu.cn;szu.edu.cn;szu.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Shenzhen University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.szu.edu.cn", + "aff_unique_abbr": "SZU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25428", + "title": "Semantics-Aware Dynamic Localization and Refinement for Referring Image Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Referring image segmentation segments an image from a language expression. With the aim of producing high-quality masks, existing methods often adopt iterative learning approaches that rely on RNNs or stacked attention layers to refine vision-language features. 
Despite their complexity, RNN-based methods are subject to specific encoder choices, while attention-based methods offer limited gains. In this work, we introduce a simple yet effective alternative for progressively learning discriminative multi-modal features. The core idea of our approach is to leverage a continuously updated query as the representation of the target object and at each iteration, strengthen multi-modal features strongly correlated to the query while weakening less related ones. As the query is initialized by language features and successively updated by object features, our algorithm gradually shifts from being localization-centric to segmentation-centric. This strategy enables the incremental recovery of missing object parts and/or removal of extraneous parts through iteration. Compared to its counterparts, our method is more versatile\u2014it can be plugged into prior arts straightforwardly and consistently bring improvements. Experimental results on the challenging datasets of RefCOCO, RefCOCO+, and G-Ref demonstrate its advantage with respect to the state-of-the-art methods.", + "primary_area": "computer vision iii", + "author": "Zhao Yang; Jiaqi Wang; Yansong Tang; Kai Chen; Hengshuang Zhao; Philip H.S. Torr", + "authorids": "", + "aff": "University of Oxford; Shanghai AI Laboratory; Tsinghua-Berkeley Shenzhen Institute, Tsinghua University; The University of Hong Kong; University of Oxford; University of Oxford", + "bibtex": "@article{Yang_Wang_Tang_Chen_Zhao_Torr_2023, title={Semantics-Aware Dynamic Localization and Refinement for Referring Image Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25428}, DOI={10.1609/aaai.v37i3.25428}, abstractNote={Referring image segmentation segments an image from a language expression. With the aim of producing high-quality masks, existing methods often adopt iterative learning approaches that rely on RNNs or stacked attention layers to refine vision-language features. 
Despite their complexity, RNN-based methods are subject to specific encoder choices, while attention-based methods offer limited gains. In this work, we introduce a simple yet effective alternative for progressively learning discriminative multi-modal features. The core idea of our approach is to leverage a continuously updated query as the representation of the target object and at each iteration, strengthen multi-modal features strongly correlated to the query while weakening less related ones. As the query is initialized by language features and successively updated by object features, our algorithm gradually shifts from being localization-centric to segmentation-centric. This strategy enables the incremental recovery of missing object parts and/or removal of extraneous parts through iteration. Compared to its counterparts, our method is more versatile\u2014it can be plugged into prior arts straightforwardly and consistently bring improvements. Experimental results on the challenging datasets of RefCOCO, RefCOCO+, and G-Ref demonstrate its advantage with respect to the state-of-the-art methods.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Zhao and Wang, Jiaqi and Tang, Yansong and Chen, Kai and Zhao, Hengshuang and Torr, Philip H.S.}, year={2023}, month={Jun.}, pages={3222-3230} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25428/25200", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25428", + "pdf_size": 1946131, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14134260337222721518&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;0;0", + "aff_unique_norm": "University of Oxford;Shanghai AI Laboratory;Tsinghua University;The University of Hong Kong", + "aff_unique_dep": ";;Tsinghua-Berkeley Shenzhen 
Institute;", + "aff_unique_url": "https://www.ox.ac.uk;https://www.shanghai-ai-lab.com;http://www.tsinghua.edu.cn;https://www.hku.hk", + "aff_unique_abbr": "Oxford;SAIL;THU;HKU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;1;1;1;0;0", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "article-25890", + "title": "Semi-Supervised Deep Regression with Uncertainty Consistency and Variational Model Ensembling via Bayesian Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Deep regression is an important problem with numerous applications. These range from computer vision tasks such as age estimation from photographs, to medical tasks such as ejection fraction estimation from echocardiograms for disease tracking.\nSemi-supervised approaches for deep regression are notably under-explored compared to classification and segmentation tasks, however. \nUnlike classification tasks, which rely on thresholding functions for generating class pseudo-labels, regression tasks use real number target predictions directly as pseudo-labels, making them more sensitive to prediction quality.\nIn this work, we propose a novel approach to semi-supervised regression, namely Uncertainty-Consistent Variational Model Ensembling (UCVME), which improves training by generating high-quality pseudo-labels and uncertainty estimates for heteroscedastic regression.\nGiven that aleatoric uncertainty is only dependent on input data by definition and should be equal for the same inputs, we present a novel uncertainty consistency loss for co-trained models.\nOur consistency loss significantly improves uncertainty estimates and allows higher quality pseudo-labels to be assigned greater importance under heteroscedastic regression. \nFurthermore, we introduce a novel variational model ensembling approach to reduce prediction noise and generate more robust pseudo-labels. 
We analytically show our method generates higher quality targets for unlabeled data and further improves training. \nExperiments show that our method outperforms state-of-the-art alternatives on different tasks and can be competitive with supervised methods that use full labels. Code is available at https://github.com/xmed-lab/UCVME.", + "primary_area": "machine learning i", + "author": "Weihang Dai; Xiaomeng Li; Kwang-Ting Cheng", + "authorids": "", + "aff": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology + Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology; Department of Computer Science and Engineering, The Hong Kong University of Science and Technology + Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology; Department of Computer Science and Engineering, The Hong Kong University of Science and Technology + Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology", + "bibtex": "@article{Dai_Li_Cheng_2023, title={Semi-Supervised Deep Regression with Uncertainty Consistency and Variational Model Ensembling via Bayesian Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25890}, DOI={10.1609/aaai.v37i6.25890}, abstractNote={Deep regression is an important problem with numerous applications. These range from computer vision tasks such as age estimation from photographs, to medical tasks such as ejection fraction estimation from echocardiograms for disease tracking.\nSemi-supervised approaches for deep regression are notably under-explored compared to classification and segmentation tasks, however. 
Unlike classification tasks, which rely on thresholding functions for generating class pseudo-labels, regression tasks use real number target predictions directly as pseudo-labels, making them more sensitive to prediction quality.\nIn this work, we propose a novel approach to semi-supervised regression, namely Uncertainty-Consistent Variational Model Ensembling (UCVME), which improves training by generating high-quality pseudo-labels and uncertainty estimates for heteroscedastic regression.\nGiven that aleatoric uncertainty is only dependent on input data by definition and should be equal for the same inputs, we present a novel uncertainty consistency loss for co-trained models.\nOur consistency loss significantly improves uncertainty estimates and allows higher quality pseudo-labels to be assigned greater importance under heteroscedastic regression. Furthermore, we introduce a novel variational model ensembling approach to reduce prediction noise and generate more robust pseudo-labels. We analytically show our method generates higher quality targets for unlabeled data and further improves training. Experiments show that our method outperforms state-of-the-art alternatives on different tasks and can be competitive with supervised methods that use full labels. 
Code is available at https://github.com/xmed-lab/UCVME.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dai, Weihang and Li, Xiaomeng and Cheng, Kwang-Ting}, year={2023}, month={Jun.}, pages={7304-7313} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25890/25662", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25890", + "pdf_size": 1240866, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1542898804671426248&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "connect.ust.hk;ust.hk;ust.hk", + "email": "connect.ust.hk;ust.hk;ust.hk", + "github": "https://github.com/xmed-lab/UCVME", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "The Hong Kong University of Science and Technology", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.ust.hk", + "aff_unique_abbr": "HKUST", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25180", + "title": "Semi-attention Partition for Occluded Person Re-identification", + "track": "main", + "status": "Technical", + "abstract": "This paper proposes a Semi-Attention Partition (SAP) method to learn well-aligned part features for occluded person re-identification (re-ID). Currently, the mainstream methods employ either external semantic partition or attention-based partition, and the latter manner is usually better than the former one. Under this background, this paper explores a potential that the weak semantic partition can be a good teacher for the strong attention-based partition. In other words, the attention-based student can substantially surpass its noisy semantic-based teacher, contradicting the common sense that the student usually achieves inferior (or comparable) accuracy. 
A key to this effect is: the proposed SAP encourages the attention-based partition of the (transformer) student to be partially consistent with the semantic-based teacher partition through knowledge distillation, yielding the so-called semi-attention. Such partial consistency allows the student to have both consistency and reasonable conflict with the noisy teacher. More specifically, on the one hand, the attention is guided by the semantic partition from the teacher. On the other hand, the attention mechanism itself still has some degree of freedom to comply with the inherent similarity between different patches, thus gaining resistance against noisy supervision. Moreover, we integrate a battery of well-engineered designs into SAP to reinforce their cooperation (e.g., multiple forms of teacher-student consistency), as well as to promote reasonable conflict (e.g., mutual absorbing partition refinement and a supervision signal dropout strategy). Experimental results confirm that the transformer student achieves substantial improvement after this semi-attention learning scheme, and produces new state-of-the-art accuracy on several standard re-ID benchmarks.", + "primary_area": "computer vision i", + "author": "Mengxi Jia; Yifan Sun; Yunpeng Zhai; Xinhua Cheng; Yi Yang; Ying Li", + "authorids": "", + "aff": "School of Software and Microelectronic, Peking University, Beijing, China + Baidu Research; Baidu Research; Peking University, China; Peking University, China; College of Computer Science and Technology, Zhejiang University, China; National Engineering Center of Software Engineering, Peking University, Beijing, China", + "bibtex": "@article{Jia_Sun_Zhai_Cheng_Yang_Li_2023, title={Semi-attention Partition for Occluded Person Re-identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25180}, DOI={10.1609/aaai.v37i1.25180}, abstractNote={This paper proposes a Semi-Attention Partition (SAP) method to learn well-aligned part features for 
occluded person re-identification (re-ID). Currently, the mainstream methods employ either external semantic partition or attention-based partition, and the latter manner is usually better than the former one. Under this background, this paper explores a potential that the weak semantic partition can be a good teacher for the strong attention-based partition. In other words, the attention-based student can substantially surpass its noisy semantic-based teacher, contradicting the common sense that the student usually achieves inferior (or comparable) accuracy. A key to this effect is: the proposed SAP encourages the attention-based partition of the (transformer) student to be partially consistent with the semantic-based teacher partition through knowledge distillation, yielding the so-called semi-attention. Such partial consistency allows the student to have both consistency and reasonable conflict with the noisy teacher. More specifically, on the one hand, the attention is guided by the semantic partition from the teacher. On the other hand, the attention mechanism itself still has some degree of freedom to comply with the inherent similarity between different patches, thus gaining resistance against noisy supervision. Moreover, we integrate a battery of well-engineered designs into SAP to reinforce their cooperation (e.g., multiple forms of teacher-student consistency), as well as to promote reasonable conflict (e.g., mutual absorbing partition refinement and a supervision signal dropout strategy). 
Experimental results confirm that the transformer student achieves substantial improvement after this semi-attention learning scheme, and produces new state-of-the-art accuracy on several standard re-ID benchmarks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Mengxi and Sun, Yifan and Zhai, Yunpeng and Cheng, Xinhua and Yang, Yi and Li, Ying}, year={2023}, month={Jun.}, pages={998-1006} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25180/24952", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25180", + "pdf_size": 893444, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3271170570046246647&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "pku.edu.cn; ;pku.edu.cn; ; ;pku.edu.cn", + "email": "pku.edu.cn; ;pku.edu.cn; ; ;pku.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;0;0;2;0", + "aff_unique_norm": "Peking University;Baidu;Zhejiang University", + "aff_unique_dep": "School of Software and Microelectronic;Baidu Research;College of Computer Science and Technology", + "aff_unique_url": "http://www.pku.edu.cn;https://research.baidu.com;http://www.zju.edu.cn", + "aff_unique_abbr": "PKU;Baidu;ZJU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25727", + "title": "Semi-random Impossibilities of Condorcet Criterion", + "track": "main", + "status": "Technical", + "abstract": "The Condorcet criterion (CC) is a classical and well-accepted criterion for voting. Unfortunately, it is incompatible with many other desiderata including participation (PAR), half-way monotonicity (HM), Maskin monotonicity (MM), and strategy-proofness (SP). Such incompatibilities are often known as impossibility theorems, and are proved by worst-case analysis. 
Previous work has investigated the likelihood for these impossibilities to occur under certain models, which are often criticized of being unrealistic.\n\nWe strengthen previous work by proving the first set of semi-random impossibilities for voting rules to satisfy CC and the more general, group versions of the four desiderata: for any sufficiently large number of voters n, any size of the group 1<= B<= \\sqrt n, any voting rule r, and under a large class of semi-random models that include Impartial Culture, the likelihood for r to satisfy CC and PAR, CC and HM, CC and MM, or CC and SP is 1-\\Omega(B/\\sqrt n). This matches existing lower bounds for CC&PAR (B=1) and CC&SP and CC&HM (B<=\\sqrt n), showing that many commonly-studied voting rules are already asymptotically optimal in such cases.", + "primary_area": "game theory and economic paradigms", + "author": "Lirong Xia", + "authorids": "", + "aff": "RPI, Troy, NY, USA", + "bibtex": "@article{Xia_2023, title={Semi-random Impossibilities of Condorcet Criterion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25727}, DOI={10.1609/aaai.v37i5.25727}, abstractNote={The Condorcet criterion (CC) is a classical and well-accepted criterion for voting. Unfortunately, it is incompatible with many other desiderata including participation (PAR), half-way monotonicity (HM), Maskin monotonicity (MM), and strategy-proofness (SP). Such incompatibilities are often known as impossibility theorems, and are proved by worst-case analysis. Previous work has investigated the likelihood for these impossibilities to occur under certain models, which are often criticized of being unrealistic. 
We strengthen previous work by proving the first set of semi-random impossibilities for voting rules to satisfy CC and the more general, group versions of the four desiderata: for any sufficiently large number of voters n, any size of the group 1<= B<= \\sqrt n, any voting rule r, and under a large class of semi-random models that include Impartial Culture, the likelihood for r to satisfy CC and PAR, CC and HM, CC and MM, or CC and SP is 1-\\Omega(B/\\sqrt n). This matches existing lower bounds for CC&PAR (B=1) and CC&SP and CC&HM (B<=\\sqrt n), showing that many commonly-studied voting rules are already asymptotically optimal in such cases.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xia, Lirong}, year={2023}, month={Jun.}, pages={5867-5875} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25727/25499", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25727", + "pdf_size": 264629, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11144467166135956840&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "gmail.com", + "email": "gmail.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Rensselaer Polytechnic Institute", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rpi.edu", + "aff_unique_abbr": "RPI", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Troy", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26702", + "title": "Semi-supervised Credit Card Fraud Detection via Attribute-Driven Graph Representation", + "track": "aaai special track", + "status": "Technical", + "abstract": "Credit card fraud incurs a considerable cost for both cardholders and issuing banks. Contemporary methods apply machine learning-based classifiers to detect fraudulent behavior from labeled transaction records. 
But labeled data are usually a small proportion of billions of real transactions due to expensive labeling costs, which implies that they do not well exploit many natural features from unlabeled data. Therefore, we propose a semi-supervised graph neural network for fraud detection. Specifically, we leverage transaction records to construct a temporal transaction graph, which is composed of temporal transactions (nodes) and interactions (edges) among them. Then we pass messages among the nodes through a Gated Temporal Attention Network (GTAN) to learn the transaction representation. We further model the fraud patterns through risk propagation among transactions. The extensive experiments are conducted on a real-world transaction dataset and two publicly available fraud detection datasets. The result shows that our proposed method, namely GTAN, outperforms other state-of-the-art baselines on three fraud detection datasets. Semi-supervised experiments demonstrate the excellent fraud detection performance of our model with only a tiny proportion of labeled data.", + "primary_area": "ai for social impact", + "author": "Sheng Xiang; Mingzhi Zhu; Dawei Cheng; Enxia Li; Ruihui Zhao; Yi Ouyang; Ling Chen; Yefeng Zheng", + "authorids": "", + "aff": "Australian Artificial Intelligence Institute, University of Technology Sydney, Sydney, Australia; Department of Computer Science and Technology, Tongji University, Shanghai, China; Department of Computer Science and Technology, Tongji University, Shanghai, China + Shanghai Artificial Intelligence Laboratory, Shanghai, China; Australian Artificial Intelligence Institute, University of Technology Sydney, Sydney, Australia; Tencent Jarvis Laboratory, Shenzhen, China; Tencent Jarvis Laboratory, Shenzhen, China; Australian Artificial Intelligence Institute, University of Technology Sydney, Sydney, Australia; Tencent Jarvis Laboratory, Shenzhen, China", + "bibtex": "@article{Xiang_Zhu_Cheng_Li_Zhao_Ouyang_Chen_Zheng_2023, 
title={Semi-supervised Credit Card Fraud Detection via Attribute-Driven Graph Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26702}, DOI={10.1609/aaai.v37i12.26702}, abstractNote={Credit card fraud incurs a considerable cost for both cardholders and issuing banks. Contemporary methods apply machine learning-based classifiers to detect fraudulent behavior from labeled transaction records. But labeled data are usually a small proportion of billions of real transactions due to expensive labeling costs, which implies that they do not well exploit many natural features from unlabeled data. Therefore, we propose a semi-supervised graph neural network for fraud detection. Specifically, we leverage transaction records to construct a temporal transaction graph, which is composed of temporal transactions (nodes) and interactions (edges) among them. Then we pass messages among the nodes through a Gated Temporal Attention Network (GTAN) to learn the transaction representation. We further model the fraud patterns through risk propagation among transactions. The extensive experiments are conducted on a real-world transaction dataset and two publicly available fraud detection datasets. The result shows that our proposed method, namely GTAN, outperforms other state-of-the-art baselines on three fraud detection datasets. 
Semi-supervised experiments demonstrate the excellent fraud detection performance of our model with only a tiny proportion of labeled data.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiang, Sheng and Zhu, Mingzhi and Cheng, Dawei and Li, Enxia and Zhao, Ruihui and Ouyang, Yi and Chen, Ling and Zheng, Yefeng}, year={2023}, month={Jun.}, pages={14557-14565} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26702/26474", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26702", + "pdf_size": 695271, + "gs_citation": 78, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14882539512370225550&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "uts.edu.au;uts.edu.au;nyu.edu;tongji.edu.cn;student.uts.edu.au;ruri.waseda.jp;tencent.com;tencent.com", + "email": "uts.edu.au;uts.edu.au;nyu.edu;tongji.edu.cn;student.uts.edu.au;ruri.waseda.jp;tencent.com;tencent.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1+2;0;3;3;0;3", + "aff_unique_norm": "University of Technology Sydney;Tongji University;Shanghai Artificial Intelligence Laboratory;Tencent Jarvis Laboratory", + "aff_unique_dep": "Australian Artificial Intelligence Institute;Department of Computer Science and Technology;;", + "aff_unique_url": "https://www.uts.edu.au;https://www.tongji.edu.cn;;https://jarvislab.tencent.com", + "aff_unique_abbr": "UTS;Tongji;;Tencent Jarvis Lab", + "aff_campus_unique_index": "0;1;1+1;0;2;2;0;2", + "aff_campus_unique": "Sydney;Shanghai;Shenzhen", + "aff_country_unique_index": "0;1;1+1;0;1;1;0;1", + "aff_country_unique": "Australia;China" + }, + { + "id": "article-25183", + "title": "Semi-supervised Deep Large-Baseline Homography Estimation with Progressive Equivalence Constraint", + "track": "main", + "status": "Technical", + "abstract": "Homography estimation is erroneous in the case of large-baseline due to the low image overlay and limited 
receptive field. To address it, we propose a progressive estimation strategy by converting large-baseline homography into multiple intermediate ones, cumulatively multiplying these intermediate items can reconstruct the initial homography. Meanwhile, a semi-supervised homography identity loss, which consists of two components: a supervised objective and an unsupervised objective, is introduced. The first supervised loss is acting to optimize intermediate homographies, while the second unsupervised one helps to estimate a large-baseline homography without photometric losses. To validate our method, we propose a large-scale dataset that covers regular and challenging scenes. Experiments show that our method achieves state-of-the-art performance in large-baseline scenes while keeping competitive performance in small-baseline scenes. Code and dataset are available at https://github.com/megvii-research/LBHomo.", + "primary_area": "computer vision i", + "author": "Hai Jiang; Haipeng Li; Yuhang Lu; Songchen Han; Shuaicheng Liu", + "authorids": "", + "aff": "Sichuan University+Megvii Technology; University of Electronic Science and Technology of China+Megvii Technology; University of South Carolina; Sichuan University; University of Electronic Science and Technology of China+Megvii Technology", + "bibtex": "@article{Jiang_Li_Lu_Han_Liu_2023, title={Semi-supervised Deep Large-Baseline Homography Estimation with Progressive Equivalence Constraint}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25183}, DOI={10.1609/aaai.v37i1.25183}, abstractNote={Homography estimation is erroneous in the case of large-baseline due to the low image overlay and limited receptive field. To address it, we propose a progressive estimation strategy by converting large-baseline homography into multiple intermediate ones, cumulatively multiplying these intermediate items can reconstruct the initial homography. 
Meanwhile, a semi-supervised homography identity loss, which consists of two components: a supervised objective and an unsupervised objective, is introduced. The first supervised loss is acting to optimize intermediate homographies, while the second unsupervised one helps to estimate a large-baseline homography without photometric losses. To validate our method, we propose a large-scale dataset that covers regular and challenging scenes. Experiments show that our method achieves state-of-the-art performance in large-baseline scenes while keeping competitive performance in small-baseline scenes. Code and dataset are available at https://github.com/megvii-research/LBHomo.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Hai and Li, Haipeng and Lu, Yuhang and Han, Songchen and Liu, Shuaicheng}, year={2023}, month={Jun.}, pages={1024-1032} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25183/24955", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25183", + "pdf_size": 7297076, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3106241338199969954&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.scu.edu.cn;std.uestc.edu.cn;email.sc.edu;scu.edu.cn;uestc.edu.cn", + "email": "stu.scu.edu.cn;std.uestc.edu.cn;email.sc.edu;scu.edu.cn;uestc.edu.cn", + "github": "https://github.com/megvii-research/LBHomo", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2+1;3;0;2+1", + "aff_unique_norm": "Sichuan University;Megvii Technology;University of Electronic Science and Technology of China;University of South Carolina", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.scu.edu.cn;https://www.megvii.com;https://www.uestc.edu.cn;https://www.sc.edu", + "aff_unique_abbr": "SCU;Megvii;UESTC;USC", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;0;0+0", + 
"aff_country_unique": "China;United States" + }, + { + "id": "article-26249", + "title": "Semi-supervised Learning with Support Isolation by Small-Paced Self-Training", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we address a special scenario of semi-supervised learning, where the label missing is caused by a preceding filtering mechanism, i.e., an instance can enter a subsequent process in which its label is revealed if and only if it passes the filtering mechanism. The rejected instances are prohibited to enter the subsequent labeling process due to economical or ethical reasons, making the support of the labeled and unlabeled distributions isolated from each other. In this case, semi-supervised learning approaches which rely on certain coherence of the labeled and unlabeled distribution would suffer from the consequent distribution mismatch, and hence result in poor prediction performance. In this paper, we propose a Small-Paced Self-Training framework, which iteratively discovers labeled and unlabeled instance subspaces with bounded Wasserstein distance. We theoretically prove that such a framework may achieve provably low error on the pseudo labels during learning. 
Experiments on both benchmark and pneumonia diagnosis tasks show that our method is effective.", + "primary_area": "machine learning iv", + "author": "Zheng Xie; Hui Sun; Ming Li", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Xie_Sun_Li_2023, title={Semi-supervised Learning with Support Isolation by Small-Paced Self-Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26249}, DOI={10.1609/aaai.v37i9.26249}, abstractNote={In this paper, we address a special scenario of semi-supervised learning, where the label missing is caused by a preceding filtering mechanism, i.e., an instance can enter a subsequent process in which its label is revealed if and only if it passes the filtering mechanism. The rejected instances are prohibited to enter the subsequent labeling process due to economical or ethical reasons, making the support of the labeled and unlabeled distributions isolated from each other. In this case, semi-supervised learning approaches which rely on certain coherence of the labeled and unlabeled distribution would suffer from the consequent distribution mismatch, and hence result in poor prediction performance. In this paper, we propose a Small-Paced Self-Training framework, which iteratively discovers labeled and unlabeled instance subspaces with bounded Wasserstein distance. We theoretically prove that such a framework may achieve provably low error on the pseudo labels during learning. 
Experiments on both benchmark and pneumonia diagnosis tasks show that our method is effective.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Zheng and Sun, Hui and Li, Ming}, year={2023}, month={Jun.}, pages={10510-10518} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26249/26021", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26249", + "pdf_size": 170880, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3632616579141503271&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26996", + "title": "Semi-supervised Review-Aware Rating Regression (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Semi-supervised learning is a promising solution to mitigate data sparsity in review-aware rating regression (RaRR), but it bears the risk of learning with noisy pseudo-labelled data. In this paper, we propose a paradigm called co-training-teaching (CoT2), which integrates the merits of both co-training and co-teaching towards the robust semi-supervised RaRR. Concretely, CoT2 employs two predictors and each of them alternately plays the roles of \"labeler\" and \"validator\" to generate and validate pseudo-labelled instances. 
Extensive experiments show that CoT2 considerably outperforms state-of-the-art RaRR techniques, especially when training data is severely insufficient.", + "primary_area": "", + "author": "Xiangkui Lu; Jun Wu", + "authorids": "", + "aff": "School of Computer and Information Technology, Beijing Jiaotong University, Beijing 100044, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing 100044, China", + "bibtex": "@article{Lu_Wu_2024, title={Semi-supervised Review-Aware Rating Regression (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26996}, DOI={10.1609/aaai.v37i13.26996}, abstractNote={Semi-supervised learning is a promising solution to mitigate data sparsity in review-aware rating regression (RaRR), but it bears the risk of learning with noisy pseudo-labelled data. In this paper, we propose a paradigm called co-training-teaching (CoT2), which integrates the merits of both co-training and co-teaching towards the robust semi-supervised RaRR. Concretely, CoT2 employs two predictors and each of them alternately plays the roles of "labeler" and "validator" to generate and validate pseudo-labelled instances. 
Extensive experiments show that CoT2 considerably outperforms state-of-the-art RaRR techniques, especially when training data is severely insufficient.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lu, Xiangkui and Wu, Jun}, year={2024}, month={Jul.}, pages={16272-16273} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26996/26768", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26996", + "pdf_size": 76925, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:g4Q9lKZnMmkJ:scholar.google.com/&scioq=Semi-supervised+Review-Aware+Rating+Regression+(Student+Abstract)&hl=en&as_sdt=0,31", + "gs_version_total": 2, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Beijing Jiaotong University", + "aff_unique_dep": "School of Computer and Information Technology", + "aff_unique_url": "http://www.bjtu.edu.cn", + "aff_unique_abbr": "BJTU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25931", + "title": "Semi-transductive Learning for Generalized Zero-Shot Sketch-Based Image Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Sketch-based image retrieval (SBIR) is an attractive research area where freehand sketches are used as queries to retrieve relevant images. Existing solutions have advanced the task to the challenging zero-shot setting (ZS-SBIR), where the trained models are tested on new classes without seen data. However, they are prone to overfitting under a realistic scenario when the test data includes both seen and unseen classes. In this paper, we study generalized ZS-SBIR (GZS-SBIR) and propose a novel semi-transductive learning paradigm. 
Transductive learning is performed on the image modality to explore the potential data distribution within unseen classes, and zero-shot learning is performed on the sketch modality sharing the learned knowledge through a semi-heterogeneous architecture. A hybrid metric learning strategy is proposed to establish semantics-aware ranking property and calibrate the joint embedding space. Extensive experiments are conducted on two large-scale benchmarks and four evaluation metrics. The results show that our method is superior over the state-of-the-art competitors in the challenging GZS-SBIR task.", + "primary_area": "machine learning i", + "author": "Ce Ge; Jingyu Wang; Qi Qi; Haifeng Sun; Tong Xu; Jianxin Liao", + "authorids": "", + "aff": "State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications", + "bibtex": "@article{Ge_Wang_Qi_Sun_Xu_Liao_2023, title={Semi-transductive Learning for Generalized Zero-Shot Sketch-Based Image Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25931}, DOI={10.1609/aaai.v37i6.25931}, abstractNote={Sketch-based image retrieval (SBIR) is an attractive research area where freehand sketches are used as queries to retrieve relevant images. Existing solutions have advanced the task to the challenging zero-shot setting (ZS-SBIR), where the trained models are tested on new classes without seen data. 
However, they are prone to overfitting under a realistic scenario when the test data includes both seen and unseen classes. In this paper, we study generalized ZS-SBIR (GZS-SBIR) and propose a novel semi-transductive learning paradigm. Transductive learning is performed on the image modality to explore the potential data distribution within unseen classes, and zero-shot learning is performed on the sketch modality sharing the learned knowledge through a semi-heterogeneous architecture. A hybrid metric learning strategy is proposed to establish semantics-aware ranking property and calibrate the joint embedding space. Extensive experiments are conducted on two large-scale benchmarks and four evaluation metrics. The results show that our method is superior over the state-of-the-art competitors in the challenging GZS-SBIR task.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ge, Ce and Wang, Jingyu and Qi, Qi and Sun, Haifeng and Xu, Tong and Liao, Jianxin}, year={2023}, month={Jun.}, pages={7678-7686} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25931/25703", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25931", + "pdf_size": 1427120, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6997489166477707410&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;ebupt.com;gmail.com", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;ebupt.com;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "State Key Laboratory of Networking and Switching Technology", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-26270", + "title": "Semidefinite Programming versus Burer-Monteiro Factorization for Matrix Sensing", + "track": "main", + "status": "Technical", + "abstract": "Many fundamental low-rank optimization problems, such as matrix completion, phase retrieval, and robust PCA, can be formulated as the matrix sensing problem. Two main approaches for solving matrix sensing are based on semidefinite programming (SDP) and Burer-Monteiro (B-M) factorization. The former suffers from high computational and space complexities, whereas the latter may return a spurious solution due to the non-convexity of the problem. The existing theoretical guarantees for the success of these methods have led to similar conservative conditions, which may wrongly imply that these methods have comparable performances. In this paper, we shed light on some major differences between these two methods. First, we present a class of structured matrix completion problems for which the B-M methods fail with an overwhelming probability, while the SDP method works correctly. Second, we identify a class of highly sparse matrix completion problems for which the B-M method works and the SDP method fails. Third, we prove that although the B-M method exhibits the same performance independent of the rank of the unknown solution, the success of the SDP method is correlated to the rank of the solution and improves as the rank increases. 
Unlike the existing literature that has mainly focused on those instances of matrix sensing for which both SDP and B-M work, this paper offers the first result on the unique merit of each method over the alternative approach.", + "primary_area": "machine learning iv", + "author": "Baturalp Yal\u00e7\u0131n; Ziye Ma; Javad Lavaei; Somayeh Sojoudi", + "authorids": "", + "aff": "UC Berkeley, Industrial Engineering and Operations Research; UC Berkeley, Electrical Engineering and Computer Science; UC Berkeley, Industrial Engineering and Operations Research; UC Berkeley, Electrical Engineering and Computer Science", + "bibtex": "@article{Yal\u00e7\u0131n_Ma_Lavaei_Sojoudi_2023, title={Semidefinite Programming versus Burer-Monteiro Factorization for Matrix Sensing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26270}, DOI={10.1609/aaai.v37i9.26270}, abstractNote={Many fundamental low-rank optimization problems, such as matrix completion, phase retrieval, and robust PCA, can be formulated as the matrix sensing problem. Two main approaches for solving matrix sensing are based on semidefinite programming (SDP) and Burer-Monteiro (B-M) factorization. The former suffers from high computational and space complexities, whereas the latter may return a spurious solution due to the non-convexity of the problem. The existing theoretical guarantees for the success of these methods have led to similar conservative conditions, which may wrongly imply that these methods have comparable performances. In this paper, we shed light on some major differences between these two methods. First, we present a class of structured matrix completion problems for which the B-M methods fail with an overwhelming probability, while the SDP method works correctly. Second, we identify a class of highly sparse matrix completion problems for which the B-M method works and the SDP method fails. 
Third, we prove that although the B-M method exhibits the same performance independent of the rank of the unknown solution, the success of the SDP method is correlated to the rank of the solution and improves as the rank increases. Unlike the existing literature that has mainly focused on those instances of matrix sensing for which both SDP and B-M work, this paper offers the first result on the unique merit of each method over the alternative approach.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yal\u00e7\u0131n, Baturalp and Ma, Ziye and Lavaei, Javad and Sojoudi, Somayeh}, year={2023}, month={Jun.}, pages={10702-10710} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26270/26042", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26270", + "pdf_size": 169417, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12659896713460803097&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu", + "email": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of California, Berkeley", + "aff_unique_dep": "Industrial Engineering and Operations Research", + "aff_unique_url": "https://www.berkeley.edu", + "aff_unique_abbr": "UC Berkeley", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Berkeley", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25506", + "title": "Separate but Equal: Equality in Belief Propagation for Single Cycle Graphs", + "track": "main", + "status": "Technical", + "abstract": "Belief propagation is a widely used incomplete optimization algorithm, whose main theoretical properties hold only under the assumptions that beliefs are not equal. 
Nevertheless, there is much evidence that equality between beliefs does occur. A method to overcome belief equality by using unary function-nodes is assumed to resolve the problem.\n\nWe focus on Min-sum, the belief propagation version for solving constraint optimization problems. We prove that on a single cycle graph, belief equality can be avoided only when the algorithm converges to the optimal solution. In any other case, the unary function methods will not prevent equality, rendering some existing results in need of reassessment. We differentiate between belief equality, which includes equal beliefs in a single message, and assignment equality, that prevents a coherent selection of assignments to variables. We show the necessary and satisfying conditions for both.", + "primary_area": "constraint satisfaction and optimization", + "author": "Erel Cohen; Omer Lev; Roie Zivan", + "authorids": "", + "aff": "Ben Gurion University of the Negev; Ben Gurion University of the Negev; Ben Gurion University of the Negev", + "bibtex": "@article{Cohen_Lev_Zivan_2023, title={Separate but Equal: Equality in Belief Propagation for Single Cycle Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25506}, DOI={10.1609/aaai.v37i4.25506}, abstractNote={Belief propagation is a widely used incomplete optimization algorithm, whose main theoretical properties hold only under the assumptions that beliefs are not equal. Nevertheless, there is much evidence that equality between beliefs does occur. A method to overcome belief equality by using unary function-nodes is assumed to resolve the problem. We focus on Min-sum, the belief propagation version for solving constraint optimization problems. We prove that on a single cycle graph, belief equality can be avoided only when the algorithm converges to the optimal solution. In any other case, the unary function methods will not prevent equality, rendering some existing results in need of reassessment. 
We differentiate between belief equality, which includes equal beliefs in a single message, and assignment equality, that prevents a coherent selection of assignments to variables. We show the necessary and satisfying conditions for both.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cohen, Erel and Lev, Omer and Zivan, Roie}, year={2023}, month={Jun.}, pages={3924-3931} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25506/25278", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25506", + "pdf_size": 670660, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12968326071487669477&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "post.bgu.ac.il;bgu.ac.il;bgu.ac.il", + "email": "post.bgu.ac.il;bgu.ac.il;bgu.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Ben Gurion University of the Negev", + "aff_unique_dep": "", + "aff_unique_url": "https://www.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-26658", + "title": "Seq2Seq Surrogates of Epidemic Models to Facilitate Bayesian Inference", + "track": "aaai special track", + "status": "Technical", + "abstract": "Epidemic models are powerful tools in understanding infectious disease. However, as they increase in size and complexity, they can quickly become computationally intractable. Recent progress in modelling methodology has shown that surrogate models can be used to emulate complex epidemic models with a high-dimensional parameter space. We show that deep sequence-to-sequence (seq2seq) models can serve as accurate surrogates for complex epidemic models with sequence based model parameters, effectively replicating seasonal and long-term transmission dynamics. 
Once trained, our surrogate can predict scenarios a several thousand times faster than the original model, making them ideal for policy exploration. We demonstrate that replacing a traditional epidemic model with a learned simulator facilitates robust Bayesian inference.", + "primary_area": "ai for social impact", + "author": "Giovanni Charles; Timothy M. Wolock; Peter Winskill; Azra Ghani; Samir Bhatt; Seth Flaxman", + "authorids": "", + "aff": "Imperial College London; Imperial College London; Imperial College London; Imperial College London; University of Copenhagen; Department of Computer Science, University of Oxford", + "bibtex": "@article{Charles_Wolock_Winskill_Ghani_Bhatt_Flaxman_2023, title={Seq2Seq Surrogates of Epidemic Models to Facilitate Bayesian Inference}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26658}, DOI={10.1609/aaai.v37i12.26658}, abstractNote={Epidemic models are powerful tools in understanding infectious disease. However, as they increase in size and complexity, they can quickly become computationally intractable. Recent progress in modelling methodology has shown that surrogate models can be used to emulate complex epidemic models with a high-dimensional parameter space. We show that deep sequence-to-sequence (seq2seq) models can serve as accurate surrogates for complex epidemic models with sequence based model parameters, effectively replicating seasonal and long-term transmission dynamics. Once trained, our surrogate can predict scenarios a several thousand times faster than the original model, making them ideal for policy exploration. We demonstrate that replacing a traditional epidemic model with a learned simulator facilitates robust Bayesian inference.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Charles, Giovanni and Wolock, Timothy M. 
and Winskill, Peter and Ghani, Azra and Bhatt, Samir and Flaxman, Seth}, year={2023}, month={Jun.}, pages={14170-14177} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26658/26430", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26658", + "pdf_size": 1181907, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12884765036969081428&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "ic.ac.uk; ; ; ; ; ", + "email": "ic.ac.uk; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;2", + "aff_unique_norm": "Imperial College London;University of Copenhagen;University of Oxford", + "aff_unique_dep": ";;Department of Computer Science", + "aff_unique_url": "https://www.imperial.ac.uk;https://www.ku.dk;https://www.ox.ac.uk", + "aff_unique_abbr": "ICL;UCPH;Oxford", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Oxford", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "United Kingdom;Denmark" + }, + { + "id": "article-26532", + "title": "Sequence Generation with Label Augmentation for Relation Extraction", + "track": "main", + "status": "Technical", + "abstract": "Sequence generation demonstrates promising performance in recent information extraction efforts, by incorporating large-scale pre-trained Seq2Seq models. This paper investigates the merits of employing sequence generation in relation extraction, finding that with relation names or synonyms as generation targets, their textual semantics and the correlation (in terms of word sequence pattern) among them affect model performance. We then propose Relation Extraction with Label Augmentation (RELA), a Seq2Seq model with automatic label augmentation for RE. By saying label augmentation, we mean prod semantically synonyms for each relation name as the generation target. Besides, we present an in-depth analysis of the Seq2Seq model's behavior when dealing with RE. 
Experimental results show that RELA achieves competitive results compared with previous methods on four RE datasets.", + "primary_area": "speech natural language processing", + "author": "Bo Li; Dingyao Yu; Wei Ye; Jinglei Zhang; Shikun Zhang", + "authorids": "", + "aff": "National Engineering Research Center for Software Engineering, Peking University+School of Software and Microelectronics, Peking University; National Engineering Research Center for Software Engineering, Peking University+School of Software and Microelectronics, Peking University; National Engineering Research Center for Software Engineering, Peking University; National Engineering Research Center for Software Engineering, Peking University+School of Software and Microelectronics, Peking University; National Engineering Research Center for Software Engineering, Peking University+School of Software and Microelectronics, Peking University", + "bibtex": "@article{Li_Yu_Ye_Zhang_Zhang_2023, title={Sequence Generation with Label Augmentation for Relation Extraction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26532}, DOI={10.1609/aaai.v37i11.26532}, abstractNote={Sequence generation demonstrates promising performance in recent information extraction efforts, by incorporating large-scale pre-trained Seq2Seq models. This paper investigates the merits of employing sequence generation in relation extraction, finding that with relation names or synonyms as generation targets, their textual semantics and the correlation (in terms of word sequence pattern) among them affect model performance. We then propose Relation Extraction with Label Augmentation (RELA), a Seq2Seq model with automatic label augmentation for RE. By saying label augmentation, we mean prod semantically synonyms for each relation name as the generation target. Besides, we present an in-depth analysis of the Seq2Seq model\u2019s behavior when dealing with RE. 
Experimental results show that RELA achieves competitive results compared with previous methods on four RE datasets.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Bo and Yu, Dingyao and Ye, Wei and Zhang, Jinglei and Zhang, Shikun}, year={2023}, month={Jun.}, pages={13043-13050} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26532/26304", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26532", + "pdf_size": 620514, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8654167317556134352&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0;0+0;0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "National Engineering Research Center for Software Engineering", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26982", + "title": "Sequential Graph Attention Learning for Predicting Dynamic Stock Trends (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "The stock market is characterized by a complex relationship between companies and the market. This study combines a sequential graph structure with attention mechanisms to learn global and local information within temporal time. Specifically, our proposed \u201cGAT-AGNN\u201d module compares model performance across multiple industries as well as within single industries. 
The results show that the proposed framework outperforms the state-of-the-art methods in predicting stock trends across multiple industries on Taiwan Stock datasets.", + "primary_area": "", + "author": "Tzu-Ya Lai; Wen Jung Cheng; Jun-En Ding", + "authorids": "", + "aff": "National Taipei University Master of Arts in Economics; University of Connecticut Master in Financial Technology; National Yang Ming Chiao Tung University Institute of Hospital and Health Care Administration", + "bibtex": "@article{Lai_Cheng_Ding_2024, title={Sequential Graph Attention Learning for Predicting Dynamic Stock Trends (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26982}, DOI={10.1609/aaai.v37i13.26982}, abstractNote={The stock market is characterized by a complex relationship between companies and the market. This study combines a sequential graph structure with attention mechanisms to learn global and local information within temporal time. Specifically, our proposed \u201cGAT-AGNN\u201d module compares model performance across multiple industries as well as within single industries. 
The results show that the proposed framework outperforms the state-of-the-art methods in predicting stock trends across multiple industries on Taiwan Stock datasets.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lai, Tzu-Ya and Cheng, Wen Jung and Ding, Jun-En}, year={2024}, month={Jul.}, pages={16244-16245} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26982/26754", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26982", + "pdf_size": 877460, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9812464732899532954&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "gm.ntpu.edu.tw;uconn.edu;nycu.edu.tw", + "email": "gm.ntpu.edu.tw;uconn.edu;nycu.edu.tw", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "National Taipei University;University of Connecticut;National Yang Ming Chiao Tung University", + "aff_unique_dep": "Master of Arts in Economics;Master in Financial Technology;Institute of Hospital and Health Care Administration", + "aff_unique_url": "https://www.ntpu.edu.tw;https://www.uconn.edu;https://www.nycu.edu.tw", + "aff_unique_abbr": "NTPU;UConn;NYCU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Taiwan, China;United States" + }, + { + "id": "article-25630", + "title": "Set-to-Sequence Ranking-Based Concept-Aware Learning Path Recommendation", + "track": "main", + "status": "Technical", + "abstract": "With the development of the online education system, personalized education recommendation has played an essential role. In this paper, we focus on developing path recommendation systems that aim to generating and recommending an entire learning path to the given user in each session. 
Noticing that existing approaches fail to consider the correlations of concepts in the path, we propose a novel framework named Set-to-Sequence Ranking-based Concept-aware Learning Path Recommendation (SRC), which formulates the recommendation task under a set-to-sequence paradigm. Specifically, we first design a concept-aware encoder module which can capture the correlations among the input learning concepts. The outputs are then fed into a decoder module that sequentially generates a path through an attention mechanism that handles correlations between the learning and target concepts. Our recommendation policy is optimized by policy gradient. In addition, we also introduce an auxiliary module based on knowledge tracing to enhance the model\u2019s stability by evaluating students\u2019 learning effects on learning concepts. We conduct extensive experiments on two real-world public datasets and one industrial dataset, and the experimental results demonstrate the superiority and effectiveness of SRC. 
Code now is available at https://gitee.com/mindspore/models/tree/master/research/recommend/SRC.", + "primary_area": "domain s of application", + "author": "Xianyu Chen; Jian Shen; Wei Xia; Jiarui Jin; Yakun Song; Weinan Zhang; Weiwen Liu; Menghui Zhu; Ruiming Tang; Kai Dong; Dingyin Xia; Yong Yu", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Huawei Noah\u2019s Ark Lab; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Huawei Noah\u2019s Ark Lab; Shanghai Jiao Tong University; Huawei Noah\u2019s Ark Lab; Huawei Technologies Co Ltd; Huawei Technologies Co Ltd; Shanghai Jiao Tong University", + "bibtex": "@article{Chen_Shen_Xia_Jin_Song_Zhang_Liu_Zhu_Tang_Dong_Xia_Yu_2023, title={Set-to-Sequence Ranking-Based Concept-Aware Learning Path Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25630}, DOI={10.1609/aaai.v37i4.25630}, abstractNote={With the development of the online education system, personalized education recommendation has played an essential role. In this paper, we focus on developing path recommendation systems that aim to generating and recommending an entire learning path to the given user in each session. Noticing that existing approaches fail to consider the correlations of concepts in the path, we propose a novel framework named Set-to-Sequence Ranking-based Concept-aware Learning Path Recommendation (SRC), which formulates the recommendation task under a set-to-sequence paradigm. Specifically, we first design a concept-aware encoder module which can capture the correlations among the input learning concepts. The outputs are then fed into a decoder module that sequentially generates a path through an attention mechanism that handles correlations between the learning and target concepts. Our recommendation policy is optimized by policy gradient. 
In addition, we also introduce an auxiliary module based on knowledge tracing to enhance the model\u2019s stability by evaluating students\u2019 learning effects on learning concepts. We conduct extensive experiments on two real-world public datasets and one industrial dataset, and the experimental results demonstrate the superiority and effectiveness of SRC. Code now is available at https://gitee.com/mindspore/models/tree/master/research/recommend/SRC.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Xianyu and Shen, Jian and Xia, Wei and Jin, Jiarui and Song, Yakun and Zhang, Weinan and Liu, Weiwen and Zhu, Menghui and Tang, Ruiming and Dong, Kai and Xia, Dingyin and Yu, Yong}, year={2023}, month={Jun.}, pages={5027-5035} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25630/25402", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25630", + "pdf_size": 304111, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=674472513919744632&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;apex.sjtu.edu.cn;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;apex.sjtu.edu.cn;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com", + "github": "", + "project": "https://gitee.com/mindspore/models/tree/master/research/recommend/SRC", + "author_num": 12, + "aff_unique_index": "0;0;1;0;0;0;1;0;1;2;2;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Huawei;Huawei Technologies", + "aff_unique_dep": ";Noah\u2019s Ark Lab;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.huawei.com;https://www.huawei.com", + "aff_unique_abbr": "SJTU;Huawei;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-25148", + "title": "ShadowFormer: Global Context Helps Shadow Removal", + "track": "main", + "status": "Technical", + "abstract": "Recent deep learning methods have achieved promising results in image shadow removal. However, most of the existing approaches focus on working locally within shadow and non-shadow regions, resulting in severe artifacts around the shadow boundaries as well as inconsistent illumination between shadow and non-shadow regions. It is still challenging for the deep shadow removal model to exploit the global contextual correlation between shadow and non-shadow regions. In this work, we first propose a Retinex-based shadow model, from which we derive a novel transformer-based network, dubbed ShandowFormer, to exploit non-shadow regions to help shadow region restoration. A multi-scale channel attention framework is employed to hierarchically capture the global information. Based on that, we propose a Shadow-Interaction Module (SIM) with Shadow-Interaction Attention (SIA) in the bottleneck stage to effectively model the context correlation between shadow and non-shadow regions. We conduct extensive experiments on three popular public datasets, including ISTD, ISTD+, and SRD, \nto evaluate the proposed method. 
Our method achieves state-of-the-art performance by using up to 150X fewer model parameters.", + "primary_area": "computer vision i", + "author": "Lanqing Guo; Siyu Huang; Ding Liu; Hao Cheng; Bihan Wen", + "authorids": "", + "aff": "Nanyang Technological University, Singapore; Harvard University, USA; ByteDance Inc, USA; Nanyang Technological University, Singapore; Nanyang Technological University, Singapore", + "bibtex": "@article{Guo_Huang_Liu_Cheng_Wen_2023, title={ShadowFormer: Global Context Helps Shadow Removal}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25148}, DOI={10.1609/aaai.v37i1.25148}, abstractNote={Recent deep learning methods have achieved promising results in image shadow removal. However, most of the existing approaches focus on working locally within shadow and non-shadow regions, resulting in severe artifacts around the shadow boundaries as well as inconsistent illumination between shadow and non-shadow regions. It is still challenging for the deep shadow removal model to exploit the global contextual correlation between shadow and non-shadow regions. In this work, we first propose a Retinex-based shadow model, from which we derive a novel transformer-based network, dubbed ShandowFormer, to exploit non-shadow regions to help shadow region restoration. A multi-scale channel attention framework is employed to hierarchically capture the global information. Based on that, we propose a Shadow-Interaction Module (SIM) with Shadow-Interaction Attention (SIA) in the bottleneck stage to effectively model the context correlation between shadow and non-shadow regions. We conduct extensive experiments on three popular public datasets, including ISTD, ISTD+, and SRD, to evaluate the proposed method. 
Our method achieves state-of-the-art performance by using up to 150X fewer model parameters.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Lanqing and Huang, Siyu and Liu, Ding and Cheng, Hao and Wen, Bihan}, year={2023}, month={Jun.}, pages={710-718} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25148/24920", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25148", + "pdf_size": 1945399, + "gs_citation": 87, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15838729398919165834&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ntu.edu.sg;seas.harvard.edu;bytedance.com;ntu.edu.sg;ntu.edu.sg", + "email": "ntu.edu.sg;seas.harvard.edu;bytedance.com;ntu.edu.sg;ntu.edu.sg", + "github": "https://github.com/GuoLanqing/ShadowFormer", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;0", + "aff_unique_norm": "Nanyang Technological University;Harvard University;ByteDance Inc", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.harvard.edu;https://www.bytedance.com", + "aff_unique_abbr": "NTU;Harvard;ByteDance", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0;0", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "article-26877", + "title": "Shared Tasks as Tutorials: A Methodical Approach", + "track": "eaai symposium main track", + "status": "Technical", + "abstract": "In this paper, we discuss the benefits and challenges of shared tasks as a teaching method. A shared task is a scientific event and a friendly competition to solve a research problem, the task. In terms of linking research and teaching, shared-task-based tutorials fulfill several faculty desires: they leverage students' interdisciplinary and heterogeneous skills, foster teamwork, and engage them in creative work that has the potential to produce original research contributions. 
Based on ten information retrieval (IR) courses at two universities since 2019 with shared tasks as tutorials, we derive a domain-neutral process model to capture the respective tutorial structure. Meanwhile, our teaching method has been adopted by other universities in IR courses, but also in other areas of AI such as natural language processing and robotics.", + "primary_area": "", + "author": "Theresa Elstner; Frank Loebe; Yamen Ajjour; Christopher Akiki; Alexander Bondarenko; Maik Fr\u00f6be; Lukas Gienapp; Nikolay Kolyada; Janis Mohr; Stephan Sandfuchs; Matti Wiegmann; J\u00f6rg Frochte; Nicola Ferro; Sven Hofmann; Benno Stein; Matthias Hagen; Martin Potthast", + "authorids": "", + "aff": "Leipzig University, Germany; Leipzig University, Germany; Bauhaus-Universit\u00a8at Weimar, Germany; Leipzig University, Germany; Martin-Luther-Universit\u00a8at Halle-Wittenberg, Germany; Martin-Luther-Universit\u00a8at Halle-Wittenberg, Germany; Leipzig University, Germany; Bauhaus-Universit\u00a8at Weimar, Germany; Bochum University of Applied Sciences, Germany; Bochum University of Applied Sciences, Germany; Bauhaus-Universit\u00a8at Weimar, Germany; Bochum University of Applied Sciences, Germany; University of Padua, Italy; Leipzig University, Germany; Bauhaus-Universit\u00a8at Weimar, Germany; Martin-Luther-Universit\u00a8at Halle-Wittenberg, Germany; Leipzig University, Germany", + "bibtex": "@article{Elstner_Loebe_Ajjour_Akiki_Bondarenko_Fr\u00f6be_Gienapp_Kolyada_Mohr_Sandfuchs_Wiegmann_Frochte_Ferro_Hofmann_Stein_Hagen_Potthast_2024, title={Shared Tasks as Tutorials: A Methodical Approach}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26877}, DOI={10.1609/aaai.v37i13.26877}, abstractNote={In this paper, we discuss the benefits and challenges of shared tasks as a teaching method. A shared task is a scientific event and a friendly competition to solve a research problem, the task. 
In terms of linking research and teaching, shared-task-based tutorials fulfill several faculty desires: they leverage students\u2019 interdisciplinary and heterogeneous skills, foster teamwork, and engage them in creative work that has the potential to produce original research contributions. Based on ten information retrieval (IR) courses at two universities since 2019 with shared tasks as tutorials, we derive a domain-neutral process model to capture the respective tutorial structure. Meanwhile, our teaching method has been adopted by other universities in IR courses, but also in other areas of AI such as natural language processing and robotics.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Elstner, Theresa and Loebe, Frank and Ajjour, Yamen and Akiki, Christopher and Bondarenko, Alexander and Fr\u00f6be, Maik and Gienapp, Lukas and Kolyada, Nikolay and Mohr, Janis and Sandfuchs, Stephan and Wiegmann, Matti and Frochte, J\u00f6rg and Ferro, Nicola and Hofmann, Sven and Stein, Benno and Hagen, Matthias and Potthast, Martin}, year={2024}, month={Jul.}, pages={15807-15815} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26877/26649", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26877", + "pdf_size": 202395, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7650526775606147156&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": ";;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;", + "github": "", + "project": "https://pan.webis.de/; https://touche.webis.de/", + "author_num": 17, + "aff_unique_index": "0;0;1;0;2;2;0;1;3;3;1;3;4;0;1;2;0", + "aff_unique_norm": "Leipzig University;Bauhaus-Universit\u00e4t Weimar;Martin-Luther-University Halle-Wittenberg;Bochum University of Applied Sciences;University of Padua", + "aff_unique_dep": ";;;;", + "aff_unique_url": 
"https://www.uni-leipzig.de;https://www.bauhaus-university.de;https://www.uni-halle.de;https://www.hochschule-bochum.de/;https://www.unipd.it", + "aff_unique_abbr": "Uni Leipzig;Bauhaus-Uni Weimar;MLU;;UNIPD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;1;0;0;0;0", + "aff_country_unique": "Germany;Italy" + }, + { + "id": "article-26179", + "title": "Sharing Pattern Submodels for Prediction with Missing Values", + "track": "main", + "status": "Technical", + "abstract": "Missing values are unavoidable in many applications of machine learning and present challenges both during training and at test time. When variables are missing in recurring patterns, fitting separate pattern submodels have been proposed as a solution. However, fitting models independently does not make efficient use of all available data. Conversely, fitting a single shared model to the full data set relies on imputation which often leads to biased results when missingness depends on unobserved factors. We propose an alternative approach, called sharing pattern submodels (SPSM), which i) makes predictions that are robust to missing values at test time, ii) maintains or improves the predictive power of pattern submodels, and iii) has a short description, enabling improved interpretability. Parameter sharing is enforced through sparsity-inducing regularization which we prove leads to consistent estimation. Finally, we give conditions for when a sharing model is optimal, even when both missingness and the target outcome depend on unobserved variables. Classification and regression experiments on synthetic and real-world data sets demonstrate that our models achieve a favorable tradeoff between pattern specialization and information sharing.", + "primary_area": "machine learning iii", + "author": "Lena Stempfle; Ashkan Panahi; Fredrik D. 
Johansson", + "authorids": "", + "aff": "Chalmers University of Technology, Department of Computer Science and Engineering, Gothenburg, Sweden; Chalmers University of Technology, Department of Computer Science and Engineering, Gothenburg, Sweden; Chalmers University of Technology, Department of Computer Science and Engineering, Gothenburg, Sweden", + "bibtex": "@article{Stempfle_Panahi_Johansson_2023, title={Sharing Pattern Submodels for Prediction with Missing Values}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26179}, DOI={10.1609/aaai.v37i8.26179}, abstractNote={Missing values are unavoidable in many applications of machine learning and present challenges both during training and at test time. When variables are missing in recurring patterns, fitting separate pattern submodels have been proposed as a solution. However, fitting models independently does not make efficient use of all available data. Conversely, fitting a single shared model to the full data set relies on imputation which often leads to biased results when missingness depends on unobserved factors. We propose an alternative approach, called sharing pattern submodels (SPSM), which i) makes predictions that are robust to missing values at test time, ii) maintains or improves the predictive power of pattern submodels, and iii) has a short description, enabling improved interpretability. Parameter sharing is enforced through sparsity-inducing regularization which we prove leads to consistent estimation. Finally, we give conditions for when a sharing model is optimal, even when both missingness and the target outcome depend on unobserved variables. 
Classification and regression experiments on synthetic and real-world data sets demonstrate that our models achieve a favorable tradeoff between pattern specialization and information sharing.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Stempfle, Lena and Panahi, Ashkan and Johansson, Fredrik D.}, year={2023}, month={Jun.}, pages={9882-9890} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26179/25951", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26179", + "pdf_size": 269955, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13192641792107880958&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "chalmers.se;chalmers.se;chalmers.se", + "email": "chalmers.se;chalmers.se;chalmers.se", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Chalmers University of Technology", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.chalmers.se", + "aff_unique_abbr": "Chalmers", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Gothenburg", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Sweden" + }, + { + "id": "article-25509", + "title": "SharpSSAT: A Witness-Generating Stochastic Boolean Satisfiability Solver", + "track": "main", + "status": "Technical", + "abstract": "Stochastic Boolean satisfiability (SSAT) is a formalism allowing decision-making for optimization under quantitative constraints. Although SSAT solvers are under active development, existing solvers do not provide Skolem-function witnesses, which are crucial for practical applications. In this work, we develop a new witness-generating SSAT solver, SharpSSAT, which integrates techniques, including component caching, clause learning, and pure literal detection. 
It can generate a set of Skolem functions witnessing the attained satisfying probability of a given SSAT formula. We also equip the solver ClauSSat with witness generation capability for comparison. Experimental results show that SharpSSAT outperforms current state-of-the-art solvers and can effectively generate compact Skolem-function witnesses. The new witness-generating solver may broaden the applicability of SSAT to practical applications.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yu-Wei Fan; Jie-Hong R. Jiang", + "authorids": "", + "aff": "Graduate Institute of Electronics Engineering, National Taiwan University, Taipei, Taiwan + Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan; Graduate Institute of Electronics Engineering, National Taiwan University, Taipei, Taiwan + Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan", + "bibtex": "@article{Fan_Jiang_2023, title={SharpSSAT: A Witness-Generating Stochastic Boolean Satisfiability Solver}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25509}, DOI={10.1609/aaai.v37i4.25509}, abstractNote={Stochastic Boolean satisfiability (SSAT) is a formalism allowing decision-making for optimization under quantitative constraints. Although SSAT solvers are under active development, existing solvers do not provide Skolem-function witnesses, which are crucial for practical applications. In this work, we develop a new witness-generating SSAT solver, SharpSSAT, which integrates techniques, including component caching, clause learning, and pure literal detection. It can generate a set of Skolem functions witnessing the attained satisfying probability of a given SSAT formula. We also equip the solver ClauSSat with witness generation capability for comparison. Experimental results show that SharpSSAT outperforms current state-of-the-art solvers and can effectively generate compact Skolem-function witnesses. 
The new witness-generating solver may broaden the applicability of SSAT to practical applications.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fan, Yu-Wei and Jiang, Jie-Hong R.}, year={2023}, month={Jun.}, pages={3949-3958} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25509/25281", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25509", + "pdf_size": 650320, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16866863136270803542&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 2, + "aff_domain": "ntu.edu.tw;ntu.edu.tw", + "email": "ntu.edu.tw;ntu.edu.tw", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "National Taiwan University", + "aff_unique_dep": "Graduate Institute of Electronics Engineering", + "aff_unique_url": "https://www.ntu.edu.tw", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Taipei", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-26522", + "title": "SheetPT: Spreadsheet Pre-training Based on Hierarchical Attention Network", + "track": "main", + "status": "Technical", + "abstract": "Spreadsheets are an important and unique type of business document for data storage, analysis and presentation. The distinction between spreadsheets and most other types of digital documents lies in that spreadsheets provide users with high flexibility of data organization on the grid. Existing related techniques mainly focus on the tabular data and are incompetent in understanding the entire sheet. On the one hand, spreadsheets have no explicit separation across tabular data and other information, leaving a gap for the deployment of such techniques. 
On the other hand, pervasive data dependence and semantic relations across the sheet require comprehensive modeling of all the information rather than only the tables. In this paper, we propose SheetPT, the first pre-training technique on spreadsheets to enable effective representation learning under this scenario. For computational effectiveness and efficiency, we propose the coherent chunk, an intermediate semantic unit of sheet structure; and we accordingly devise a hierarchical attention-based architecture to capture contextual information across different structural granularities. Three pre-training objectives are also designed to ensure sufficient training against millions of spreadsheets. Two representative downstream tasks, formula prediction and sheet structure recognition are utilized to evaluate its capability and the prominent results reveal its superiority over existing state-of-the-art methods.", + "primary_area": "speech natural language processing", + "author": "Ran Jia; Qiyu Li; Zihan Xu; Xiaoyuan Jin; Lun Du; Haoyu Dong; Xiao Lv; Shi Han; Dongmei Zhang", + "authorids": "", + "aff": "Microsoft Research Asia; Peking University+Microsoft Research Asia; Peking University+Microsoft Research Asia; Peking University+Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia", + "bibtex": "@article{Jia_Li_Xu_Jin_Du_Dong_Lv_Han_Zhang_2023, title={SheetPT: Spreadsheet Pre-training Based on Hierarchical Attention Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26522}, DOI={10.1609/aaai.v37i11.26522}, abstractNote={Spreadsheets are an important and unique type of business document for data storage, analysis and presentation. The distinction between spreadsheets and most other types of digital documents lies in that spreadsheets provide users with high flexibility of data organization on the grid. 
Existing related techniques mainly focus on the tabular data and are incompetent in understanding the entire sheet. On the one hand, spreadsheets have no explicit separation across tabular data and other information, leaving a gap for the deployment of such techniques. On the other hand, pervasive data dependence and semantic relations across the sheet require comprehensive modeling of all the information rather than only the tables. In this paper, we propose SheetPT, the first pre-training technique on spreadsheets to enable effective representation learning under this scenario. For computational effectiveness and efficiency, we propose the coherent chunk, an intermediate semantic unit of sheet structure; and we accordingly devise a hierarchical attention-based architecture to capture contextual information across different structural granularities. Three pre-training objectives are also designed to ensure sufficient training against millions of spreadsheets. Two representative downstream tasks, formula prediction and sheet structure recognition are utilized to evaluate its capability and the prominent results reveal its superiority over existing state-of-the-art methods.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jia, Ran and Li, Qiyu and Xu, Zihan and Jin, Xiaoyuan and Du, Lun and Dong, Haoyu and Lv, Xiao and Han, Shi and Zhang, Dongmei}, year={2023}, month={Jun.}, pages={12951-12958} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26522/26294", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26522", + "pdf_size": 642714, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:MNzKmTvqAlsJ:scholar.google.com/&scioq=SheetPT:+Spreadsheet+Pre-training+Based+on+Hierarchical+Attention+Network&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": 
"microsoft.com;pku.edu.cn;pku.edu.cn;pku.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;pku.edu.cn;pku.edu.cn;pku.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1+0;1+0;1+0;0;0;0;0;0", + "aff_unique_norm": "Microsoft Research;Peking University", + "aff_unique_dep": "Research;", + "aff_unique_url": "https://www.microsoft.com/en-us/research/group/asia;http://www.pku.edu.cn", + "aff_unique_abbr": "MSR Asia;Peking U", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_campus_unique": "Asia;", + "aff_country_unique_index": "0;0+0;0+0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26715", + "title": "Shielding in Resource-Constrained Goal POMDPs", + "track": "aaai special track", + "status": "Technical", + "abstract": "We consider partially observable Markov decision processes (POMDPs) modeling an agent that needs a supply of a certain resource (e.g., electricity stored in batteries) to operate correctly. The resource is consumed by the agent's actions and can be replenished only in certain states. The agent aims to minimize the expected cost of reaching some goal while preventing resource exhaustion, a problem we call resource-constrained goal optimization (RSGO). We take a two-step approach to the RSGO problem. First, using formal methods techniques, we design an algorithm computing a shield for a given scenario: a procedure that observes the agent and prevents it from using actions that might eventually lead to resource exhaustion. Second, we augment the POMCP heuristic search algorithm for POMDP planning with our shields to obtain an algorithm solving the RSGO problem. 
We implement our algorithm and present experiments showing its applicability to benchmarks from the literature.", + "primary_area": "safe and robust ai", + "author": "Michal Ajdar\u00f3w; \u0160imon Brlej; Petr Novotn\u00fd", + "authorids": "", + "aff": "Faculty of Informatics, Masaryk University; Faculty of Informatics, Masaryk University; Faculty of Informatics, Masaryk University", + "bibtex": "@article{Ajdar\u00f3w_Brlej_Novotn\u00fd_2023, title={Shielding in Resource-Constrained Goal POMDPs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26715}, DOI={10.1609/aaai.v37i12.26715}, abstractNote={We consider partially observable Markov decision processes (POMDPs) modeling an agent that needs a supply of a certain resource (e.g., electricity stored in batteries) to operate correctly. The resource is consumed by the agent\u2019s actions and can be replenished only in certain states. The agent aims to minimize the expected cost of reaching some goal while preventing resource exhaustion, a problem we call resource-constrained goal optimization (RSGO). We take a two-step approach to the RSGO problem. First, using formal methods techniques, we design an algorithm computing a shield for a given scenario: a procedure that observes the agent and prevents it from using actions that might eventually lead to resource exhaustion. Second, we augment the POMCP heuristic search algorithm for POMDP planning with our shields to obtain an algorithm solving the RSGO problem. 
We implement our algorithm and present experiments showing its applicability to benchmarks from the literature.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ajdar\u00f3w, Michal and Brlej, \u0160imon and Novotn\u00fd, Petr}, year={2023}, month={Jun.}, pages={14674-14682} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26715/26487", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26715", + "pdf_size": 172973, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12664743796524976346&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "fi.muni.cz;fi.muni.cz;fi.muni.cz", + "email": "fi.muni.cz;fi.muni.cz;fi.muni.cz", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Masaryk University", + "aff_unique_dep": "Faculty of Informatics", + "aff_unique_url": "https://www.muni.cz", + "aff_unique_abbr": "MU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Czech Republic" + }, + { + "id": "article-25465", + "title": "ShiftDDPMs: Exploring Conditional Diffusion Models by Shifting Diffusion Trajectories", + "track": "main", + "status": "Technical", + "abstract": "Diffusion models have recently exhibited remarkable abilities to synthesize striking image samples since the introduction of denoising diffusion probabilistic models (DDPMs). Their key idea is to disrupt images into noise through a fixed forward process and learn its reverse process to generate samples from noise in a denoising way. For conditional DDPMs, most existing practices relate conditions only to the reverse process and fit it to the reversal of unconditional forward process. We find this will limit the condition modeling and generation in a small time window. 
In this paper, we propose a novel and flexible conditional diffusion model by introducing conditions into the forward process. We utilize extra latent space to allocate an exclusive diffusion trajectory for each condition based on some shifting rules, which will disperse condition modeling to all timesteps and improve the learning capacity of model. We formulate our method, which we call ShiftDDPMs, and provide a unified point of view on existing related methods. Extensive qualitative and quantitative experiments on image synthesis demonstrate the feasibility and effectiveness of ShiftDDPMs.", + "primary_area": "computer vision iii", + "author": "Zijian Zhang; Zhou Zhao; Jun Yu; Qi Tian", + "authorids": "", + "aff": "Department of Computer Science and Technology, Zhejiang University; Department of Computer Science and Technology, Zhejiang University; School of Computer Science and Technology, Hangzhou Dianzi University; Huawei Cloud & AI", + "bibtex": "@article{Zhang_Zhao_Yu_Tian_2023, title={ShiftDDPMs: Exploring Conditional Diffusion Models by Shifting Diffusion Trajectories}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25465}, DOI={10.1609/aaai.v37i3.25465}, abstractNote={Diffusion models have recently exhibited remarkable abilities to synthesize striking image samples since the introduction of denoising diffusion probabilistic models (DDPMs). Their key idea is to disrupt images into noise through a fixed forward process and learn its reverse process to generate samples from noise in a denoising way. For conditional DDPMs, most existing practices relate conditions only to the reverse process and fit it to the reversal of unconditional forward process. We find this will limit the condition modeling and generation in a small time window. In this paper, we propose a novel and flexible conditional diffusion model by introducing conditions into the forward process. 
We utilize extra latent space to allocate an exclusive diffusion trajectory for each condition based on some shifting rules, which will disperse condition modeling to all timesteps and improve the learning capacity of model. We formulate our method, which we call ShiftDDPMs, and provide a unified point of view on existing related methods. Extensive qualitative and quantitative experiments on image synthesis demonstrate the feasibility and effectiveness of ShiftDDPMs.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zijian and Zhao, Zhou and Yu, Jun and Tian, Qi}, year={2023}, month={Jun.}, pages={3552-3560} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25465/25237", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25465", + "pdf_size": 4101249, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16813519264365648660&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;hdu.edu.cn;huawei.com", + "email": "zju.edu.cn;zju.edu.cn;hdu.edu.cn;huawei.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "Zhejiang University;Hangzhou Dianzi University;Huawei", + "aff_unique_dep": "Department of Computer Science and Technology;School of Computer Science and Technology;Cloud & AI", + "aff_unique_url": "http://www.zju.edu.cn;https://www.hdu.edu.cn;https://www.huawei.com/en/cloud", + "aff_unique_abbr": "ZJU;HDU;Huawei Cloud & AI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25626", + "title": "Show Me the Way! Bilevel Search for Synthesizing Programmatic Strategies", + "track": "main", + "status": "Technical", + "abstract": "The synthesis of programmatic strategies requires one to search in large non-differentiable spaces of computer programs. 
Current search algorithms use self-play approaches to guide this search. The issue with these approaches is that the guiding function often provides a weak search signal. This is because self-play functions only measure how well a program performs against other programs. Thus, while small changes to a losing program might not transform it into a winning one, such changes might represent steps in the direction of a winning program. In this paper we introduce a bilevel search algorithm that searches concurrently in the space of programs and in a space of state features. Each iteration of the search in the space of features defines a set of target features that the search in the program space attempts to achieve (i.e., features one observes while following the strategy encoded in a program). We hypothesize the combination of a self-play function and a feature-based one provides a stronger search signal for synthesis. While both functions are used to guide the search in the program space, the self-play function is used to guide the search in the feature space, to allow for the selection of target features that are more likely to lead to winning programs. We evaluated our bilevel algorithm in MicroRTS, a real-time strategy game. Our results show that the bilevel search synthesizes stronger strategies than methods that search only in the program space. Also, the strategies our method synthesizes obtained the highest winning rate in a simulated tournament with several baseline agents, including the best agents from the two latest MicroRTS competitions.", + "primary_area": "domain s of application", + "author": "David S. Aleixo; Levi H.S. Lelis", + "authorids": "", + "aff": "Departamento de Inform\u00e1tica, Universidade Federal de Vicosa, Brazil; Department of Computing Science, Alberta Machine Intelligence Institute (Amii), University of Alberta, Canada", + "bibtex": "@article{Aleixo_Lelis_2023, title={Show Me the Way! 
Bilevel Search for Synthesizing Programmatic Strategies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25626}, DOI={10.1609/aaai.v37i4.25626}, abstractNote={The synthesis of programmatic strategies requires one to search in large non-differentiable spaces of computer programs. Current search algorithms use self-play approaches to guide this search. The issue with these approaches is that the guiding function often provides a weak search signal. This is because self-play functions only measure how well a program performs against other programs. Thus, while small changes to a losing program might not transform it into a winning one, such changes might represent steps in the direction of a winning program. In this paper we introduce a bilevel search algorithm that searches concurrently in the space of programs and in a space of state features. Each iteration of the search in the space of features defines a set of target features that the search in the program space attempts to achieve (i.e., features one observes while following the strategy encoded in a program). We hypothesize the combination of a self-play function and a feature-based one provides a stronger search signal for synthesis. While both functions are used to guide the search in the program space, the self-play function is used to guide the search in the feature space, to allow for the selection of target features that are more likely to lead to winning programs. We evaluated our bilevel algorithm in MicroRTS, a real-time strategy game. Our results show that the bilevel search synthesizes stronger strategies than methods that search only in the program space. Also, the strategies our method synthesizes obtained the highest winning rate in a simulated tournament with several baseline agents, including the best agents from the two latest MicroRTS competitions.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Aleixo, David S. 
and Lelis, Levi H.S.}, year={2023}, month={Jun.}, pages={4991-4998} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25626/25398", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25626", + "pdf_size": 237564, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8242824944294413930&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Universidade Federal de Vicosa;University of Alberta", + "aff_unique_dep": "Departamento de Inform\u00e1tica;Department of Computing Science", + "aff_unique_url": "http://www.ufv.br/;https://www.ualberta.ca", + "aff_unique_abbr": "UFV;UAlberta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Brazil;Canada" + }, + { + "id": "article-25285", + "title": "Show, Interpret and Tell: Entity-Aware Contextualised Image Captioning in Wikipedia", + "track": "main", + "status": "Technical", + "abstract": "Humans exploit prior knowledge to describe images, and are able to adapt their explanation to specific contextual information given, even to the extent of inventing plausible explanations when contextual information and images do not match. In this work, we propose the novel task of captioning Wikipedia images by integrating contextual knowledge. Specifically, we produce models that jointly reason over Wikipedia articles, Wikimedia images and their associated descriptions to produce contextualized captions. The same Wikimedia image can be used to illustrate different articles, and the produced caption needs to be adapted to the specific context allowing us to explore the limits of the model to adjust captions to different contextual information. Dealing with out-of-dictionary words and Named Entities is a challenging task in this domain. 
To address this, we propose a pre-training objective, Masked Named Entity Modeling (MNEM), and show that this pretext task results to significantly improved models. Furthermore, we verify that a model pre-trained in Wikipedia generalizes well to News Captioning datasets. We further define two different test splits according to the difficulty of the captioning task. We offer insights on the role and the importance of each modality and highlight the limitations of our model.", + "primary_area": "computer vision ii", + "author": "Khanh Nguyen; Ali Furkan Biten; Andres Mafla; Lluis Gomez; Dimosthenis Karatzas", + "authorids": "", + "aff": "Computer Vision Center, Universitat Aut `onoma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut `onoma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut `onoma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut `onoma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut `onoma de Barcelona, Barcelona, Spain", + "bibtex": "@article{Nguyen_Biten_Mafla_Gomez_Karatzas_2023, title={Show, Interpret and Tell: Entity-Aware Contextualised Image Captioning in Wikipedia}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25285}, DOI={10.1609/aaai.v37i2.25285}, abstractNote={Humans exploit prior knowledge to describe images, and are able to adapt their explanation to specific contextual information given, even to the extent of inventing plausible explanations when contextual information and images do not match. In this work, we propose the novel task of captioning Wikipedia images by integrating contextual knowledge. Specifically, we produce models that jointly reason over Wikipedia articles, Wikimedia images and their associated descriptions to produce contextualized captions. 
The same Wikimedia image can be used to illustrate different articles, and the produced caption needs to be adapted to the specific context allowing us to explore the limits of the model to adjust captions to different contextual information. Dealing with out-of-dictionary words and Named Entities is a challenging task in this domain. To address this, we propose a pre-training objective, Masked Named Entity Modeling (MNEM), and show that this pretext task results to significantly improved models. Furthermore, we verify that a model pre-trained in Wikipedia generalizes well to News Captioning datasets. We further define two different test splits according to the difficulty of the captioning task. We offer insights on the role and the importance of each modality and highlight the limitations of our model.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Khanh and Biten, Ali Furkan and Mafla, Andres and Gomez, Lluis and Karatzas, Dimosthenis}, year={2023}, month={Jun.}, pages={1940-1948} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25285/25057", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25285", + "pdf_size": 1759497, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15295066148173002998&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cvc.uab.es;cvc.uab.es;cvc.uab.es;cvc.uab.es;cvc.uab.es", + "email": "cvc.uab.es;cvc.uab.es;cvc.uab.es;cvc.uab.es;cvc.uab.es", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Universitat Autonoma de Barcelona", + "aff_unique_dep": "Computer Vision Center", + "aff_unique_url": "https://www.uab.cat", + "aff_unique_abbr": "UAB", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Barcelona", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "article-25325", + "title": 
"Siamese-Discriminant Deep Reinforcement Learning for Solving Jigsaw Puzzles with Large Eroded Gaps", + "track": "main", + "status": "Technical", + "abstract": "Jigsaw puzzle solving has recently become an emerging research area. The developed techniques have been widely used in applications beyond puzzle solving. This paper focuses on solving Jigsaw Puzzles with Large Eroded Gaps (JPwLEG). We formulate the puzzle reassembly as a combinatorial optimization problem and propose a Siamese-Discriminant Deep Reinforcement Learning (SD2RL) to solve it. A Deep Q-network (DQN) is designed to visually understand the puzzles, which consists of two sets of Siamese Discriminant Networks, one set to perceive the pairwise relations between vertical neighbors and another set for horizontal neighbors. The proposed DQN considers not only the evidence from the incumbent fragment but also the support from its four neighbors. The DQN is trained using replay experience with carefully designed rewards to guide the search for a sequence of fragment swaps to reach the correct puzzle solution. 
Two JPwLEG datasets are constructed to evaluate the proposed method, and the experimental results show that the proposed SD2RL significantly outperforms state-of-the-art methods.", + "primary_area": "computer vision ii", + "author": "Xingke Song; Jiahuan Jin; Chenglin Yao; Shihe Wang; Jianfeng Ren; Ruibin Bai", + "authorids": "", + "aff": "School of Computer Science, University of Nottingham Ningbo China, China; School of Computer Science, University of Nottingham Ningbo China, China; School of Computer Science, University of Nottingham Ningbo China, China; School of Computer Science, University of Nottingham Ningbo China, China; School of Computer Science, University of Nottingham Ningbo China, China + Nottingham Ningbo China Beacons of Excellence Research and Innovation Institute, University of Nottingham Ningbo China, China; School of Computer Science, University of Nottingham Ningbo China, China + Nottingham Ningbo China Beacons of Excellence Research and Innovation Institute, University of Nottingham Ningbo China, China", + "bibtex": "@article{Song_Jin_Yao_Wang_Ren_Bai_2023, title={Siamese-Discriminant Deep Reinforcement Learning for Solving Jigsaw Puzzles with Large Eroded Gaps}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25325}, DOI={10.1609/aaai.v37i2.25325}, abstractNote={Jigsaw puzzle solving has recently become an emerging research area. The developed techniques have been widely used in applications beyond puzzle solving. This paper focuses on solving Jigsaw Puzzles with Large Eroded Gaps (JPwLEG). We formulate the puzzle reassembly as a combinatorial optimization problem and propose a Siamese-Discriminant Deep Reinforcement Learning (SD2RL) to solve it. A Deep Q-network (DQN) is designed to visually understand the puzzles, which consists of two sets of Siamese Discriminant Networks, one set to perceive the pairwise relations between vertical neighbors and another set for horizontal neighbors. 
The proposed DQN considers not only the evidence from the incumbent fragment but also the support from its four neighbors. The DQN is trained using replay experience with carefully designed rewards to guide the search for a sequence of fragment swaps to reach the correct puzzle solution. Two JPwLEG datasets are constructed to evaluate the proposed method, and the experimental results show that the proposed SD2RL significantly outperforms state-of-the-art methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Song, Xingke and Jin, Jiahuan and Yao, Chenglin and Wang, Shihe and Ren, Jianfeng and Bai, Ruibin}, year={2023}, month={Jun.}, pages={2303-2311} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25325/25097", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25325", + "pdf_size": 2916313, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2454229847032912647&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn", + "email": "nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn;nottingham.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0+0;0+0", + "aff_unique_norm": "University of Nottingham Ningbo China", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.nottingham.edu.cn", + "aff_unique_abbr": "UNNC", + "aff_campus_unique_index": "0;0;0;0;0+0;0+0", + "aff_campus_unique": "Ningbo", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25919", + "title": "SigMaNet: One Laplacian to Rule Them All", + "track": "main", + "status": "Technical", + "abstract": "This paper introduces SigMaNet, a generalized Graph Convolutional Network (GCN) capable of handling both undirected 
and directed graphs with weights not restricted in sign nor magnitude. The cornerstone of SigMaNet is the Sign-Magnetic Laplacian (LSM), a new Laplacian matrix that we introduce ex novo in this work. LSM allows us to bridge a gap in the current literature by extending the theory of spectral GCNs to (directed) graphs with both positive and negative weights. LSM exhibits several desirable properties not enjoyed by other Laplacian matrices on which several state-of-the-art architectures are based, among which encoding the edge direction and weight in a clear and natural way that is not negatively affected by the weight magnitude. LSM is also completely parameter-free, which is not the case of other Laplacian operators such as, e.g., the Magnetic Laplacian. The versatility and the performance of our proposed approach is amply demonstrated via computational experiments. Indeed, our results show that, for at least a metric, SigMaNet achieves the best performance in 15 out of 21 cases and either the first- or second-best performance in 21 cases out of 21, even when compared to architectures that are either more complex or that, due to being designed for a narrower class of graphs, should---but do not---achieve a better performance.", + "primary_area": "machine learning i", + "author": "Stefano Fiorini; Stefano Coniglio; Michele Ciavotta; Enza Messina", + "authorids": "", + "aff": "University of Milano-Bicocca, Milan, Italy; University of Bergamo, Bergamo, Italy; University of Milano-Bicocca, Milan, Italy; University of Milano-Bicocca, Milan, Italy", + "bibtex": "@article{Fiorini_Coniglio_Ciavotta_Messina_2023, title={SigMaNet: One Laplacian to Rule Them All}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25919}, DOI={10.1609/aaai.v37i6.25919}, abstractNote={This paper introduces SigMaNet, a generalized Graph Convolutional Network (GCN) capable of handling both undirected and directed graphs with weights not restricted in sign nor magnitude. 
The cornerstone of SigMaNet is the Sign-Magnetic Laplacian (LSM), a new Laplacian matrix that we introduce ex novo in this work. LSM allows us to bridge a gap in the current literature by extending the theory of spectral GCNs to (directed) graphs with both positive and negative weights. LSM exhibits several desirable properties not enjoyed by other Laplacian matrices on which several state-of-the-art architectures are based, among which encoding the edge direction and weight in a clear and natural way that is not negatively affected by the weight magnitude. LSM is also completely parameter-free, which is not the case of other Laplacian operators such as, e.g., the Magnetic Laplacian. The versatility and the performance of our proposed approach is amply demonstrated via computational experiments. Indeed, our results show that, for at least a metric, SigMaNet achieves the best performance in 15 out of 21 cases and either the first- or second-best performance in 21 cases out of 21, even when compared to architectures that are either more complex or that, due to being designed for a narrower class of graphs, should---but do not---achieve a better performance.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fiorini, Stefano and Coniglio, Stefano and Ciavotta, Michele and Messina, Enza}, year={2023}, month={Jun.}, pages={7568-7576} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25919/25691", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25919", + "pdf_size": 165971, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6456586147592214816&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "unimib.it;unibg.it;unimib.it;unimib.it", + "email": "unimib.it;unibg.it;unimib.it;unimib.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Milano-Bicocca;University of Bergamo", + 
"aff_unique_dep": ";", + "aff_unique_url": "https://www.unimib.it;https://www.unibg.it", + "aff_unique_abbr": "UNIMIB;", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Milan;Bergamo", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25565", + "title": "Signed Laplacian Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "This paper studies learning meaningful node representations for signed graphs, where both positive and negative links exist. This problem has been widely studied by meticulously designing expressive signed graph neural networks, as well as capturing the structural information of the signed graph through traditional structure decomposition methods, e.g., spectral graph theory. In this paper, we propose a novel signed graph representation learning framework, called Signed Laplacian Graph Neural Network (SLGNN), which combines the advantages of both. Specifically, based on spectral graph theory and graph signal processing, we first design different low-pass and high-pass graph convolution filters to extract low-frequency and high-frequency information on positive and negative links, respectively, and then combine them into a unified message passing framework. To effectively model signed graphs, we further propose a self-gating mechanism to estimate the impacts of low-frequency and high-frequency information during message passing. We mathematically establish the relationship between the aggregation process in SLGNN and signed Laplacian regularization in signed graphs, and theoretically analyze the expressiveness of SLGNN. 
Experimental results demonstrate that SLGNN outperforms various competitive baselines and achieves state-of-the-art performance.", + "primary_area": "data mining and knowledge management", + "author": "Yu Li; Meng Qu; Jian Tang; Yi Chang", + "authorids": "", + "aff": "College of Computer Science and Technology, Jilin University, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China; Mila - Qu\u00b4ebec AI Institute, Canada+Univesit\u00b4e de Montr\u00b4eal, Canada+HEC Montr\u00b4eal, Canada+CIFAR AI Research Chair, Canada; Mila - Qu\u00b4ebec AI Institute, Canada+Univesit\u00b4e de Montr\u00b4eal, Canada+HEC Montr\u00b4eal, Canada+CIFAR AI Research Chair, Canada; School of Artificial Intelligence, Jilin University, China+International Center of Future Science, Jilin University, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China", + "bibtex": "@article{Li_Qu_Tang_Chang_2023, title={Signed Laplacian Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25565}, DOI={10.1609/aaai.v37i4.25565}, abstractNote={This paper studies learning meaningful node representations for signed graphs, where both positive and negative links exist. This problem has been widely studied by meticulously designing expressive signed graph neural networks, as well as capturing the structural information of the signed graph through traditional structure decomposition methods, e.g., spectral graph theory. In this paper, we propose a novel signed graph representation learning framework, called Signed Laplacian Graph Neural Network (SLGNN), which combines the advantages of both. 
Specifically, based on spectral graph theory and graph signal processing, we first design different low-pass and high-pass graph convolution filters to extract low-frequency and high-frequency information on positive and negative links, respectively, and then combine them into a unified message passing framework. To effectively model signed graphs, we further propose a self-gating mechanism to estimate the impacts of low-frequency and high-frequency information during message passing. We mathematically establish the relationship between the aggregation process in SLGNN and signed Laplacian regularization in signed graphs, and theoretically analyze the expressiveness of SLGNN. Experimental results demonstrate that SLGNN outperforms various competitive baselines and achieves state-of-the-art performance.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yu and Qu, Meng and Tang, Jian and Chang, Yi}, year={2023}, month={Jun.}, pages={4444-4452} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25565/25337", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25565", + "pdf_size": 256567, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=216537346345243191&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.jlu.edu.cn;umontreal.ca;hec.ca;jlu.edu.cn", + "email": "mails.jlu.edu.cn;umontreal.ca;hec.ca;jlu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2+3+4+5;2+3+4+5;0+0+1", + "aff_unique_norm": "Jilin University;Engineering Research Center of Knowledge-Driven Human-Machine Intelligence;Mila - Quebec AI Institute;Universit\u00e9 de Montr\u00e9al;HEC Montr\u00e9al;CIFAR", + "aff_unique_dep": "College of Computer Science and Technology;Ministry of Education;AI Institute;;;AI Research", + "aff_unique_url": 
"http://www.jlu.edu.cn;;https://mila.quebec;https://www.umontreal.ca;https://www.hec.ca;https://www.cifar.ca", + "aff_unique_abbr": "JLU;;Mila;UdeM;HEC;CIFAR", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1+1+1+1;1+1+1+1;0+0+0", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-26677", + "title": "SimFair: A Unified Framework for Fairness-Aware Multi-Label Classification", + "track": "aaai special track", + "status": "Technical", + "abstract": "Recent years have witnessed increasing concerns towards unfair decisions made by machine learning algorithms. To improve fairness in model decisions, various fairness notions have been proposed and many fairness-aware methods are developed. However, most of existing definitions and methods focus only on single-label classification. Fairness for multi-label classification, where each instance is associated with more than one labels, is still yet to establish. To fill this gap, we study fairness-aware multi-label classification in this paper. We start by extending Demographic Parity (DP) and Equalized Opportunity (EOp), two popular fairness notions, to multi-label classification scenarios. Through a systematic study, we show that on multi-label data, because of unevenly distributed labels, EOp usually fails to construct a reliable estimate on labels with few instances. We then propose a new framework named Similarity s-induced Fairness (s\u03b3 -SimFair). This new framework utilizes data that have similar labels when estimating fairness on a particular label group for better stability, and can unify DP and EOp. 
Theoretical analysis and experimental results on real-world datasets together demonstrate the advantage of s\u03b3 -SimFair over existing methods on multi-label classification tasks.", + "primary_area": "ai for social impact", + "author": "Tianci Liu; Haoyu Wang; Yaqing Wang; Xiaoqian Wang; Lu Su; Jing Gao", + "authorids": "", + "aff": "School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA, 47907; School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA, 47907; School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA, 47907; School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA, 47907; School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA, 47907; School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA, 47907", + "bibtex": "@article{Liu_Wang_Wang_Wang_Su_Gao_2023, title={SimFair: A Unified Framework for Fairness-Aware Multi-Label Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26677}, DOI={10.1609/aaai.v37i12.26677}, abstractNote={Recent years have witnessed increasing concerns towards unfair decisions made by machine learning algorithms. To improve fairness in model decisions, various fairness notions have been proposed and many fairness-aware methods are developed. However, most of existing definitions and methods focus only on single-label classification. Fairness for multi-label classification, where each instance is associated with more than one labels, is still yet to establish. To fill this gap, we study fairness-aware multi-label classification in this paper. We start by extending Demographic Parity (DP) and Equalized Opportunity (EOp), two popular fairness notions, to multi-label classification scenarios. 
Through a systematic study, we show that on multi-label data, because of unevenly distributed labels, EOp usually fails to construct a reliable estimate on labels with few instances. We then propose a new framework named Similarity s-induced Fairness (s\u03b3 -SimFair). This new framework utilizes data that have similar labels when estimating fairness on a particular label group for better stability, and can unify DP and EOp. Theoretical analysis and experimental results on real-world datasets together demonstrate the advantage of s\u03b3 -SimFair over existing methods on multi-label classification tasks.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Tianci and Wang, Haoyu and Wang, Yaqing and Wang, Xiaoqian and Su, Lu and Gao, Jing}, year={2023}, month={Jun.}, pages={14338-14346} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26677/26449", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26677", + "pdf_size": 380461, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13953769828484912737&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "purdue.edu;purdue.edu;purdue.edu;purdue.edu;purdue.edu;purdue.edu", + "email": "purdue.edu;purdue.edu;purdue.edu;purdue.edu;purdue.edu;purdue.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Purdue University", + "aff_unique_dep": "School of Electrical and Computer Engineering", + "aff_unique_url": "https://www.purdue.edu", + "aff_unique_abbr": "Purdue", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "West Lafayette", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26731", + "title": "Similarity Distribution Based Membership Inference Attack on Person Re-identification", + "track": "aaai special track", + "status": "Technical", + "abstract": 
"While person Re-identification (Re-ID) has progressed rapidly due to its wide real-world applications, it also causes severe risks of leaking personal information from training data. Thus, this paper focuses on quantifying this risk by membership inference (MI) attack. Most of the existing MI attack algorithms focus on classification models, while Re-ID follows a totally different training and inference paradigm. Re-ID is a fine-grained recognition task with complex feature embedding, and model outputs commonly used by existing MI like logits and losses are not accessible during inference. Since Re-ID focuses on modelling the relative relationship between image pairs instead of individual semantics, we conduct a formal and empirical analysis which validates that the distribution shift of the inter-sample similarity between training and test set is a critical criterion for Re-ID membership inference. As a result, we propose a novel membership inference attack method based on the inter-sample similarity distribution. Specifically, a set of anchor images are sampled to represent the similarity distribution conditioned on a target image, and a neural network with a novel anchor selection module is proposed to predict the membership of the target image. 
Our experiments validate the effectiveness of the proposed approach on both the Re-ID task and conventional classification task.", + "primary_area": "safe and robust ai", + "author": "Junyao Gao; Xinyang Jiang; Huishuai Zhang; Yifan Yang; Shuguang Dou; Dongsheng Li; Duoqian Miao; Cheng Deng; Cairong Zhao", + "authorids": "", + "aff": "Tongji University; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Tongji University; Microsoft Research Asia; Tongji University; Xidian University; Tongji University", + "bibtex": "@article{Gao_Jiang_Zhang_Yang_Dou_Li_Miao_Deng_Zhao_2023, title={Similarity Distribution Based Membership Inference Attack on Person Re-identification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26731}, DOI={10.1609/aaai.v37i12.26731}, abstractNote={While person Re-identification (Re-ID) has progressed rapidly due to its wide real-world applications, it also causes severe risks of leaking personal information from training data. Thus, this paper focuses on quantifying this risk by membership inference (MI) attack. Most of the existing MI attack algorithms focus on classification models, while Re-ID follows a totally different training and inference paradigm. Re-ID is a fine-grained recognition task with complex feature embedding, and model outputs commonly used by existing MI like logits and losses are not accessible during inference. Since Re-ID focuses on modelling the relative relationship between image pairs instead of individual semantics, we conduct a formal and empirical analysis which validates that the distribution shift of the inter-sample similarity between training and test set is a critical criterion for Re-ID membership inference. As a result, we propose a novel membership inference attack method based on the inter-sample similarity distribution. 
Specifically, a set of anchor images are sampled to represent the similarity distribution conditioned on a target image, and a neural network with a novel anchor selection module is proposed to predict the membership of the target image. Our experiments validate the effectiveness of the proposed approach on both the Re-ID task and conventional classification task.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Junyao and Jiang, Xinyang and Zhang, Huishuai and Yang, Yifan and Dou, Shuguang and Li, Dongsheng and Miao, Duoqian and Deng, Cheng and Zhao, Cairong}, year={2023}, month={Jun.}, pages={14820-14828} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26731/26503", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26731", + "pdf_size": 694524, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8843743787793275569&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "mail.tongji.edu.cn; ; ; ; ; ; ; ;mail.tongji.edu.cn", + "email": "mail.tongji.edu.cn; ; ; ; ; ; ; ;mail.tongji.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;0;1;0;2;0", + "aff_unique_norm": "Tongji University;Microsoft Research;Xidian University", + "aff_unique_dep": ";Research;", + "aff_unique_url": "https://www.tongji.edu.cn;https://www.microsoft.com/en-us/research/group/asia;http://www.xidian.edu.cn/", + "aff_unique_abbr": "Tongji;MSR Asia;Xidian", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25199", + "title": "Simple and Effective Synthesis of Indoor 3D Scenes", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of synthesizing immersive 3D indoor scenes from one or a few images. 
Our aim is to generate high-resolution images and videos from novel viewpoints, including viewpoints that extrapolate far beyond the input images while maintaining 3D consistency. Existing approaches are highly complex, with many separately trained stages and components. We propose a simple alternative: an image-to-image GAN that maps directly from reprojections of incomplete point clouds to full high-resolution RGB-D images. On the Matterport3D and RealEstate10K datasets, our approach significantly outperforms prior work when evaluated by humans, as well as on FID scores. Further, we show that our model is useful for generative data augmentation. A vision-and-language navigation (VLN) agent trained with trajectories spatially-perturbed by our model improves success rate by up to 1.5% over a state of the art baseline on the mature R2R benchmark. Our code will be made available to facilitate generative data augmentation and applications to downstream robotics and embodied AI tasks.", + "primary_area": "computer vision i", + "author": "Jing Yu Koh; Harsh Agrawal; Dhruv Batra; Richard Tucker; Austin Waters; Honglak Lee; Yinfei Yang; Jason Baldridge; Peter Anderson", + "authorids": "", + "aff": "Google Research; Georgia Institute of Technology; Georgia Institute of Technology; Google Research; Google Research; University of Michigan; Apple + Google Research; Google Research; Google Research", + "bibtex": "@article{Koh_Agrawal_Batra_Tucker_Waters_Lee_Yang_Baldridge_Anderson_2023, title={Simple and Effective Synthesis of Indoor 3D Scenes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25199}, DOI={10.1609/aaai.v37i1.25199}, abstractNote={We study the problem of synthesizing immersive 3D indoor scenes from one or a few images. Our aim is to generate high-resolution images and videos from novel viewpoints, including viewpoints that extrapolate far beyond the input images while maintaining 3D consistency. 
Existing approaches are highly complex, with many separately trained stages and components. We propose a simple alternative: an image-to-image GAN that maps directly from reprojections of incomplete point clouds to full high-resolution RGB-D images. On the Matterport3D and RealEstate10K datasets, our approach significantly outperforms prior work when evaluated by humans, as well as on FID scores. Further, we show that our model is useful for generative data augmentation. A vision-and-language navigation (VLN) agent trained with trajectories spatially-perturbed by our model improves success rate by up to 1.5% over a state of the art baseline on the mature R2R benchmark. Our code will be made available to facilitate generative data augmentation and applications to downstream robotics and embodied AI tasks.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Koh, Jing Yu and Agrawal, Harsh and Batra, Dhruv and Tucker, Richard and Waters, Austin and Lee, Honglak and Yang, Yinfei and Baldridge, Jason and Anderson, Peter}, year={2023}, month={Jun.}, pages={1169-1178} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25199/24971", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25199", + "pdf_size": 5193756, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4628069040955043200&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "cmu.edu;cc.gatech.edu;cc.gatech.edu;google.com;google.com;umich.edu;apple.com;google.com;google.com", + "email": "cmu.edu;cc.gatech.edu;cc.gatech.edu;google.com;google.com;umich.edu;apple.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;0;0;2;3+0;0;0", + "aff_unique_norm": "Google;Georgia Institute of Technology;University of Michigan;Apple Inc.", + "aff_unique_dep": "Google Research;;;", + "aff_unique_url": 
"https://research.google;https://www.gatech.edu;https://www.umich.edu;https://www.apple.com", + "aff_unique_abbr": "Google Research;Georgia Tech;UM;Apple", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Mountain View;", + "aff_country_unique_index": "0;0;0;0;0;0;0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26283", + "title": "Simple and Efficient Heterogeneous Graph Neural Network", + "track": "main", + "status": "Technical", + "abstract": "Heterogeneous graph neural networks (HGNNs) have the powerful capability to embed rich structural and semantic information of a heterogeneous graph into node representations. Existing HGNNs inherit many mechanisms from graph neural networks (GNNs) designed for homogeneous graphs, especially the attention mechanism and the multi-layer structure. These mechanisms bring excessive complexity, but seldom work studies whether they are really effective on heterogeneous graphs. In this paper, we conduct an in-depth and detailed study of these mechanisms and propose the Simple and Efficient Heterogeneous Graph Neural Network (SeHGNN). To easily capture structural information, SeHGNN pre-computes the neighbor aggregation using a light-weight mean aggregator, which reduces complexity by removing overused neighbor attention and avoiding repeated neighbor aggregation in every training epoch. To better utilize semantic information, SeHGNN adopts the single-layer structure with long metapaths to extend the receptive field, as well as a transformer-based semantic fusion module to fuse features from different metapaths. As a result, SeHGNN exhibits the characteristics of a simple network structure, high prediction accuracy, and fast training speed. 
Extensive experiments on five real-world heterogeneous graphs demonstrate the superiority of SeHGNN over the state-of-the-arts on both accuracy and training speed.", + "primary_area": "machine learning iv", + "author": "Xiaocheng Yang; Mingyu Yan; Shirui Pan; Xiaochun Ye; Dongrui Fan", + "authorids": "", + "aff": "State Key Lab of Processors, Institute for Computing Technology, Chinese Academy of Sciences, China; State Key Lab of Processors, Institute for Computing Technology, Chinese Academy of Sciences, China; School of Information and Communication Technology, Griffith University, Australia; State Key Lab of Processors, Institute for Computing Technology, Chinese Academy of Sciences, China; State Key Lab of Processors, Institute for Computing Technology, Chinese Academy of Sciences, China+School of Computer Science and Technology, University of Chinese Academy of Sciences, China", + "bibtex": "@article{Yang_Yan_Pan_Ye_Fan_2023, title={Simple and Efficient Heterogeneous Graph Neural Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26283}, DOI={10.1609/aaai.v37i9.26283}, abstractNote={Heterogeneous graph neural networks (HGNNs) have the powerful capability to embed rich structural and semantic information of a heterogeneous graph into node representations. Existing HGNNs inherit many mechanisms from graph neural networks (GNNs) designed for homogeneous graphs, especially the attention mechanism and the multi-layer structure. These mechanisms bring excessive complexity, but seldom work studies whether they are really effective on heterogeneous graphs. In this paper, we conduct an in-depth and detailed study of these mechanisms and propose the Simple and Efficient Heterogeneous Graph Neural Network (SeHGNN). 
To easily capture structural information, SeHGNN pre-computes the neighbor aggregation using a light-weight mean aggregator, which reduces complexity by removing overused neighbor attention and avoiding repeated neighbor aggregation in every training epoch. To better utilize semantic information, SeHGNN adopts the single-layer structure with long metapaths to extend the receptive field, as well as a transformer-based semantic fusion module to fuse features from different metapaths. As a result, SeHGNN exhibits the characteristics of a simple network structure, high prediction accuracy, and fast training speed. Extensive experiments on five real-world heterogeneous graphs demonstrate the superiority of SeHGNN over the state-of-the-arts on both accuracy and training speed.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Xiaocheng and Yan, Mingyu and Pan, Shirui and Ye, Xiaochun and Fan, Dongrui}, year={2023}, month={Jun.}, pages={10816-10824} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26283/26055", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26283", + "pdf_size": 449662, + "gs_citation": 188, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16129836209784305770&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 9, + "aff_domain": "ict.ac.cn;ict.ac.cn;griffith.edu.au;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;griffith.edu.au;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0+2", + "aff_unique_norm": "Chinese Academy of Sciences;Griffith University;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute for Computing Technology;School of Information and Communication Technology;School of Computer Science and Technology", + "aff_unique_url": "http://www.ict.ac.cn;https://www.griffith.edu.au;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;;UCAS", + "aff_campus_unique_index": "", 
+ "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0+0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25820", + "title": "Simulating Network Paths with Recurrent Buffering Units", + "track": "main", + "status": "Technical", + "abstract": "Simulating physical network paths (e.g., Internet) is a cornerstone research problem in the emerging sub-field of AI-for-networking. We seek a model that generates end-to-end packet delay values in response to the time-varying load offered by a sender, which is typically a function of the previously output delays. The problem setting is unique, and renders the state-of-the-art text and time-series generative models inapplicable or ineffective. We formulate an ML problem at the intersection of dynamical systems, sequential decision making, and time-series modeling. We propose a novel grey-box approach to network simulation that embeds the semantics of physical network path in a new RNN-style model called Recurrent Buffering Unit, providing the interpretability of standard network simulator tools, the power of neural models, the efficiency of SGD-based techniques for learning, and yielding promising results on synthetic and real-world network traces.", + "primary_area": "machine learning i", + "author": "Divyam Anshumaan; Sriram Balasubramanian; Shubham Tiwari; Nagarajan Natarajan; Sundararajan Sellamanickam; Venkat N. 
Padmanabhan", + "authorids": "", + "aff": "Microsoft Research India; Microsoft Research India + University of Maryland, College Park; Microsoft Research India; Microsoft Research India; Microsoft Research India; Microsoft Research India", + "bibtex": "@article{Anshumaan_Balasubramanian_Tiwari_Natarajan_Sellamanickam_Padmanabhan_2023, title={Simulating Network Paths with Recurrent Buffering Units}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25820}, DOI={10.1609/aaai.v37i6.25820}, abstractNote={Simulating physical network paths (e.g., Internet) is a cornerstone research problem in the emerging sub-field of AI-for-networking. We seek a model that generates end-to-end packet delay values in response to the time-varying load offered by a sender, which is typically a function of the previously output delays. The problem setting is unique, and renders the state-of-the-art text and time-series generative models inapplicable or ineffective. We formulate an ML problem at the intersection of dynamical systems, sequential decision making, and time-series modeling. 
We propose a novel grey-box approach to network simulation that embeds the semantics of physical network path in a new RNN-style model called Recurrent Buffering Unit, providing the interpretability of standard network simulator tools, the power of neural models, the efficiency of SGD-based techniques for learning, and yielding promising results on synthetic and real-world network traces.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Anshumaan, Divyam and Balasubramanian, Sriram and Tiwari, Shubham and Natarajan, Nagarajan and Sellamanickam, Sundararajan and Padmanabhan, Venkat N.}, year={2023}, month={Jun.}, pages={6684-6692} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25820/25592", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25820", + "pdf_size": 1076163, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:1qG-urVTId4J:scholar.google.com/&scioq=Simulating+Network+Paths+with+Recurrent+Buffering+Units&hl=en&as_sdt=0,5", + "gs_version_total": 7, + "aff_domain": "microsoft.com;cs.umd.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;cs.umd.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0;0", + "aff_unique_norm": "Microsoft Research;University of Maryland", + "aff_unique_dep": "Microsoft Research India;", + "aff_unique_url": "https://www.microsoft.com/en-us/research/group/microsoft-research-india;https://www/umd.edu", + "aff_unique_abbr": "MSR India;UMD", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";College Park", + "aff_country_unique_index": "0;0+1;0;0;0;0", + "aff_country_unique": "India;United States" + }, + { + "id": "article-26156", + "title": "Simultaneously Updating All Persistence Values in Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "In Reinforcement 
Learning, the performance of learning agents is highly sensitive to the choice of time discretization. Agents acting at high frequencies have the best control opportunities, along with some drawbacks, such as possible inefficient exploration and vanishing of the action advantages. The repetition of the actions, i.e., action persistence, comes into help, as it allows the agent to visit wider regions of the state space and improve the estimation of the action effects. In this work, we derive a novel operator, the All-Persistence Bellman Operator, which allows an effective use of both the low-persistence experience, by decomposition into sub-transition, and the high-persistence experience, thanks to the introduction of a suitable bootstrap procedure. In this way, we employ transitions collected at any time scale to update simultaneously the action values of the considered persistence set. We prove the contraction property of the All-Persistence Bellman Operator and, based on it, we extend classic Q-learning and DQN. After providing a study on the effects of persistence, we experimentally evaluate our approach in both tabular contexts and more challenging frameworks, including some Atari games.", + "primary_area": "machine learning iii", + "author": "Luca Sabbioni; Luca Al Daire; Lorenzo Bisi; Alberto Maria Metelli; Marcello Restelli", + "authorids": "", + "aff": "Politecnico di Milano; Politecnico di Milano; ML cube; Politecnico di Milano; Politecnico di Milano", + "bibtex": "@article{Sabbioni_Al Daire_Bisi_Metelli_Restelli_2023, title={Simultaneously Updating All Persistence Values in Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26156}, DOI={10.1609/aaai.v37i8.26156}, abstractNote={In Reinforcement Learning, the performance of learning agents is highly sensitive to the choice of time discretization. 
Agents acting at high frequencies have the best control opportunities, along with some drawbacks, such as possible inefficient exploration and vanishing of the action advantages. The repetition of the actions, i.e., action persistence, comes into help, as it allows the agent to visit wider regions of the state space and improve the estimation of the action effects. In this work, we derive a novel operator, the All-Persistence Bellman Operator, which allows an effective use of both the low-persistence experience, by decomposition into sub-transition, and the high-persistence experience, thanks to the introduction of a suitable bootstrap procedure. In this way, we employ transitions collected at any time scale to update simultaneously the action values of the considered persistence set. We prove the contraction property of the All-Persistence Bellman Operator and, based on it, we extend classic Q-learning and DQN. After providing a study on the effects of persistence, we experimentally evaluate our approach in both tabular contexts and more challenging frameworks, including some Atari games.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sabbioni, Luca and Al Daire, Luca and Bisi, Lorenzo and Metelli, Alberto Maria and Restelli, Marcello}, year={2023}, month={Jun.}, pages={9668-9676} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26156/25928", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26156", + "pdf_size": 1004657, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1069563345923151989&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "polimi.it; ; ; ; ", + "email": "polimi.it; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Politecnico di Milano;ML cube", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.polimi.it;", + "aff_unique_abbr": "Polimi;", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy;" + }, + { + "id": "article-26952", + "title": "SkateboardAI: The Coolest Video Action Recognition for Skateboarding (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Impressed by the coolest skateboarding sports program from 2021 Tokyo Olympic Games, we are the first to curate the original real-world video datasets \"SkateboardAI\" in the wild, even self-design and implement diverse uni-modal and multi-modal video action recognition approaches to recognize different tricks accurately. For uni-modal methods, we separately apply (1)CNN and LSTM; (2)CNN and BiLSTM; (3)CNN and BiLSTM with effective attention mechanisms; (4)Transformer-based action recognition pipeline. Transferred to the multi-modal conditions, we investigated the two-stream Inflated-3D architecture on \"SkateboardAI\" datasets to compare its performance with uni-modal cases. In sum, our objective is developing an excellent AI sport referee for the coolest skateboarding competitions.", + "primary_area": "", + "author": "Hanxiao Chen", + "authorids": "", + "aff": "Department of Automation, Harbin Institute of Technology, Harbin, China 150001", + "bibtex": "@article{Chen_2024, title={SkateboardAI: The Coolest Video Action Recognition for Skateboarding (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26952}, DOI={10.1609/aaai.v37i13.26952}, abstractNote={Impressed by the coolest skateboarding sports program from 2021 Tokyo Olympic Games, we are the first to curate the original real-world video datasets "SkateboardAI" in the wild, even self-design and implement diverse uni-modal and multi-modal video action recognition approaches to recognize different tricks accurately. 
For uni-modal methods, we separately apply (1)CNN and LSTM; (2)CNN and BiLSTM; (3)CNN and BiLSTM with effective attention mechanisms; (4)Transformer-based action recognition pipeline. Transferred to the multi-modal conditions, we investigated the two-stream Inflated-3D architecture on "SkateboardAI" datasets to compare its performance with uni-modal cases. In sum, our objective is developing an excellent AI sport referee for the coolest skateboarding competitions.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Hanxiao}, year={2024}, month={Jul.}, pages={16184-16185} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26952/26724", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26952", + "pdf_size": 1603033, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18094016202269066950&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "hit.edu.cn", + "email": "hit.edu.cn", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "Department of Automation", + "aff_unique_url": "http://www.hit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Harbin", + "aff_country_unique_index": "0", + "aff_country_unique": "China" + }, + { + "id": "article-25392", + "title": "Skating-Mixer: Long-Term Sport Audio-Visual Modeling with MLPs", + "track": "main", + "status": "Technical", + "abstract": "Figure skating scoring is challenging because it requires judging players\u2019 technical moves as well as coordination with the background music. 
Most learning-based methods struggle for two reasons: 1) each move in figure skating changes quickly, hence simply applying traditional frame sampling will lose a lot of valuable information, especially in 3 to 5 minutes lasting videos; 2) prior methods rarely considered the critical audio-visual relationship in their models. Due to these reasons, we introduce a novel architecture, named Skating-Mixer. It extends the MLP framework into a multimodal fashion and effectively learns long-term representations through our designed memory recurrent unit (MRU). Aside from the model, we collected a high-quality audio-visual FS1000 dataset, which contains over 1000 videos on 8 types of programs with 7 different rating metrics, overtaking other datasets in both quantity and diversity. Experiments show the proposed method achieves SOTAs over all major metrics on the public Fis-V and our FS1000 dataset. In addition, we include an analysis applying our method to the recent competitions in Beijing 2022 Winter Olympic Games, proving our method has strong applicability.", + "primary_area": "computer vision iii", + "author": "Jingfei Xia; Mingchen Zhuge; Tiantian Geng; Shun Fan; Yuantai Wei; Zhenyu He; Feng Zheng", + "authorids": "", + "aff": "Southern University of Science and Technology + The Chinese University of Hong Kong; Southern University of Science and Technology + AI Initiative, King Abdullah University of Science and Technology (KAUST); Southern University of Science and Technology; Southern University of Science and Technology; Southern University of Science and Technology; Harbin Institute of Technology (Shenzhen); Southern University of Science and Technology", + "bibtex": "@article{Xia_Zhuge_Geng_Fan_Wei_He_Zheng_2023, title={Skating-Mixer: Long-Term Sport Audio-Visual Modeling with MLPs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25392}, DOI={10.1609/aaai.v37i3.25392}, abstractNote={Figure skating scoring is challenging because it requires 
judging players\u2019 technical moves as well as coordination with the background music. Most learning-based methods struggle for two reasons: 1) each move in figure skating changes quickly, hence simply applying traditional frame sampling will lose a lot of valuable information, especially in 3 to 5 minutes lasting videos; 2) prior methods rarely considered the critical audio-visual relationship in their models. Due to these reasons, we introduce a novel architecture, named Skating-Mixer. It extends the MLP framework into a multimodal fashion and effectively learns long-term representations through our designed memory recurrent unit (MRU). Aside from the model, we collected a high-quality audio-visual FS1000 dataset, which contains over 1000 videos on 8 types of programs with 7 different rating metrics, overtaking other datasets in both quantity and diversity. Experiments show the proposed method achieves SOTAs over all major metrics on the public Fis-V and our FS1000 dataset. In addition, we include an analysis applying our method to the recent competitions in Beijing 2022 Winter Olympic Games, proving our method has strong applicability.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xia, Jingfei and Zhuge, Mingchen and Geng, Tiantian and Fan, Shun and Wei, Yuantai and He, Zhenyu and Zheng, Feng}, year={2023}, month={Jun.}, pages={2901-2909} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25392/25164", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25392", + "pdf_size": 1447919, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7921752639393171816&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "ie.cuhk.edu.hk;kaust.edu.sa;gmail.com;gmail.com;gmail.com;hit.edu.cn;gmail.com", + "email": "ie.cuhk.edu.hk;kaust.edu.sa;gmail.com;gmail.com;gmail.com;hit.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 7, + 
"aff_unique_index": "0+1;0+2;0;0;0;3;0", + "aff_unique_norm": "Southern University of Science and Technology;The Chinese University of Hong Kong;King Abdullah University of Science and Technology;Harbin Institute of Technology", + "aff_unique_dep": ";;AI Initiative;", + "aff_unique_url": "https://www.sustech.edu.cn;https://www.cuhk.edu.hk;https://www.kaust.edu.sa;http://en.hhit.edu.cn/", + "aff_unique_abbr": "SUSTech;CUHK;KAUST;HIT", + "aff_campus_unique_index": ";;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0+0;0+1;0;0;0;0;0", + "aff_country_unique": "China;Saudi Arabia" + }, + { + "id": "article-26598", + "title": "SlideVQA: A Dataset for Document Visual Question Answering on Multiple Images", + "track": "main", + "status": "Technical", + "abstract": "Visual question answering on document images that contain textual, visual, and layout information, called document VQA, has received much attention recently. Although many datasets have been proposed for developing document VQA systems, most of the existing datasets focus on understanding the content relationships within a single image and not across multiple images. In this study, we propose a new multi-image document VQA dataset, SlideVQA, containing 2.6k+ slide decks composed of 52k+ slide images and 14.5k questions about a slide deck. SlideVQA requires complex reasoning, including single-hop, multi-hop, and numerical reasoning, and also provides annotated arithmetic expressions of numerical answers for enhancing the ability of numerical reasoning. Moreover, we developed a new end-to-end document VQA model that treats evidence selection and question answering as a unified sequence-to-sequence format. Experiments on SlideVQA show that our model outperformed existing state-of-the-art QA models, but that it still has a large gap behind human performance. 
We believe that our dataset will facilitate research on document VQA.", + "primary_area": "speech natural language processing", + "author": "Ryota Tanaka; Kyosuke Nishida; Kosuke Nishida; Taku Hasegawa; Itsumi Saito; Kuniko Saito", + "authorids": "", + "aff": "NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation; NTT Human Informatics Laboratories, NTT Corporation", + "bibtex": "@article{Tanaka_Nishida_Nishida_Hasegawa_Saito_Saito_2023, title={SlideVQA: A Dataset for Document Visual Question Answering on Multiple Images}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26598}, DOI={10.1609/aaai.v37i11.26598}, abstractNote={Visual question answering on document images that contain textual, visual, and layout information, called document VQA, has received much attention recently. Although many datasets have been proposed for developing document VQA systems, most of the existing datasets focus on understanding the content relationships within a single image and not across multiple images. In this study, we propose a new multi-image document VQA dataset, SlideVQA, containing 2.6k+ slide decks composed of 52k+ slide images and 14.5k questions about a slide deck. SlideVQA requires complex reasoning, including single-hop, multi-hop, and numerical reasoning, and also provides annotated arithmetic expressions of numerical answers for enhancing the ability of numerical reasoning. Moreover, we developed a new end-to-end document VQA model that treats evidence selection and question answering as a unified sequence-to-sequence format. Experiments on SlideVQA show that our model outperformed existing state-of-the-art QA models, but that it still has a large gap behind human performance. 
We believe that our dataset will facilitate research on document VQA.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tanaka, Ryota and Nishida, Kyosuke and Nishida, Kosuke and Hasegawa, Taku and Saito, Itsumi and Saito, Kuniko}, year={2023}, month={Jun.}, pages={13636-13645} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26598/26370", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26598", + "pdf_size": 3689074, + "gs_citation": 79, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2797177540370333707&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp", + "email": "hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp;hco.ntt.co.jp", + "github": "https://github.com/nttmdlab-nlp/SlideVQA", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "NTT Corporation", + "aff_unique_dep": "Human Informatics Laboratories", + "aff_unique_url": "https://www.ntt.co.jp", + "aff_unique_abbr": "NTT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26430", + "title": "Smoothed Online Combinatorial Optimization Using Imperfect Predictions", + "track": "main", + "status": "Technical", + "abstract": "Smoothed online combinatorial optimization considers a learner who repeatedly chooses a combinatorial decision to minimize an unknown changing cost function with a penalty on switching decisions in consecutive rounds. We study smoothed online combinatorial optimization problems when an imperfect predictive model is available, where the model can forecast the future cost functions with uncertainty. 
We show that using predictions to plan for a finite time horizon leads to regret dependent on the total predictive uncertainty and an additional switching cost. This observation suggests choosing a suitable planning window to balance between uncertainty and switching cost, which leads to an online algorithm with guarantees on the upper and lower bounds of the cumulative regret. Empirically, our algorithm shows a significant improvement in cumulative regret compared to other baselines in synthetic online distributed streaming problems.", + "primary_area": "planning routing and scheduling", + "author": "Kai Wang; Zhao Song; Georgios Theocharous; Sridhar Mahadevan", + "authorids": "", + "aff": "Harvard University; Adobe Research; Adobe Research; Adobe Research", + "bibtex": "@article{Wang_Song_Theocharous_Mahadevan_2023, title={Smoothed Online Combinatorial Optimization Using Imperfect Predictions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26430}, DOI={10.1609/aaai.v37i10.26430}, abstractNote={Smoothed online combinatorial optimization considers a learner who repeatedly chooses a combinatorial decision to minimize an unknown changing cost function with a penalty on switching decisions in consecutive rounds. We study smoothed online combinatorial optimization problems when an imperfect predictive model is available, where the model can forecast the future cost functions with uncertainty. We show that using predictions to plan for a finite time horizon leads to regret dependent on the total predictive uncertainty and an additional switching cost. This observation suggests choosing a suitable planning window to balance between uncertainty and switching cost, which leads to an online algorithm with guarantees on the upper and lower bounds of the cumulative regret. 
Empirically, our algorithm shows a significant improvement in cumulative regret compared to other baselines in synthetic online distributed streaming problems.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Kai and Song, Zhao and Theocharous, Georgios and Mahadevan, Sridhar}, year={2023}, month={Jun.}, pages={12130-12137} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26430/26202", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26430", + "pdf_size": 413819, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11515109404130322829&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "g.harvard.edu;adobe.com;adobe.com;adobe.com", + "email": "g.harvard.edu;adobe.com;adobe.com;adobe.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Harvard University;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www.harvard.edu;https://research.adobe.com", + "aff_unique_abbr": "Harvard;Adobe", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26054", + "title": "Social Bias Meets Data Bias: The Impacts of Labeling and Measurement Errors on Fairness Criteria", + "track": "main", + "status": "Technical", + "abstract": "Although many fairness criteria have been proposed to ensure that machine learning algorithms do not exhibit or amplify our existing social biases, these algorithms are trained on datasets that can themselves be statistically biased. In this paper, we investigate the robustness of existing (demographic) fairness criteria when the algorithm is trained on biased data. We consider two forms of dataset bias: errors by prior decision makers in the labeling process, and errors in the measurement of the features of disadvantaged individuals. 
We analytically show that some constraints (such as Demographic Parity) can remain robust when facing certain statistical biases, while others (such as Equalized Odds) are significantly violated if trained on biased data. We provide numerical experiments based on three real-world datasets (the FICO, Adult, and German credit score datasets) supporting our analytical findings. While fairness criteria are primarily chosen under normative considerations in practice, our results show that naively applying a fairness constraint can lead to not only a loss in utility for the decision maker, but more severe unfairness when data bias exists. Thus, understanding how fairness criteria react to different forms of data bias presents a critical guideline for choosing among existing fairness criteria, or for proposing new criteria, when available datasets may be biased.", + "primary_area": "machine learning ii", + "author": "Yiqiao Liao; Parinaz Naghizadeh", + "authorids": "", + "aff": "Department of Computer Science and Engineering, The Ohio State University; Department of Electrical and Computer Engineering, The Ohio State University", + "bibtex": "@article{Liao_Naghizadeh_2023, title={Social Bias Meets Data Bias: The Impacts of Labeling and Measurement Errors on Fairness Criteria}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26054}, DOI={10.1609/aaai.v37i7.26054}, abstractNote={Although many fairness criteria have been proposed to ensure that machine learning algorithms do not exhibit or amplify our existing social biases, these algorithms are trained on datasets that can themselves be statistically biased. In this paper, we investigate the robustness of existing (demographic) fairness criteria when the algorithm is trained on biased data. We consider two forms of dataset bias: errors by prior decision makers in the labeling process, and errors in the measurement of the features of disadvantaged individuals. 
We analytically show that some constraints (such as Demographic Parity) can remain robust when facing certain statistical biases, while others (such as Equalized Odds) are significantly violated if trained on biased data. We provide numerical experiments based on three real-world datasets (the FICO, Adult, and German credit score datasets) supporting our analytical findings. While fairness criteria are primarily chosen under normative considerations in practice, our results show that naively applying a fairness constraint can lead to not only a loss in utility for the decision maker, but more severe unfairness when data bias exists. Thus, understanding how fairness criteria react to different forms of data bias presents a critical guideline for choosing among existing fairness criteria, or for proposing new criteria, when available datasets may be biased.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liao, Yiqiao and Naghizadeh, Parinaz}, year={2023}, month={Jun.}, pages={8764-8772} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26054/25826", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26054", + "pdf_size": 654851, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=221511595679235800&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff_domain": "osu.edu;osu.edu", + "email": "osu.edu;osu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Ohio State University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.osu.edu", + "aff_unique_abbr": "OSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26940", + "title": "Social Intelligence towards Human-AI Teambuilding (Student Abstract)", + "track": "aaai student abstract 
and poster program", + "status": "Technical", + "abstract": "As Artificial Intelligence (AI) continues to develop, it becomes vital to understand more of the nuances of Human-AI interactions. This study aims to uncover how developers can design AI to feel more human in a work environment where only written feedback is possible. Participants will identify a location from Google Maps. To do this successfully, participants must rely on the answers provided by their teammates, one AI and one human. The experiment will run a 2x4 de-sign where AI's responses will either be designed in a human style (high humanness) or state a one-word answer (low humanness), the latter of which is more typical in machines and AI. The reliability of the AI will either be 60% or 90%, and the human will be 30%. Participants will be given a series of questionnaires to rate their opinions of the AI and rate feelings of trust, confidence and performance throughout the study. Following this study, the aim is to identify specific design elements that allow AI to feel human and successfully appear to have social intelligence in more interactive settings.", + "primary_area": "", + "author": "Morgan E Bailey; Frank E Pollick", + "authorids": "", + "aff": "University of Glasgow, University Avenue, Glasgow; University of Glasgow, University Avenue, Glasgow", + "bibtex": "@article{Bailey_Pollick_2024, title={Social Intelligence towards Human-AI Teambuilding (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26940}, DOI={10.1609/aaai.v37i13.26940}, abstractNote={As Artificial Intelligence (AI) continues to develop, it becomes vital to understand more of the nuances of Human-AI interactions. This study aims to uncover how developers can design AI to feel more human in a work environment where only written feedback is possible. Participants will identify a location from Google Maps. 
To do this successfully, participants must rely on the answers provided by their teammates, one AI and one human. The experiment will run a 2x4 de-sign where AI\u2019s responses will either be designed in a human style (high humanness) or state a one-word answer (low humanness), the latter of which is more typical in machines and AI. The reliability of the AI will either be 60% or 90%, and the human will be 30%. Participants will be given a series of questionnaires to rate their opinions of the AI and rate feelings of trust, confidence and performance throughout the study. Following this study, the aim is to identify specific design elements that allow AI to feel human and successfully appear to have social intelligence in more interactive settings.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bailey, Morgan E and Pollick, Frank E}, year={2024}, month={Jul.}, pages={16160-16161} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26940/26712", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26940", + "pdf_size": 189606, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:2jvLFvENY_8J:scholar.google.com/&scioq=Social+Intelligence+towards+Human-AI+Teambuilding+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "research.gla.ac.uk;glasgow.ac.uk", + "email": "research.gla.ac.uk;glasgow.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Glasgow", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gla.ac.uk", + "aff_unique_abbr": "UoG", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Glasgow", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25151", + "title": "Social Relation Reasoning Based on Triangular Constraints", + "track": "main", + "status": "Technical", + "abstract": "Social 
networks are essentially in a graph structure where persons act as nodes and the edges connecting nodes denote social relations. The prediction of social relations, therefore, relies on the context in graphs to model the higher-order constraints among relations, which has not been exploited sufficiently by previous works, however. In this paper, we formulate the paradigm of the higher-order constraints in social relations into triangular relational closed-loop structures, i.e., triangular constraints, and further introduce the triangular reasoning graph attention network (TRGAT). Our TRGAT employs the attention mechanism to aggregate features with triangular constraints in the graph, thereby exploiting the higher-order context to reason social relations iteratively. Besides, to acquire better feature representations of persons, we introduce node contrastive learning into relation reasoning. Experimental results show that our method outperforms existing approaches significantly, with higher accuracy and better consistency in generating social relation graphs.", + "primary_area": "computer vision i", + "author": "Yunfei Guo; Fei Yin; Wei Feng; Xudong Yan; Tao Xue; Shuqi Mei; Cheng-Lin Liu", + "authorids": "", + "aff": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation of Chinese Academy of Sciences, Beijing 100190, China + School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China + CAS Center for Excellence of Brain Science and Intelligence Technology, Beijing 100190, China; National Laboratory of Pattern Recognition (NLPR), Institute of Automation of Chinese Academy of Sciences, Beijing 100190, China + School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China + CAS Center for Excellence of Brain Science and Intelligence Technology, Beijing 100190, China; National Laboratory of Pattern Recognition (NLPR), Institute of Automation of Chinese Academy of Sciences, 
Beijing 100190, China + School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China + CAS Center for Excellence of Brain Science and Intelligence Technology, Beijing 100190, China; T Lab, Tencent Map, Tencent Technology (Beijing) Co., Ltd., Beijing 100193, China; T Lab, Tencent Map, Tencent Technology (Beijing) Co., Ltd., Beijing 100193, China; T Lab, Tencent Map, Tencent Technology (Beijing) Co., Ltd., Beijing 100193, China; National Laboratory of Pattern Recognition (NLPR), Institute of Automation of Chinese Academy of Sciences, Beijing 100190, China + School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100049, China + CAS Center for Excellence of Brain Science and Intelligence Technology, Beijing 100190, China", + "bibtex": "@article{Guo_Yin_Feng_Yan_Xue_Mei_Liu_2023, title={Social Relation Reasoning Based on Triangular Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25151}, DOI={10.1609/aaai.v37i1.25151}, abstractNote={Social networks are essentially in a graph structure where persons act as nodes and the edges connecting nodes denote social relations. The prediction of social relations, therefore, relies on the context in graphs to model the higher-order constraints among relations, which has not been exploited sufficiently by previous works, however. In this paper, we formulate the paradigm of the higher-order constraints in social relations into triangular relational closed-loop structures, i.e., triangular constraints, and further introduce the triangular reasoning graph attention network (TRGAT). Our TRGAT employs the attention mechanism to aggregate features with triangular constraints in the graph, thereby exploiting the higher-order context to reason social relations iteratively. Besides, to acquire better feature representations of persons, we introduce node contrastive learning into relation reasoning. 
Experimental results show that our method outperforms existing approaches significantly, with higher accuracy and better consistency in generating social relation graphs.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Yunfei and Yin, Fei and Feng, Wei and Yan, Xudong and Xue, Tao and Mei, Shuqi and Liu, Cheng-Lin}, year={2023}, month={Jun.}, pages={737-745} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25151/24923", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25151", + "pdf_size": 1000710, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8305557301380438225&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;tencent.com;tencent.com;tencent.com;nlpr.ia.ac.cn", + "email": "ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;tencent.com;tencent.com;tencent.com;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2;0+1+2;0+1+2;3;3;3;0+1+2", + "aff_unique_norm": "National Laboratory of Pattern Recognition;University of Chinese Academy of Sciences;Chinese Academy of Sciences;Tencent Technology (Beijing) Co., Ltd.", + "aff_unique_dep": "Institute of Automation of Chinese Academy of Sciences;School of Artificial Intelligence;Center for Excellence of Brain Science and Intelligence Technology;T Lab", + "aff_unique_url": ";http://www.ucas.ac.cn;http://www.cas.cn;https://www.tencent.com", + "aff_unique_abbr": "NLPR;UCAS;CAS;Tencent", + "aff_campus_unique_index": "0+0+0;0+0+0;0+0+0;0;0;0;0+0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26375", + "title": "Socially Optimal Non-discriminatory Restrictions for Continuous-Action Games", + "track": "main", + "status": "Technical", + "abstract": "We address the following mechanism design problem: Given a multi-player 
Normal-Form Game (NFG) with a continuous action space, find a non-discriminatory (i.e., identical for all players) restriction of the action space which maximizes the resulting Nash Equilibrium with respect to a fixed social utility function. First, we propose a formal model of a Restricted Game and the corresponding restriction optimization problem. We then present an algorithm to find optimal non-discriminatory restrictions under some assumptions. Our experimental results with Braess' Paradox and the Cournot Game show that this method leads to an optimized social utility of the Nash Equilibria, even when the assumptions are not guaranteed to hold. Finally, we outline a generalization of our approach to the much wider scope of Stochastic Games.", + "primary_area": "multiagent systems", + "author": "Michael Oesterle; Guni Sharon", + "authorids": "", + "aff": "Institute for Enterprise Systems, University of Mannheim, Germany; Department of Computer Science & Engineering, Texas A&M University", + "bibtex": "@article{Oesterle_Sharon_2023, title={Socially Optimal Non-discriminatory Restrictions for Continuous-Action Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26375}, DOI={10.1609/aaai.v37i10.26375}, abstractNote={We address the following mechanism design problem: Given a multi-player Normal-Form Game (NFG) with a continuous action space, find a non-discriminatory (i.e., identical for all players) restriction of the action space which maximizes the resulting Nash Equilibrium with respect to a fixed social utility function. First, we propose a formal model of a Restricted Game and the corresponding restriction optimization problem. We then present an algorithm to find optimal non-discriminatory restrictions under some assumptions. Our experimental results with Braess\u2019 Paradox and the Cournot Game show that this method leads to an optimized social utility of the Nash Equilibria, even when the assumptions are not guaranteed to hold. 
Finally, we outline a generalization of our approach to the much wider scope of Stochastic Games.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Oesterle, Michael and Sharon, Guni}, year={2023}, month={Jun.}, pages={11638-11646} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26375/26147", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26375", + "pdf_size": 285393, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14769593962674765279&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "uni-mannheim.de;tamu.edu", + "email": "uni-mannheim.de;tamu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Mannheim;Texas A&M University", + "aff_unique_dep": "Institute for Enterprise Systems;Department of Computer Science & Engineering", + "aff_unique_url": "https://www.uni-mannheim.de;https://www.tamu.edu", + "aff_unique_abbr": ";TAMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Germany;United States" + }, + { + "id": "article-25850", + "title": "Soft Action Priors: Towards Robust Policy Transfer", + "track": "main", + "status": "Technical", + "abstract": "Despite success in many challenging problems, reinforcement learning (RL) is still confronted with sample inefficiency, which can be mitigated by introducing prior knowledge to agents. However, many transfer techniques in reinforcement learning make the limiting assumption that the teacher is an expert. In this paper, we use the action prior from the Reinforcement Learning as Inference framework - that is, a distribution over actions at each state which resembles a teacher policy, rather than a Bayesian prior - to recover state-of-the-art policy distillation techniques. 
Then, we propose a class of adaptive methods that can robustly exploit action priors by combining reward shaping and auxiliary regularization losses. In contrast to prior work, we develop algorithms for leveraging suboptimal action priors that may nevertheless impart valuable knowledge - which we call soft action priors. The proposed algorithms adapt by adjusting the strength of teacher feedback according to an estimate of the teacher's usefulness in each state. We perform tabular experiments, which show that the proposed methods achieve state-of-the-art performance, surpassing it when learning from suboptimal priors. Finally, we demonstrate the robustness of the adaptive algorithms in continuous action deep RL problems, in which adaptive algorithms considerably improved stability when compared to existing policy distillation methods.", + "primary_area": "machine learning i", + "author": "Matheus Centa; Philippe Preux", + "authorids": "", + "aff": "Univ. Lille, CNRS, UMR 9189 \u2013 CRIStAL, F-59000 Lille, France+Inria+Centrale Lille; Univ. Lille, CNRS, UMR 9189 \u2013 CRIStAL, F-59000 Lille, France+Inria+Centrale Lille", + "bibtex": "@article{Centa_Preux_2023, title={Soft Action Priors: Towards Robust Policy Transfer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25850}, DOI={10.1609/aaai.v37i6.25850}, abstractNote={Despite success in many challenging problems, reinforcement learning (RL) is still confronted with sample inefficiency, which can be mitigated by introducing prior knowledge to agents. However, many transfer techniques in reinforcement learning make the limiting assumption that the teacher is an expert. In this paper, we use the action prior from the Reinforcement Learning as Inference framework - that is, a distribution over actions at each state which resembles a teacher policy, rather than a Bayesian prior - to recover state-of-the-art policy distillation techniques. 
Then, we propose a class of adaptive methods that can robustly exploit action priors by combining reward shaping and auxiliary regularization losses. In contrast to prior work, we develop algorithms for leveraging suboptimal action priors that may nevertheless impart valuable knowledge - which we call soft action priors. The proposed algorithms adapt by adjusting the strength of teacher feedback according to an estimate of the teacher\u2019s usefulness in each state. We perform tabular experiments, which show that the proposed methods achieve state-of-the-art performance, surpassing it when learning from suboptimal priors. Finally, we demonstrate the robustness of the adaptive algorithms in continuous action deep RL problems, in which adaptive algorithms considerably improved stability when compared to existing policy distillation methods.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Centa, Matheus and Preux, Philippe}, year={2023}, month={Jun.}, pages={6953-6961} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25850/25622", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25850", + "pdf_size": 2400187, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=866704703788771274&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "inria.fr;inria.fr", + "email": "inria.fr;inria.fr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1+2;0+1+2", + "aff_unique_norm": "University of Lille;Inria;Centrale Lille", + "aff_unique_dep": "CRIStAL (UMR 9189);;", + "aff_unique_url": "https://www.univ-lille.fr;https://www.inria.fr;https://www.centrale-lille.org", + "aff_unique_abbr": "Univ. 
Lille;Inria;CL", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Lille;", + "aff_country_unique_index": "0+0+0;0+0+0", + "aff_country_unique": "France" + }, + { + "id": "article-25544", + "title": "Soft Target-Enhanced Matching Framework for Deep Entity Matching", + "track": "main", + "status": "Technical", + "abstract": "Deep Entity Matching (EM) is one of the core research topics in data integration. Typical existing works construct EM models by training deep neural networks (DNNs) based on the training samples with onehot labels. However, these sharp supervision signals of onehot labels harm the generalization of EM models, causing them to overfit the training samples and perform badly in unseen datasets. To solve this problem, we first propose that the challenge of training a well-generalized EM model lies in achieving the compromise between fitting the training samples and imposing regularization, i.e., the bias-variance tradeoff. Then, we propose a novel Soft Target-EnhAnced Matching (Steam) framework, which exploits the automatically generated soft targets as label-wise regularizers to constrain the model training. Specifically, Steam regards the EM model trained in previous iteration as a virtual teacher and takes its softened output as the extra regularizer to train the EM model in the current iteration. As such, Steam effectively calibrates the obtained EM model, achieving the bias-variance tradeoff without any additional computational cost. 
We conduct extensive experiments over open datasets and the results show that our proposed Steam outperforms the state-of-the-art EM approaches in terms of effectiveness and label efficiency.", + "primary_area": "data mining and knowledge management", + "author": "Wenzhou Dou; Derong Shen; Xiangmin Zhou; Tiezheng Nie; Yue Kou; Hang Cui; Ge Yu", + "authorids": "", + "aff": "Northeastern University; Northeastern University; RMIT University; Northeastern University; Northeastern University; University of Illinois at Urbana-Champaign; Northeastern University", + "bibtex": "@article{Dou_Shen_Zhou_Nie_Kou_Cui_Yu_2023, title={Soft Target-Enhanced Matching Framework for Deep Entity Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25544}, DOI={10.1609/aaai.v37i4.25544}, abstractNote={Deep Entity Matching (EM) is one of the core research topics in data integration. Typical existing works construct EM models by training deep neural networks (DNNs) based on the training samples with onehot labels. However, these sharp supervision signals of onehot labels harm the generalization of EM models, causing them to overfit the training samples and perform badly in unseen datasets. To solve this problem, we first propose that the challenge of training a well-generalized EM model lies in achieving the compromise between fitting the training samples and imposing regularization, i.e., the bias-variance tradeoff. Then, we propose a novel Soft Target-EnhAnced Matching (Steam) framework, which exploits the automatically generated soft targets as label-wise regularizers to constrain the model training. Specifically, Steam regards the EM model trained in previous iteration as a virtual teacher and takes its softened output as the extra regularizer to train the EM model in the current iteration. As such, Steam effectively calibrates the obtained EM model, achieving the bias-variance tradeoff without any additional computational cost. 
We conduct extensive experiments over open datasets and the results show that our proposed Steam outperforms the state-of-the-art EM approaches in terms of effectiveness and label efficiency.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dou, Wenzhou and Shen, Derong and Zhou, Xiangmin and Nie, Tiezheng and Kou, Yue and Cui, Hang and Yu, Ge}, year={2023}, month={Jun.}, pages={4259-4266} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25544/25316", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25544", + "pdf_size": 464426, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5102891369543952009&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;cse.neu.edu.cn;rmit.edu.au;cse.neu.edu.cn;cse.neu.edu.cn;illinois.edu;cse.neu.edu.cn", + "email": "gmail.com;cse.neu.edu.cn;rmit.edu.au;cse.neu.edu.cn;cse.neu.edu.cn;illinois.edu;cse.neu.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;2;0", + "aff_unique_norm": "Northeastern University;RMIT University;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.northeastern.edu;https://www.rmit.edu.au;https://illinois.edu", + "aff_unique_abbr": "NEU;RMIT;UIUC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;0;1;0;0;0;0", + "aff_country_unique": "United States;Australia" + }, + { + "id": "article-26531", + "title": "SoftCorrect: Error Correction with Soft Detection for Automatic Speech Recognition", + "track": "main", + "status": "Technical", + "abstract": "Error correction in automatic speech recognition (ASR) aims to correct those incorrect words in sentences generated by ASR models. 
Since recent ASR models usually have low word error rate (WER), to avoid affecting originally correct tokens, error correction models should only modify incorrect words, and therefore detecting incorrect words is important for error correction. Previous works on error correction either implicitly detect error words through target-source attention or CTC (connectionist temporal classification) loss, or explicitly locate specific deletion/substitution/insertion errors. However, implicit error detection does not provide clear signal about which tokens are incorrect and explicit error detection suffers from low detection accuracy. In this paper, we propose SoftCorrect with a soft error detection mechanism to avoid the limitations of both explicit and implicit error detection. Specifically, we first detect whether a token is correct or not through a probability produced by a dedicatedly designed language model, and then design a constrained CTC loss that only duplicates the detected incorrect tokens to let the decoder focus on the correction of error tokens. Compared with implicit error detection with CTC loss, SoftCorrect provides explicit signal about which words are incorrect and thus does not need to duplicate every token but only incorrect tokens; compared with explicit error detection, SoftCorrect does not detect specific deletion/substitution/insertion errors but just leaves it to CTC loss. 
Experiments on AISHELL-1 and Aidatatang datasets show that SoftCorrect achieves 26.1% and 9.4% CER reduction respectively, outperforming previous works by a large margin, while still enjoying fast speed of parallel generation.", + "primary_area": "speech natural language processing", + "author": "Yichong Leng; Xu Tan; Wenjie Liu; Kaitao Song; Rui Wang; Xiang-Yang Li; Tao Qin; Ed Lin; Tie-Yan Liu", + "authorids": "", + "aff": "University of Science and Technology of China; Microsoft Research Asia; Microsoft Azure Speech; Microsoft Research Asia; Microsoft Research Asia; University of Science and Technology of China; Microsoft Research Asia; Microsoft Azure Speech; Microsoft Research Asia", + "bibtex": "@article{Leng_Tan_Liu_Song_Wang_Li_Qin_Lin_Liu_2023, title={SoftCorrect: Error Correction with Soft Detection for Automatic Speech Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26531}, DOI={10.1609/aaai.v37i11.26531}, abstractNote={Error correction in automatic speech recognition (ASR) aims to correct those incorrect words in sentences generated by ASR models. Since recent ASR models usually have low word error rate (WER), to avoid affecting originally correct tokens, error correction models should only modify incorrect words, and therefore detecting incorrect words is important for error correction. Previous works on error correction either implicitly detect error words through target-source attention or CTC (connectionist temporal classification) loss, or explicitly locate specific deletion/substitution/insertion errors. However, implicit error detection does not provide clear signal about which tokens are incorrect and explicit error detection suffers from low detection accuracy. In this paper, we propose SoftCorrect with a soft error detection mechanism to avoid the limitations of both explicit and implicit error detection. 
Specifically, we first detect whether a token is correct or not through a probability produced by a dedicatedly designed language model, and then design a constrained CTC loss that only duplicates the detected incorrect tokens to let the decoder focus on the correction of error tokens. Compared with implicit error detection with CTC loss, SoftCorrect provides explicit signal about which words are incorrect and thus does not need to duplicate every token but only incorrect tokens; compared with explicit error detection, SoftCorrect does not detect specific deletion/substitution/insertion errors but just leaves it to CTC loss. Experiments on AISHELL-1 and Aidatatang datasets show that SoftCorrect achieves 26.1% and 9.4% CER reduction respectively, outperforming previous works by a large margin, while still enjoying fast speed of parallel generation.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Leng, Yichong and Tan, Xu and Liu, Wenjie and Song, Kaitao and Wang, Rui and Li, Xiang-Yang and Qin, Tao and Lin, Ed and Liu, Tie-Yan}, year={2023}, month={Jun.}, pages={13034-13042} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26531/26303", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26531", + "pdf_size": 380986, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15851940939178161294&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "email": "mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;2;1;1;0;1;2;1", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research;Microsoft Corporation", + "aff_unique_dep": ";Research;Azure 
Speech", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.microsoft.com", + "aff_unique_abbr": "USTC;MSR Asia;Microsoft", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;1;0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26858", + "title": "SolderNet: Towards Trustworthy Visual Inspection of Solder Joints in Electronics Manufacturing Using Explainable Artificial Intelligence", + "track": "iaai technical track", + "status": "Technical", + "abstract": "In electronics manufacturing, solder joint defects are a common problem affecting a variety of printed circuit board components. To identify and correct solder joint defects, the solder joints on a circuit board are typically inspected manually by trained human inspectors, which is a very time-consuming and error-prone process. To improve both inspection efficiency and accuracy, in this work we describe an explainable deep learning-based visual quality inspection system tailored for visual inspection of solder joints in electronics manufacturing environments. At the core of this system is an explainable solder joint defect identification system called SolderNet which we design and implement with trust and transparency in mind. 
While several challenges remain before the full system can be developed and deployed, this study presents important progress towards trustworthy visual inspection of solder joints in electronics manufacturing.", + "primary_area": "emerging applications of ai", + "author": "Hayden Gunraj; Paul Guerrier; Sheldon Fernandez; Alexander Wong", + "authorids": "", + "aff": "Vision and Image Processing Research Group, University of Waterloo; Moog Inc., New York, USA; DarwinAI Corp., Waterloo, Canada; Vision and Image Processing Research Group, University of Waterloo + DarwinAI Corp., Waterloo, Canada", + "bibtex": "@article{Gunraj_Guerrier_Fernandez_Wong_2024, title={SolderNet: Towards Trustworthy Visual Inspection of Solder Joints in Electronics Manufacturing Using Explainable Artificial Intelligence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26858}, DOI={10.1609/aaai.v37i13.26858}, abstractNote={In electronics manufacturing, solder joint defects are a common problem affecting a variety of printed circuit board components. To identify and correct solder joint defects, the solder joints on a circuit board are typically inspected manually by trained human inspectors, which is a very time-consuming and error-prone process. To improve both inspection efficiency and accuracy, in this work we describe an explainable deep learning-based visual quality inspection system tailored for visual inspection of solder joints in electronics manufacturing environments. At the core of this system is an explainable solder joint defect identification system called SolderNet which we design and implement with trust and transparency in mind. 
While several challenges remain before the full system can be developed and deployed, this study presents important progress towards trustworthy visual inspection of solder joints in electronics manufacturing.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gunraj, Hayden and Guerrier, Paul and Fernandez, Sheldon and Wong, Alexander}, year={2024}, month={Jul.}, pages={15668-15674} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26858/26630", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26858", + "pdf_size": 6402523, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3571520303332135801&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "uwaterloo.ca; ; ;uwaterloo.ca", + "email": "uwaterloo.ca; ; ;uwaterloo.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0+2", + "aff_unique_norm": "University of Waterloo;Moog Inc.;DarwinAI Corp.", + "aff_unique_dep": "Vision and Image Processing Research Group;;", + "aff_unique_url": "https://uwaterloo.ca;https://www.mooginc.com;https://www.darwinai.com", + "aff_unique_abbr": ";;DarwinAI", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Waterloo", + "aff_country_unique_index": "0;1;0;0+0", + "aff_country_unique": "Canada;United States" + }, + { + "id": "article-25514", + "title": "Solving Explainability Queries with Quantification: The Case of Feature Relevancy", + "track": "main", + "status": "Technical", + "abstract": "Trustable explanations of machine learning (ML) models are vital in\nhigh-risk uses of artificial intelligence (AI). Apart from the\ncomputation of trustable explanations, a number of explainability\nqueries have been identified and studied in recent work. Some of these\nqueries involve solving quantification problems, either in\npropositional or in more expressive logics. 
This paper investigates\none of these quantification problems, namely the feature relevancy\nproblem (FRP), i.e.\\ to decide whether a (possibly sensitive) feature\ncan occur in some explanation of a prediction. In contrast with\nearlier work, that studied FRP for specific classifiers, this paper\nproposes a novel algorithm for the \\fprob quantification problem which\nis applicable to any ML classifier that meets minor requirements.\nFurthermore, the paper shows that the novel algorithm is efficient\nin practice. The experimental results, obtained using random forests\n(RFs) induced from well-known publicly available datasets,\ndemonstrate that the proposed solution outperforms existing\nstate-of-the-art solvers for Quantified Boolean Formulas (QBF) by\norders of magnitude. Finally, the paper also identifies a novel family\nof formulas that are challenging for currently state-of-the-art QBF\nsolvers.", + "primary_area": "constraint satisfaction and optimization", + "author": "Xuanxiang Huang; Yacine Izza; Joao Marques-Silva", + "authorids": "", + "aff": "IRIT, University of Toulouse, France; IRIT, University of Toulouse, France + CREATE, National University of Singapore, Singapore; IRIT, CNRS, Toulouse, France", + "bibtex": "@article{Huang_Izza_Marques-Silva_2023, title={Solving Explainability Queries with Quantification: The Case of Feature Relevancy}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25514}, DOI={10.1609/aaai.v37i4.25514}, abstractNote={Trustable explanations of machine learning (ML) models are vital in\nhigh-risk uses of artificial intelligence (AI). Apart from the\ncomputation of trustable explanations, a number of explainability\nqueries have been identified and studied in recent work. Some of these\nqueries involve solving quantification problems, either in\npropositional or in more expressive logics. 
This paper investigates\none of these quantification problems, namely the feature relevancy\nproblem (FRP), i.e.\\ to decide whether a (possibly sensitive) feature\ncan occur in some explanation of a prediction. In contrast with\nearlier work, that studied FRP for specific classifiers, this paper\nproposes a novel algorithm for the \\fprob quantification problem which\nis applicable to any ML classifier that meets minor requirements.\nFurthermore, the paper shows that the novel algorithm is efficient\nin practice. The experimental results, obtained using random forests\n(RFs) induced from well-known publicly available datasets,\ndemonstrate that the proposed solution outperforms existing\nstate-of-the-art solvers for Quantified Boolean Formulas (QBF) by\norders of magnitude. Finally, the paper also identifies a novel family\nof formulas that are challenging for currently state-of-the-art QBF\nsolvers.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Xuanxiang and Izza, Yacine and Marques-Silva, Joao}, year={2023}, month={Jun.}, pages={3996-4006} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25514/25286", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25514", + "pdf_size": 171593, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8266968943953538412&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "univ-toulouse.fr;com.nus.edu.sg;irit.fr", + "email": "univ-toulouse.fr;com.nus.edu.sg;irit.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;2", + "aff_unique_norm": "University of Toulouse;National University of Singapore;Institut de Recherche en Informatique de Toulouse", + "aff_unique_dep": "IRIT;;", + "aff_unique_url": "https://www.univ-toulouse.fr;https://www.nus.edu.sg;https://www.irit.fr", + "aff_unique_abbr": "UT;NUS;IRIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": 
"Toulouse;", + "aff_country_unique_index": "0;0+1;0", + "aff_country_unique": "France;Singapore" + }, + { + "id": "article-26369", + "title": "Solving Large-Scale Pursuit-Evasion Games Using Pre-trained Strategies", + "track": "main", + "status": "Technical", + "abstract": "Pursuit-evasion games on graphs model the coordination of police forces chasing a fleeing felon in real-world urban settings, using the standard framework of imperfect-information extensive-form games (EFGs). In recent years, solving EFGs has been largely dominated by the Policy-Space Response Oracle (PSRO) methods due to their modularity, scalability, and favorable convergence properties. However, even these methods quickly reach their limits when facing large combinatorial strategy spaces of the pursuit-evasion games. To improve their efficiency, we integrate the pre-training and fine-tuning paradigm into the core module of PSRO -- the repeated computation of the best response. First, we pre-train the pursuer's policy base model against many different strategies of the evader. Then we proceed with the PSRO loop and fine-tune the pre-trained policy to attain the pursuer's best responses. 
The empirical evaluation shows that our approach significantly outperforms the baselines in terms of speed and scalability, and can solve even games on street maps of megalopolises with tens of thousands of crossroads -- a scale beyond the effective reach of previous methods.", + "primary_area": "multiagent systems", + "author": "Shuxin Li; Xinrun Wang; Youzhi Zhang; Wanqi Xue; Jakub \u010cern\u00fd; Bo An", + "authorids": "", + "aff": "School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; Centre for Artificial Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore", + "bibtex": "@article{Li_Wang_Zhang_Xue_\u010cern\u00fd_An_2023, title={Solving Large-Scale Pursuit-Evasion Games Using Pre-trained Strategies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26369}, DOI={10.1609/aaai.v37i10.26369}, abstractNote={Pursuit-evasion games on graphs model the coordination of police forces chasing a fleeing felon in real-world urban settings, using the standard framework of imperfect-information extensive-form games (EFGs). In recent years, solving EFGs has been largely dominated by the Policy-Space Response Oracle (PSRO) methods due to their modularity, scalability, and favorable convergence properties. However, even these methods quickly reach their limits when facing large combinatorial strategy spaces of the pursuit-evasion games. To improve their efficiency, we integrate the pre-training and fine-tuning paradigm into the core module of PSRO -- the repeated computation of the best response. 
First, we pre-train the pursuer\u2019s policy base model against many different strategies of the evader. Then we proceed with the PSRO loop and fine-tune the pre-trained policy to attain the pursuer\u2019s best responses. The empirical evaluation shows that our approach significantly outperforms the baselines in terms of speed and scalability, and can solve even games on street maps of megalopolises with tens of thousands of crossroads -- a scale beyond the effective reach of previous methods.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shuxin and Wang, Xinrun and Zhang, Youzhi and Xue, Wanqi and \u010cern\u00fd, Jakub and An, Bo}, year={2023}, month={Jun.}, pages={11586-11594} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26369/26141", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26369", + "pdf_size": 449530, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=495848404147076262&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ntu.edu.sg;ntu.edu.sg;cair-cas.org.hk;ntu.edu.sg;disroot.org;ntu.edu.sg", + "email": "ntu.edu.sg;ntu.edu.sg;cair-cas.org.hk;ntu.edu.sg;disroot.org;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Nanyang Technological University;Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences", + "aff_unique_dep": "School of Computer Science and Engineering;Centre for Artificial Intelligence and Robotics", + "aff_unique_url": "https://www.ntu.edu.sg;", + "aff_unique_abbr": "NTU;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Singapore;", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-26896", + "title": "Solving Math Word Problems concerning Systems of Equations with GPT-3", + "track": "eaai symposium ai for education", + "status": 
"Technical", + "abstract": "Researchers have been interested in developing AI tools to help students learn various mathematical subjects. One challenging set of tasks for school students is learning to solve math word problems. We explore how recent advances in natural language processing, specifically the rise of powerful transformer based models, can be applied to help math learners with such problems. Concretely, we evaluate the use of GPT-3, a 1.75B parameter transformer model recently released by OpenAI, for three related challenges pertaining to math word problems corresponding to systems of two linear equations. The three challenges are classifying word problems, extracting equations from word problems, and generating word problems. For the first challenge, we define a set of problem classes and find that GPT-3 has generally very high accuracy in classifying word problems (80%-100%), for all but one of these classes. For the second challenge, we find the accuracy for extracting equations improves with number of examples provided to the model, ranging from an accuracy of 31% for zero-shot learning to about 69% using 3-shot learning, which is further improved to a high value of 80% with fine-tuning. For the third challenge, we find that GPT-3 is able to generate problems with accuracy ranging from 33% to 93%, depending on the problem type.", + "primary_area": "", + "author": "Mingyu Zong; Bhaskar Krishnamachari", + "authorids": "", + "aff": "USC Viterbi School of Engineering, Los Angeles, California 90089; USC Viterbi School of Engineering, Los Angeles, California 90089", + "bibtex": "@article{Zong_Krishnamachari_2024, title={Solving Math Word Problems concerning Systems of Equations with GPT-3}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26896}, DOI={10.1609/aaai.v37i13.26896}, abstractNote={Researchers have been interested in developing AI tools to help students learn various mathematical subjects. 
One challenging set of tasks for school students is learning to solve math word problems. We explore how recent advances in natural language processing, specifically the rise of powerful transformer based models, can be applied to help math learners with such problems. Concretely, we evaluate the use of GPT-3, a 1.75B parameter transformer model recently released by OpenAI, for three related challenges pertaining to math word problems corresponding to systems of two linear equations. The three challenges are classifying word problems, extracting equations from word problems, and generating word problems. For the first challenge, we define a set of problem classes and find that GPT-3 has generally very high accuracy in classifying word problems (80%-100%), for all but one of these classes. For the second challenge, we find the accuracy for extracting equations improves with number of examples provided to the model, ranging from an accuracy of 31% for zero-shot learning to about 69% using 3-shot learning, which is further improved to a high value of 80% with fine-tuning. 
For the third challenge, we find that GPT-3 is able to generate problems with accuracy ranging from 33% to 93%, depending on the problem type.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zong, Mingyu and Krishnamachari, Bhaskar}, year={2024}, month={Jul.}, pages={15972-15979} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26896/26668", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26896", + "pdf_size": 105528, + "gs_citation": 66, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2990169802599775282&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "usc.edu;usc.edu", + "email": "usc.edu;usc.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "Viterbi School of Engineering", + "aff_unique_url": "https://viterbi.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26161", + "title": "Sparse Coding in a Dual Memory System for Lifelong Learning", + "track": "main", + "status": "Technical", + "abstract": "Efficient continual learning in humans is enabled by a rich set of neurophysiological mechanisms and interactions between multiple memory systems. The brain efficiently encodes information in non-overlapping sparse codes, which facilitates the learning of new associations faster with controlled interference with previous associations. To mimic sparse coding in DNNs, we enforce activation sparsity along with a dropout mechanism which encourages the model to activate similar units for semantically similar inputs and have less overlap with activation patterns of semantically dissimilar inputs. 
This provides us with an efficient mechanism for balancing the reusability and interference of features, depending on the similarity of classes across tasks. Furthermore, we employ sparse coding in a multiple-memory replay mechanism. Our method maintains an additional long-term semantic memory that aggregates and consolidates information encoded in the synaptic weights of the working model. Our extensive evaluation and characteristics analysis show that equipped with these biologically inspired mechanisms, the model can further mitigate forgetting. Code available at \\url{https://github.com/NeurAI-Lab/SCoMMER}.", + "primary_area": "machine learning iii", + "author": "Fahad Sarfraz; Elahe Arani; Bahram Zonooz", + "authorids": "", + "aff": "1Advanced Research Lab, NavInfo Europe, The Netherlands; 2Department of Mathematics and Computer Science, Eindhoven University of Technology, The Netherlands; 1Advanced Research Lab, NavInfo Europe, The Netherlands + 2Department of Mathematics and Computer Science, Eindhoven University of Technology, The Netherlands", + "bibtex": "@article{Sarfraz_Arani_Zonooz_2023, title={Sparse Coding in a Dual Memory System for Lifelong Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26161}, DOI={10.1609/aaai.v37i8.26161}, abstractNote={Efficient continual learning in humans is enabled by a rich set of neurophysiological mechanisms and interactions between multiple memory systems. The brain efficiently encodes information in non-overlapping sparse codes, which facilitates the learning of new associations faster with controlled interference with previous associations. To mimic sparse coding in DNNs, we enforce activation sparsity along with a dropout mechanism which encourages the model to activate similar units for semantically similar inputs and have less overlap with activation patterns of semantically dissimilar inputs. 
This provides us with an efficient mechanism for balancing the reusability and interference of features, depending on the similarity of classes across tasks. Furthermore, we employ sparse coding in a multiple-memory replay mechanism. Our method maintains an additional long-term semantic memory that aggregates and consolidates information encoded in the synaptic weights of the working model. Our extensive evaluation and characteristics analysis show that equipped with these biologically inspired mechanisms, the model can further mitigate forgetting. Code available at \\url{https://github.com/NeurAI-Lab/SCoMMER}.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sarfraz, Fahad and Arani, Elahe and Zonooz, Bahram}, year={2023}, month={Jun.}, pages={9714-9722} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26161/25933", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26161", + "pdf_size": 831897, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1771123282163307954&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "navinfo.eu;tue.nl;gmail.com", + "email": "navinfo.eu;tue.nl;gmail.com", + "github": "https://github.com/NeurAI-Lab/SCoMMER", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "NavInfo Europe;Eindhoven University of Technology", + "aff_unique_dep": "Advanced Research Lab;Department of Mathematics and Computer Science", + "aff_unique_url": ";https://www.tue.nl", + "aff_unique_abbr": ";TU/e", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "The Netherlands" + }, + { + "id": "article-25676", + "title": "Sparse Maximum Margin Learning from Multimodal Human Behavioral Patterns", + "track": "main", + "status": "Technical", + "abstract": "We propose a multimodal data fusion framework to systematically analyze human 
behavioral data from specialized domains that are inherently dynamic, sparse, and heterogeneous. We develop a two-tier architecture of probabilistic mixtures, where the lower tier leverages parametric distributions from the exponential family to extract significant behavioral patterns from each data modality. These patterns are then organized into a dynamic latent state space at the higher tier to fuse patterns from different modalities. In addition, our framework jointly performs pattern discovery and maximum-margin learning for downstream classification tasks by using a group-wise sparse prior that regularizes the coefficients of the maximum-margin classifier. Therefore, the discovered patterns are highly interpretable and discriminative to support downstream classification tasks. Experiments on real-world behavioral data from medical and psychological domains demonstrate that our framework discovers meaningful multimodal behavioral patterns with improved interpretability and prediction performance.", + "primary_area": "domain s of application", + "author": "Ervine Zheng; Qi Yu; Zhi Zheng", + "authorids": "", + "aff": "Rochester Institute of Technology; Rochester Institute of Technology; Rochester Institute of Technology", + "bibtex": "@article{Zheng_Yu_Zheng_2023, title={Sparse Maximum Margin Learning from Multimodal Human Behavioral Patterns}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25676}, DOI={10.1609/aaai.v37i4.25676}, abstractNote={We propose a multimodal data fusion framework to systematically analyze human behavioral data from specialized domains that are inherently dynamic, sparse, and heterogeneous. We develop a two-tier architecture of probabilistic mixtures, where the lower tier leverages parametric distributions from the exponential family to extract significant behavioral patterns from each data modality. 
These patterns are then organized into a dynamic latent state space at the higher tier to fuse patterns from different modalities. In addition, our framework jointly performs pattern discovery and maximum-margin learning for downstream classification tasks by using a group-wise sparse prior that regularizes the coefficients of the maximum-margin classifier. Therefore, the discovered patterns are highly interpretable and discriminative to support downstream classification tasks. Experiments on real-world behavioral data from medical and psychological domains demonstrate that our framework discovers meaningful multimodal behavioral patterns with improved interpretability and prediction performance.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Ervine and Yu, Qi and Zheng, Zhi}, year={2023}, month={Jun.}, pages={5437-5445} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25676/25448", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25676", + "pdf_size": 1540539, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9668473152062036686&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "rit.edu;rit.edu;rit.edu", + "email": "rit.edu;rit.edu;rit.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Rochester Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.rit.edu", + "aff_unique_abbr": "RIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25221", + "title": "Spatial-Spectral Transformer for Hyperspectral Image Denoising", + "track": "main", + "status": "Technical", + "abstract": "Hyperspectral image (HSI) denoising is a crucial preprocessing procedure for the subsequent HSI applications. 
Unfortunately, though witnessing the development of deep learning in HSI denoising area, existing convolution-based methods face the trade-off between computational efficiency and capability to model non-local characteristics of HSI. In this paper, we propose a Spatial-Spectral Transformer (SST) to alleviate this problem. To fully explore intrinsic similarity characteristics in both spatial dimension and spectral dimension, we conduct non-local spatial self-attention and global spectral self-attention with Transformer architecture. The window-based spatial self-attention focuses on the spatial similarity beyond the neighboring region. While, the spectral self-attention exploits the long-range dependencies between highly correlative bands. Experimental results show that our proposed method outperforms the state-of-the-art HSI denoising methods in quantitative quality and visual results. The code is released at https://github.com/MyuLi/SST.", + "primary_area": "computer vision i", + "author": "Miaoyu Li; Ying Fu; Yulun Zhang", + "authorids": "", + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; ETH Z\u00fcrich", + "bibtex": "@article{Li_Fu_Zhang_2023, title={Spatial-Spectral Transformer for Hyperspectral Image Denoising}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25221}, DOI={10.1609/aaai.v37i1.25221}, abstractNote={Hyperspectral image (HSI) denoising is a crucial preprocessing procedure for the subsequent HSI applications. Unfortunately, though witnessing the development of deep learning in HSI denoising area, existing convolution-based methods face the trade-off between computational efficiency and capability to model non-local characteristics of HSI. In this paper, we propose a Spatial-Spectral Transformer (SST) to alleviate this problem. 
To fully explore intrinsic similarity characteristics in both spatial dimension and spectral dimension, we conduct non-local spatial self-attention and global spectral self-attention with Transformer architecture. The window-based spatial self-attention focuses on the spatial similarity beyond the neighboring region. While, the spectral self-attention exploits the long-range dependencies between highly correlative bands. Experimental results show that our proposed method outperforms the state-of-the-art HSI denoising methods in quantitative quality and visual results. The code is released at https://github.com/MyuLi/SST.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Miaoyu and Fu, Ying and Zhang, Yulun}, year={2023}, month={Jun.}, pages={1368-1376} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25221/24993", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25221", + "pdf_size": 1826168, + "gs_citation": 82, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15176191846631609021&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "bit.edu.cn;bit.edu.cn;gmail.com", + "email": "bit.edu.cn;bit.edu.cn;gmail.com", + "github": "https://github.com/MyuLi/SST", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Beijing Institute of Technology;ETH Z\u00fcrich", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.ethz.ch", + "aff_unique_abbr": "BIT;ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "China;Switzerland" + }, + { + "id": "article-26016", + "title": "SpatialFormer: Semantic and Target Aware Attentions for Few-Shot Learning", + "track": "main", + "status": "Technical", + "abstract": "Recent Few-Shot Learning (FSL) methods put emphasis on generating a discriminative embedding features to precisely measure the 
similarity between support and query sets. Current CNN-based cross-attention approaches generate discriminative representations via enhancing the mutually semantic similar regions of support and query pairs. However, it suffers from two problems: CNN structure produces inaccurate attention map based on local features, and mutually similar backgrounds cause distraction. To alleviate these problems, we design a novel SpatialFormer structure to generate more accurate attention regions based on global features. Different from the traditional Transformer modeling intrinsic instance-level similarity which causes accuracy degradation in FSL, our SpatialFormer explores the semantic-level similarity between pair inputs to boost the performance. Then we derive two specific attention modules, named SpatialFormer Semantic Attention (SFSA) and SpatialFormer Target Attention (SFTA), to enhance the target object regions while reduce the background distraction. Particularly, SFSA highlights the regions with same semantic information between pair features, and SFTA finds potential foreground object regions of novel feature that are similar to base categories. 
Extensive experiments show that our methods are effective and achieve new state-of-the-art results on few-shot classification benchmarks.", + "primary_area": "machine learning ii", + "author": "Jinxiang Lai; Siqian Yang; Wenlong Wu; Tao Wu; Guannan Jiang; Xi Wang; Jun Liu; Bin-Bin Gao; Wei Zhang; Yuan Xie; Chengjie Wang", + "authorids": "", + "aff": "Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; Tencent Youtu Lab; CATL; CATL; Tencent Youtu Lab; Tencent Youtu Lab; CATL; East China Normal University; Tencent Youtu Lab + Shanghai Jiao Tong University", + "bibtex": "@article{Lai_Yang_Wu_Wu_Jiang_Wang_Liu_Gao_Zhang_Xie_Wang_2023, title={SpatialFormer: Semantic and Target Aware Attentions for Few-Shot Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26016}, DOI={10.1609/aaai.v37i7.26016}, abstractNote={Recent Few-Shot Learning (FSL) methods put emphasis on generating a discriminative embedding features to precisely measure the similarity between support and query sets. Current CNN-based cross-attention approaches generate discriminative representations via enhancing the mutually semantic similar regions of support and query pairs. However, it suffers from two problems: CNN structure produces inaccurate attention map based on local features, and mutually similar backgrounds cause distraction. To alleviate these problems, we design a novel SpatialFormer structure to generate more accurate attention regions based on global features. Different from the traditional Transformer modeling intrinsic instance-level similarity which causes accuracy degradation in FSL, our SpatialFormer explores the semantic-level similarity between pair inputs to boost the performance. Then we derive two specific attention modules, named SpatialFormer Semantic Attention (SFSA) and SpatialFormer Target Attention (SFTA), to enhance the target object regions while reduce the background distraction. 
Particularly, SFSA highlights the regions with same semantic information between pair features, and SFTA finds potential foreground object regions of novel feature that are similar to base categories. Extensive experiments show that our methods are effective and achieve new state-of-the-art results on few-shot classification benchmarks.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lai, Jinxiang and Yang, Siqian and Wu, Wenlong and Wu, Tao and Jiang, Guannan and Wang, Xi and Liu, Jun and Gao, Bin-Bin and Zhang, Wei and Xie, Yuan and Wang, Chengjie}, year={2023}, month={Jun.}, pages={8430-8437} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26016/25788", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26016", + "pdf_size": 482445, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12529826967983683589&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;catl.com;catl.com;gmail.com;gmail.com;catl.com;cs.ecnu.edu.cn;tencent.com", + "email": "tencent.com;tencent.com;tencent.com;tencent.com;catl.com;catl.com;gmail.com;gmail.com;catl.com;cs.ecnu.edu.cn;tencent.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;0;1;1;0;0;1;2;0+3", + "aff_unique_norm": "Tencent;CATL;East China Normal University;Shanghai Jiao Tong University", + "aff_unique_dep": "Youtu Lab;;;", + "aff_unique_url": "https://www.tencent.com;https://www.catl.com.cn;http://www.ecnu.edu.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "Tencent;CATL;ECNU;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26669", + "title": "Spatio-Temporal Graph Neural Point Process for Traffic Congestion Event Prediction", + "track": "aaai special track", + "status": "Technical", + 
"abstract": "Traffic congestion event prediction is an important yet challenging task in intelligent transportation systems. Many existing works about traffic prediction integrate various temporal encoders and graph convolution networks (GCNs), called spatio-temporal graph-based neural networks, which focus on predicting dense variables such as flow, speed and demand in time snapshots, but they can hardly forecast the traffic congestion events that are sparsely distributed on the continuous time axis. In recent years, neural point process (NPP) has emerged as an appropriate framework for event prediction in continuous time scenarios. However, most conventional works about NPP cannot model the complex spatio-temporal dependencies and congestion evolution patterns. To address these limitations, we propose a spatio-temporal graph neural point process framework, named STGNPP for traffic congestion event prediction. Specifically, we first design the spatio-temporal graph learning module to fully capture the long-range spatio-temporal dependencies from the historical traffic state data along with the road network. The extracted spatio-temporal hidden representation and congestion event information are then fed into a continuous gated recurrent unit to model the congestion evolution patterns. In particular, to fully exploit the periodic information, we also improve the intensity function calculation of the point process with a periodic gated mechanism. Finally, our model simultaneously predicts the occurrence time and duration of the next congestion. 
Extensive experiments on two real-world datasets demonstrate that our method achieves superior performance in comparison to existing state-of-the-art approaches.", + "primary_area": "ai for social impact", + "author": "Guangyin Jin; Lingbo Liu; Fuxian Li; Jincai Huang", + "authorids": "", + "aff": "School of System Engineering, National University of Defense Technology; Department of Computer Sciences, The Hong Kong Polytechnic University; Department of Electronic Engineering, Tsinghua University; School of System Engineering, National University of Defense Technology", + "bibtex": "@article{Jin_Liu_Li_Huang_2023, title={Spatio-Temporal Graph Neural Point Process for Traffic Congestion Event Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26669}, DOI={10.1609/aaai.v37i12.26669}, abstractNote={Traffic congestion event prediction is an important yet challenging task in intelligent transportation systems. Many existing works about traffic prediction integrate various temporal encoders and graph convolution networks (GCNs), called spatio-temporal graph-based neural networks, which focus on predicting dense variables such as flow, speed and demand in time snapshots, but they can hardly forecast the traffic congestion events that are sparsely distributed on the continuous time axis. In recent years, neural point process (NPP) has emerged as an appropriate framework for event prediction in continuous time scenarios. However, most conventional works about NPP cannot model the complex spatio-temporal dependencies and congestion evolution patterns. To address these limitations, we propose a spatio-temporal graph neural point process framework, named STGNPP for traffic congestion event prediction. Specifically, we first design the spatio-temporal graph learning module to fully capture the long-range spatio-temporal dependencies from the historical traffic state data along with the road network. 
The extracted spatio-temporal hidden representation and congestion event information are then fed into a continuous gated recurrent unit to model the congestion evolution patterns. In particular, to fully exploit the periodic information, we also improve the intensity function calculation of the point process with a periodic gated mechanism. Finally, our model simultaneously predicts the occurrence time and duration of the next congestion. Extensive experiments on two real-world datasets demonstrate that our method achieves superior performance in comparison to existing state-of-the-art approaches.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Guangyin and Liu, Lingbo and Li, Fuxian and Huang, Jincai}, year={2023}, month={Jun.}, pages={14268-14276} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26669/26441", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26669", + "pdf_size": 1401284, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1180316529513303714&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "nudt.edu.cn;gmail.com;163.com;nudt.edu.cn", + "email": "nudt.edu.cn;gmail.com;163.com;nudt.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "National University of Defense Technology;The Hong Kong Polytechnic University;Tsinghua University", + "aff_unique_dep": "School of System Engineering;Department of Computer Sciences;Department of Electronic Engineering", + "aff_unique_url": "http://www.nudt.edu.cn/;https://www.polyu.edu.hk;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "NUDT;PolyU;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25976", + "title": "Spatio-Temporal Meta-Graph Learning for Traffic Forecasting", + "track": "main", + 
"status": "Technical", + "abstract": "Traffic forecasting as a canonical task of multivariate time series forecasting has been a significant research topic in AI community. To address the spatio-temporal heterogeneity and non-stationarity implied in the traffic stream, in this study, we propose Spatio-Temporal Meta-Graph Learning as a novel Graph Structure Learning mechanism on spatio-temporal data. Specifically, we implement this idea into Meta-Graph Convolutional Recurrent Network (MegaCRN) by plugging the Meta-Graph Learner powered by a Meta-Node Bank into GCRN encoder-decoder. We conduct a comprehensive evaluation on two benchmark datasets (i.e., METR-LA and PEMS-BAY) and a new large-scale traffic speed dataset called EXPY-TKY that covers 1843 expressway road links in Tokyo. Our model outperformed the state-of-the-arts on all three datasets. Besides, through a series of qualitative evaluations, we demonstrate that our model can explicitly disentangle the road links and time slots with different patterns and be robustly adaptive to any anomalous traffic situations. 
Codes and datasets are available at https://github.com/deepkashiwa20/MegaCRN.", + "primary_area": "machine learning ii", + "author": "Renhe Jiang; Zhaonan Wang; Jiawei Yong; Puneet Jeph; Quanjun Chen; Yasumasa Kobayashi; Xuan Song; Shintaro Fukushima; Toyotaro Suzumura", + "authorids": "", + "aff": "Information Technology Center, The University of Tokyo + Center for Spatial Information Science, The University of Tokyo; Center for Spatial Information Science, The University of Tokyo; Toyota Motor Corporation; Center for Spatial Information Science, The University of Tokyo; Center for Spatial Information Science, The University of Tokyo; Toyota Motor Corporation; Center for Spatial Information Science, The University of Tokyo; Toyota Motor Corporation; Information Technology Center, The University of Tokyo", + "bibtex": "@article{Jiang_Wang_Yong_Jeph_Chen_Kobayashi_Song_Fukushima_Suzumura_2023, title={Spatio-Temporal Meta-Graph Learning for Traffic Forecasting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25976}, DOI={10.1609/aaai.v37i7.25976}, abstractNote={Traffic forecasting as a canonical task of multivariate time series forecasting has been a significant research topic in AI community. To address the spatio-temporal heterogeneity and non-stationarity implied in the traffic stream, in this study, we propose Spatio-Temporal Meta-Graph Learning as a novel Graph Structure Learning mechanism on spatio-temporal data. Specifically, we implement this idea into Meta-Graph Convolutional Recurrent Network (MegaCRN) by plugging the Meta-Graph Learner powered by a Meta-Node Bank into GCRN encoder-decoder. We conduct a comprehensive evaluation on two benchmark datasets (i.e., METR-LA and PEMS-BAY) and a new large-scale traffic speed dataset called EXPY-TKY that covers 1843 expressway road links in Tokyo. Our model outperformed the state-of-the-arts on all three datasets. 
Besides, through a series of qualitative evaluations, we demonstrate that our model can explicitly disentangle the road links and time slots with different patterns and be robustly adaptive to any anomalous traffic situations. Codes and datasets are available at https://github.com/deepkashiwa20/MegaCRN.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jiang, Renhe and Wang, Zhaonan and Yong, Jiawei and Jeph, Puneet and Chen, Quanjun and Kobayashi, Yasumasa and Song, Xuan and Fukushima, Shintaro and Suzumura, Toyotaro}, year={2023}, month={Jun.}, pages={8078-8086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25976/25748", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25976", + "pdf_size": 731725, + "gs_citation": 208, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5695834160226009861&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "csis.u-tokyo.ac.jp;csis.u-tokyo.ac.jp;mail.toyota.co.jp;csis.u-tokyo.ac.jp;csis.u-tokyo.ac.jp;toyota.co.jp;csis.u-tokyo.ac.jp;mail.toyota.co.jp;itc.u-tokyo.ac.jp", + "email": "csis.u-tokyo.ac.jp;csis.u-tokyo.ac.jp;mail.toyota.co.jp;csis.u-tokyo.ac.jp;csis.u-tokyo.ac.jp;toyota.co.jp;csis.u-tokyo.ac.jp;mail.toyota.co.jp;itc.u-tokyo.ac.jp", + "github": "https://github.com/deepkashiwa20/MegaCRN", + "project": "", + "author_num": 9, + "aff_unique_index": "0+0;0;1;0;0;1;0;1;0", + "aff_unique_norm": "The University of Tokyo;Toyota Motor Corporation", + "aff_unique_dep": "Information Technology Center;", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.toyota-global.com", + "aff_unique_abbr": "UTokyo;Toyota", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Tokyo;", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-25542", + "title": "Spatio-Temporal Neural Structural Causal Models for Bike Flow Prediction", + "track": "main", + "status": 
"Technical", + "abstract": "As a representative of public transportation, the fundamental issue of managing bike-sharing systems is bike flow prediction. Recent methods overemphasize the spatio-temporal correlations in the data, ignoring the effects of contextual conditions on the transportation system and the inter-regional time-varying causality. In addition, due to the disturbance of incomplete observations in the data, random contextual conditions lead to spurious correlations between data and features, making the prediction of the model ineffective in special scenarios. To overcome this issue, we propose a Spatio-temporal Neural Structure Causal Model(STNSCM) from the perspective of causality. First, we build a causal graph to describe the traffic prediction, and further analyze the causal relationship between the input data, contextual conditions, spatio-temporal states, and prediction results. Second, we propose to apply the frontdoor criterion to eliminate confounding biases in the feature extraction process. Finally, we propose a counterfactual representation reasoning module to extrapolate the spatio-temporal state under the factual scenario to future counterfactual scenarios to improve the prediction performance. Experiments on real-world datasets demonstrate the superior performance of our model, especially its resistance to fluctuations caused by the external environment. 
The source code and data will be released.", + "primary_area": "data mining and knowledge management", + "author": "Pan Deng; Yu Zhao; Junting Liu; Xiaofeng Jia; Mulan Wang", + "authorids": "", + "aff": "Beihang University; Beihang University; Beihang University; Beijing Big Data Centre; Beihang University", + "bibtex": "@article{Deng_Zhao_Liu_Jia_Wang_2023, title={Spatio-Temporal Neural Structural Causal Models for Bike Flow Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25542}, DOI={10.1609/aaai.v37i4.25542}, abstractNote={As a representative of public transportation, the fundamental issue of managing bike-sharing systems is bike flow prediction. Recent methods overemphasize the spatio-temporal correlations in the data, ignoring the effects of contextual conditions on the transportation system and the inter-regional time-varying causality. In addition, due to the disturbance of incomplete observations in the data, random contextual conditions lead to spurious correlations between data and features, making the prediction of the model ineffective in special scenarios. To overcome this issue, we propose a Spatio-temporal Neural Structure Causal Model(STNSCM) from the perspective of causality. First, we build a causal graph to describe the traffic prediction, and further analyze the causal relationship between the input data, contextual conditions, spatio-temporal states, and prediction results. Second, we propose to apply the frontdoor criterion to eliminate confounding biases in the feature extraction process. Finally, we propose a counterfactual representation reasoning module to extrapolate the spatio-temporal state under the factual scenario to future counterfactual scenarios to improve the prediction performance. Experiments on real-world datasets demonstrate the superior performance of our model, especially its resistance to fluctuations caused by the external environment. 
The source code and data will be released.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Pan and Zhao, Yu and Liu, Junting and Jia, Xiaofeng and Wang, Mulan}, year={2023}, month={Jun.}, pages={4242-4249} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25542/25314", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25542", + "pdf_size": 7809724, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2270705016695678899&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;jxj.beijing.gov.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;jxj.beijing.gov.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Beihang University;Beijing Big Data Centre", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.buaa.edu.cn/;", + "aff_unique_abbr": "BUAA;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25555", + "title": "Spatio-Temporal Self-Supervised Learning for Traffic Flow Prediction", + "track": "main", + "status": "Technical", + "abstract": "Robust prediction of citywide traffic flows at different time periods plays a crucial role in intelligent transportation systems. While previous work has made great efforts to model spatio-temporal correlations, existing methods still suffer from two key limitations: i) Most models collectively predict all regions' flows without accounting for spatial heterogeneity, i.e., different regions may have skewed traffic flow distributions. ii) These models fail to capture the temporal heterogeneity induced by time-varying traffic patterns, as they typically model temporal correlations with a shared parameterized space for all time periods. 
To tackle these challenges, we propose a novel Spatio-Temporal Self-Supervised Learning (ST-SSL) traffic prediction framework which enhances the traffic pattern representations to be reflective of both spatial and temporal heterogeneity, with auxiliary self-supervised learning paradigms. Specifically, our ST-SSL is built over an integrated module with temporal and spatial convolutions for encoding the information across space and time. To achieve the adaptive spatio-temporal self-supervised learning, our ST-SSL first performs the adaptive augmentation over the traffic flow graph data at both attribute- and structure-levels. On top of the augmented traffic graph, two SSL auxiliary tasks are constructed to supplement the main traffic prediction task with spatial and temporal heterogeneity-aware augmentation. Experiments on four benchmark datasets demonstrate that ST-SSL consistently outperforms various state-of-the-art baselines. Since spatio-temporal heterogeneity widely exists in practical datasets, the proposed framework may also cast light on other spatial-temporal applications. 
Model implementation is available at https://github.com/Echo-Ji/ST-SSL.", + "primary_area": "data mining and knowledge management", + "author": "Jiahao Ji; Jingyuan Wang; Chao Huang; Junjie Wu; Boren Xu; Zhenhe Wu; Junbo Zhang; Yu Zheng", + "authorids": "", + "aff": "School of Computer Science & Engineering, Beihang University, China; School of Computer Science & Engineering, Beihang University, China + School of Economics & Management, Beihang University, China; Department of Computer Science, Musketeers Foundation Institute of Data Science, University of Hong Kong, China; School of Economics & Management, Beihang University, China; School of Computer Science & Engineering, Beihang University, China; School of Computer Science & Engineering, Beihang University, China; JD Intelligent Cities Research, Beijing, China + JD iCity, JD Technology, Beijing, China; JD Intelligent Cities Research, Beijing, China + JD iCity, JD Technology, Beijing, China", + "bibtex": "@article{Ji_Wang_Huang_Wu_Xu_Wu_Zhang_Zheng_2023, title={Spatio-Temporal Self-Supervised Learning for Traffic Flow Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25555}, DOI={10.1609/aaai.v37i4.25555}, abstractNote={Robust prediction of citywide traffic flows at different time periods plays a crucial role in intelligent transportation systems. While previous work has made great efforts to model spatio-temporal correlations, existing methods still suffer from two key limitations: i) Most models collectively predict all regions\u2019 flows without accounting for spatial heterogeneity, i.e., different regions may have skewed traffic flow distributions. ii) These models fail to capture the temporal heterogeneity induced by time-varying traffic patterns, as they typically model temporal correlations with a shared parameterized space for all time periods. 
To tackle these challenges, we propose a novel Spatio-Temporal Self-Supervised Learning (ST-SSL) traffic prediction framework which enhances the traffic pattern representations to be reflective of both spatial and temporal heterogeneity, with auxiliary self-supervised learning paradigms. Specifically, our ST-SSL is built over an integrated module with temporal and spatial convolutions for encoding the information across space and time. To achieve the adaptive spatio-temporal self-supervised learning, our ST-SSL first performs the adaptive augmentation over the traffic flow graph data at both attribute- and structure-levels. On top of the augmented traffic graph, two SSL auxiliary tasks are constructed to supplement the main traffic prediction task with spatial and temporal heterogeneity-aware augmentation. Experiments on four benchmark datasets demonstrate that ST-SSL consistently outperforms various state-of-the-art baselines. Since spatio-temporal heterogeneity widely exists in practical datasets, the proposed framework may also cast light on other spatial-temporal applications. 
Model implementation is available at https://github.com/Echo-Ji/ST-SSL.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ji, Jiahao and Wang, Jingyuan and Huang, Chao and Wu, Junjie and Xu, Boren and Wu, Zhenhe and Zhang, Junbo and Zheng, Yu}, year={2023}, month={Jun.}, pages={4356-4364} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25555/25327", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25555", + "pdf_size": 1757539, + "gs_citation": 206, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17779963208995814844&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;hku.hk;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;outlook.com;jd.com", + "email": "buaa.edu.cn;buaa.edu.cn;hku.hk;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;outlook.com;jd.com", + "github": "https://github.com/Echo-Ji/ST-SSL", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0+0;1;0;0;0;2+3;2+3", + "aff_unique_norm": "Beihang University;University of Hong Kong;JD Intelligent Cities Research;JD Technology", + "aff_unique_dep": "School of Computer Science & Engineering;Department of Computer Science;;", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.hku.hk;;", + "aff_unique_abbr": "Beihang;HKU;;", + "aff_campus_unique_index": ";1;2;2", + "aff_campus_unique": ";Hong Kong;Beijing", + "aff_country_unique_index": "0;0+0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25423", + "title": "Spatiotemporal Deformation Perception for Fisheye Video Rectification", + "track": "main", + "status": "Technical", + "abstract": "Although the distortion correction of fisheye images has been extensively studied, the correction of fisheye videos is still an elusive challenge. For different frames of the fisheye video, the existing image correction methods ignore the correlation of sequences, resulting in temporal jitter in the corrected video. 
To solve this problem, we propose a temporal weighting scheme to get a plausible global optical flow, which mitigates the jitter effect by progressively reducing the weight of frames. Subsequently, we observe that the inter-frame optical flow of the video is facilitated to perceive the local spatial deformation of the fisheye video. Therefore, we derive the spatial deformation through the flows of fisheye and distorted-free videos, thereby enhancing the local accuracy of the predicted result. However, the independent correction for each frame disrupts the temporal correlation. Due to the property of fisheye video, a distorted moving object may be able to find its distorted-free pattern at another moment. To this end, a temporal deformation aggregator is designed to reconstruct the deformation correlation between frames and provide a reliable global feature. Our method achieves an end-to-end correction and demonstrates superiority in correction quality and stability compared with the SOTA correction methods.", + "primary_area": "computer vision iii", + "author": "Shangrong Yang; Chunyu Lin; Kang Liao; Yao Zhao", + "authorids": "", + "aff": "Institute of Information Science, Beijing Jiaotong University; Institute of Information Science, Beijing Jiaotong University; Institute of Information Science, Beijing Jiaotong University; Institute of Information Science, Beijing Jiaotong University", + "bibtex": "@article{Yang_Lin_Liao_Zhao_2023, title={Spatiotemporal Deformation Perception for Fisheye Video Rectification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25423}, DOI={10.1609/aaai.v37i3.25423}, abstractNote={Although the distortion correction of fisheye images has been extensively studied, the correction of fisheye videos is still an elusive challenge. For different frames of the fisheye video, the existing image correction methods ignore the correlation of sequences, resulting in temporal jitter in the corrected video. 
To solve this problem, we propose a temporal weighting scheme to get a plausible global optical flow, which mitigates the jitter effect by progressively reducing the weight of frames. Subsequently, we observe that the inter-frame optical flow of the video is facilitated to perceive the local spatial deformation of the fisheye video. Therefore, we derive the spatial deformation through the flows of fisheye and distorted-free videos, thereby enhancing the local accuracy of the predicted result. However, the independent correction for each frame disrupts the temporal correlation. Due to the property of fisheye video, a distorted moving object may be able to find its distorted-free pattern at another moment. To this end, a temporal deformation aggregator is designed to reconstruct the deformation correlation between frames and provide a reliable global feature. Our method achieves an end-to-end correction and demonstrates superiority in correction quality and stability compared with the SOTA correction methods.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Shangrong and Lin, Chunyu and Liao, Kang and Zhao, Yao}, year={2023}, month={Jun.}, pages={3181-3189} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25423/25195", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25423", + "pdf_size": 3029782, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6316749116445079327&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beijing Jiaotong University", + "aff_unique_dep": "Institute of Information Science", + "aff_unique_url": "http://www.bjtu.edu.cn", + "aff_unique_abbr": "BJTU", + "aff_campus_unique_index": "0;0;0;0", + 
"aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26204", + "title": "Spearman Rank Correlation Screening for Ultrahigh-Dimensional Censored Data", + "track": "main", + "status": "Technical", + "abstract": "Herein, we propose a Spearman rank correlation-based screening procedure for ultrahigh-dimensional data with censored response cases. The proposed method is model-free without specifying any regression forms of predictors or response variables and is robust under the unknown monotone transformations of these response variable and predictors. The sure-screening and rank-consistency properties are established under some mild regularity conditions. Simulation studies demonstrate that the new screening method performs well in the presence\nof a heavy-tailed distribution, strongly dependent predictors or outliers, and offers superior performance over the existing nonparametric screening procedures. In particular, the new screening method still works well when a response variable is observed under a high censoring rate. An illustrative example is provided.", + "primary_area": "machine learning iii", + "author": "Hongni Wang; Jingxin Yan; Xiaodong Yan", + "authorids": "", + "aff": "School of Statistics and Mathematics, Shandong University of Finance and Economics; Academy of Mathematics and Systems Science, Chinese Academy of Sciences; Zhongtai Securities Institute for Financial Studies, Shandong University + Shandong Province Key Laboratory of Financial Risk + Shandong National Center for Applied Mathematics", + "bibtex": "@article{Wang_Yan_Yan_2023, title={Spearman Rank Correlation Screening for Ultrahigh-Dimensional Censored Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26204}, DOI={10.1609/aaai.v37i8.26204}, abstractNote={Herein, we propose a Spearman rank correlation-based screening procedure for ultrahigh-dimensional data with censored response cases. 
The proposed method is model-free without specifying any regression forms of predictors or response variables and is robust under the unknown monotone transformations of these response variable and predictors. The sure-screening and rank-consistency properties are established under some mild regularity conditions. Simulation studies demonstrate that the new screening method performs well in the presence\nof a heavy-tailed distribution, strongly dependent predictors or outliers, and offers superior performance over the existing nonparametric screening procedures. In particular, the new screening method still works well when a response variable is observed under a high censoring rate. An illustrative example is provided.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Hongni and Yan, Jingxin and Yan, Xiaodong}, year={2023}, month={Jun.}, pages={10104-10112} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26204/25976", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26204", + "pdf_size": 187747, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=458455015592228501&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "126.com;mails.ucas.ac.cn;sdu.edu.cn", + "email": "126.com;mails.ucas.ac.cn;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+2+3", + "aff_unique_norm": "Shandong University of Finance and Economics;Chinese Academy of Sciences;Shandong University;Shandong National Center for Applied Mathematics", + "aff_unique_dep": "School of Statistics and Mathematics;Academy of Mathematics and Systems Science;Zhongtai Securities Institute for Financial Studies;Center for Applied Mathematics", + "aff_unique_url": "http://www.sdufe.edu.cn;http://www.amss.cas.cn;http://www.sdu.edu.cn;", + "aff_unique_abbr": ";AMSS;;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26336", + "title": "Spectral Feature Augmentation for Graph Contrastive Learning and Beyond", + "track": "main", + "status": "Technical", + "abstract": "Although augmentations (e.g., perturbation of graph edges, image crops) boost the efficiency of Contrastive Learning (CL), feature level augmentation is another plausible, complementary yet not well researched strategy. Thus, we present a novel spectral feature argumentation for contrastive learning on graphs (and images). To this end, for each data view, we estimate a low-rank approximation per feature map and subtract that approximation from the map to obtain its complement. This is achieved by the proposed herein incomplete power iteration, a non-standard power iteration regime which enjoys two valuable byproducts (under mere one or two iterations): (i) it partially balances spectrum of the feature map, and (ii) it injects the noise into rebalanced singular values of the feature map (spectral augmentation). For two views, we align these rebalanced feature maps as such an improved alignment step can focus more on less dominant singular values of matrices of both views, whereas the spectral augmentation does not affect the spectral angle alignment (singular vectors are not perturbed). We derive the analytical form for: (i) the incomplete power iteration to capture its spectrum-balancing effect, and (ii) the variance of singular values augmented implicitly by the noise. We also show that the spectral augmentation improves the generalization bound. 
Experiments on graph/image datasets show that our spectral feature augmentation outperforms baselines, and is complementary with other augmentation strategies and compatible with various contrastive losses.", + "primary_area": "machine learning iv", + "author": "Yifei Zhang; Hao Zhu; Zixing Song; Piotr Koniusz; Irwin King", + "authorids": "", + "aff": "The Chinese University of Hong Kong; Australian National University+Data61/CSIRO; The Chinese University of Hong Kong; Data61/CSIRO+Australian National University; The Chinese University of Hong Kong", + "bibtex": "@article{Zhang_Zhu_Song_Koniusz_King_2023, title={Spectral Feature Augmentation for Graph Contrastive Learning and Beyond}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26336}, DOI={10.1609/aaai.v37i9.26336}, abstractNote={Although augmentations (e.g., perturbation of graph edges, image crops) boost the efficiency of Contrastive Learning (CL), feature level augmentation is another plausible, complementary yet not well researched strategy. Thus, we present a novel spectral feature argumentation for contrastive learning on graphs (and images). To this end, for each data view, we estimate a low-rank approximation per feature map and subtract that approximation from the map to obtain its complement. This is achieved by the proposed herein incomplete power iteration, a non-standard power iteration regime which enjoys two valuable byproducts (under mere one or two iterations): (i) it partially balances spectrum of the feature map, and (ii) it injects the noise into rebalanced singular values of the feature map (spectral augmentation). For two views, we align these rebalanced feature maps as such an improved alignment step can focus more on less dominant singular values of matrices of both views, whereas the spectral augmentation does not affect the spectral angle alignment (singular vectors are not perturbed). 
We derive the analytical form for: (i) the incomplete power iteration to capture its spectrum-balancing effect, and (ii) the variance of singular values augmented implicitly by the noise. We also show that the spectral augmentation improves the generalization bound. Experiments on graph/image datasets show that our spectral feature augmentation outperforms baselines, and is complementary with other augmentation strategies and compatible with various contrastive losses.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yifei and Zhu, Hao and Song, Zixing and Koniusz, Piotr and King, Irwin}, year={2023}, month={Jun.}, pages={11289-11297} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26336/26108", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26336", + "pdf_size": 691159, + "gs_citation": 104, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9991113547562701916&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cse.cuhk.edu.hk;gmail.com;cse.cuhk.edu.hk;data61.csiro.au;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;gmail.com;cse.cuhk.edu.hk;data61.csiro.au;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+2;0;2+1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Australian National University;Commonwealth Scientific and Industrial Research Organisation", + "aff_unique_dep": ";;Data61", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.anu.edu.au;https://www.csiro.au", + "aff_unique_abbr": "CUHK;ANU;CSIRO", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+1;0;1+1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26049", + "title": "SplitNet: A Reinforcement Learning Based Sequence Splitting Method for the MinMax Multiple Travelling Salesman Problem", + "track": "main", + "status": "Technical", + "abstract": "MinMax 
Multiple Travelling Salesman Problem (mTSP) is an important class of combinatorial optimization problems with many practical applications, of which the goal is to minimize the longest tour of all vehicles. Due to its high computational complexity, existing methods for solving this problem cannot obtain a solution of satisfactory quality with fast speed, especially when the scale of the problem is large. In this paper, we propose a learning-based method named SplitNet to transform the single TSP solutions into the MinMax mTSP solutions of the same instances. Specifically, we generate single TSP solution sequences and split them into mTSP subsequences using an attention-based model trained by reinforcement learning. We also design a decision region for the splitting policy, which significantly reduces the policy action space on instances of various scales and thus improves the generalization ability of SplitNet. The experimental results show that SplitNet generalizes well and outperforms existing learning-based baselines and Google OR-Tools on widely-used random datasets of different scales and public datasets with fast solving speed.", + "primary_area": "machine learning ii", + "author": "Hebin Liang; Yi Ma; Zilin Cao; Tianyang Liu; Fei Ni; Zhigang Li; Jianye Hao", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University + Noah\u2019s Ark Lab, Huawei", + "bibtex": "@article{Liang_Ma_Cao_Liu_Ni_Li_Hao_2023, title={SplitNet: A Reinforcement Learning Based Sequence Splitting Method for the MinMax Multiple Travelling Salesman Problem}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/26049}, DOI={10.1609/aaai.v37i7.26049}, abstractNote={MinMax Multiple Travelling Salesman Problem (mTSP) is an important class of combinatorial optimization problems with many practical applications, of which the goal is to minimize the longest tour of all vehicles. Due to its high computational complexity, existing methods for solving this problem cannot obtain a solution of satisfactory quality with fast speed, especially when the scale of the problem is large. In this paper, we propose a learning-based method named SplitNet to transform the single TSP solutions into the MinMax mTSP solutions of the same instances. Specifically, we generate single TSP solution sequences and split them into mTSP subsequences using an attention-based model trained by reinforcement learning. We also design a decision region for the splitting policy, which significantly reduces the policy action space on instances of various scales and thus improves the generalization ability of SplitNet. 
The experimental results show that SplitNet generalizes well and outperforms existing learning-based baselines and Google OR-Tools on widely-used random datasets of different scales and public datasets with fast solving speed.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Hebin and Ma, Yi and Cao, Zilin and Liu, Tianyang and Ni, Fei and Li, Zhigang and Hao, Jianye}, year={2023}, month={Jun.}, pages={8720-8727} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26049/25821", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26049", + "pdf_size": 538784, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5092663633562934214&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0+1", + "aff_unique_norm": "Tianjin University;Huawei", + "aff_unique_dep": "College of Intelligence and Computing;Noah\u2019s Ark Lab", + "aff_unique_url": "http://www.tju.edu.cn;https://www.huawei.com", + "aff_unique_abbr": ";Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25780", + "title": "Splitting Answer Set Programs with Respect to Intensionality Statements", + "track": "main", + "status": "Technical", + "abstract": "Splitting a logic program allows us to reduce the task of computing its stable models to similar tasks for its subprograms. This can be used to increase solving performance and to prove the correctness of programs. 
We generalize the conditions under which this technique is applicable, by considering not only dependencies between predicates but also their arguments and context. This allows splitting programs commonly used in practice to which previous results were not applicable.", + "primary_area": "knowledge representation and reasoning", + "author": "Jorge Fandinno; Yuliya Lierler", + "authorids": "", + "aff": "University of Nebraska at Omaha; University of Nebraska at Omaha", + "bibtex": "@article{Fandinno_Lierler_2023, title={Splitting Answer Set Programs with Respect to Intensionality Statements}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25780}, DOI={10.1609/aaai.v37i5.25780}, abstractNote={Splitting a logic program allows us to reduce the task of computing its stable models to similar tasks for its subprograms. This can be used to increase solving performance and to prove the correctness of programs. We generalize the conditions under which this technique is applicable, by considering not only dependencies between predicates but also their arguments and context. 
This allows splitting programs commonly used in practice to which previous results were not applicable.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fandinno, Jorge and Lierler, Yuliya}, year={2023}, month={Jun.}, pages={6338-6345} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25780/25552", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25780", + "pdf_size": 157899, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:gjfs351Y-qQJ:scholar.google.com/&scioq=Splitting+Answer+Set+Programs+with+Respect+to+Intensionality+Statements&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "unomaha.edu;unomaha.edu", + "email": "unomaha.edu;unomaha.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Nebraska", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unomaha.edu", + "aff_unique_abbr": "UNO", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Omaha", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26205", + "title": "Stability-Based Generalization Analysis for Mixtures of Pointwise and Pairwise Learning", + "track": "main", + "status": "Technical", + "abstract": "Recently, some mixture algorithms of pointwise and pairwise learning (PPL) have been formulated by employing the hybrid error metric of \u201cpointwise loss + pairwise loss\u201d and have shown empirical effectiveness on feature selection, ranking and recommendation tasks. However, to the best of our knowledge, the learning theory foundation of PPL has not been touched in the existing works. In this paper, we try to fill this theoretical gap by investigating the generalization properties of PPL. 
After extending the definitions of algorithmic stability to the PPL setting, we establish the high-probability generalization bounds for uniformly stable PPL algorithms. Moreover, explicit convergence rates of stochastic gradient descent (SGD) and regularized risk minimization (RRM) for PPL are stated by developing the stability analysis technique of pairwise learning. In addition, the refined generalization bounds of PPL are obtained by replacing uniform stability with on-average stability.", + "primary_area": "machine learning iii", + "author": "Jiahuan Wang; Jun Chen; Hong Chen; Bin Gu; Weifu Li; Xin Tang", + "authorids": "", + "aff": "College of Science, Huazhong Agricultural University; College of Informatics, Huazhong Agricultural University; College of Science, Huazhong Agricultural University + Engineering Research Center of Intelligent Technology for Agriculture, Ministry of Education + Key Laboratory of Smart Farming for Agricultural Animals; Mohamed bin Zayed University of Artificial Intelligence; College of Science, Huazhong Agricultural University + Engineering Research Center of Intelligent Technology for Agriculture, Ministry of Education + Key Laboratory of Smart Farming for Agricultural Animals; Ping An Property & Casualty Insurance Company", + "bibtex": "@article{Wang_Chen_Chen_Gu_Li_Tang_2023, title={Stability-Based Generalization Analysis for Mixtures of Pointwise and Pairwise Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26205}, DOI={10.1609/aaai.v37i8.26205}, abstractNote={Recently, some mixture algorithms of pointwise and pairwise learning (PPL) have been formulated by employing the hybrid error metric of \u201cpointwise loss + pairwise loss\u201d and have shown empirical effectiveness on feature selection, ranking and recommendation tasks. However, to the best of our knowledge, the learning theory foundation of PPL has not been touched in the existing works. 
In this paper, we try to fill this theoretical gap by investigating the generalization properties of PPL. After extending the definitions of algorithmic stability to the PPL setting, we establish the high-probability generalization bounds for uniformly stable PPL algorithms. Moreover, explicit convergence rates of stochastic gradient descent (SGD) and regularized risk minimization (RRM) for PPL are stated by developing the stability analysis technique of pairwise learning. In addition, the refined generalization bounds of PPL are obtained by replacing uniform stability with on-average stability.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Jiahuan and Chen, Jun and Chen, Hong and Gu, Bin and Li, Weifu and Tang, Xin}, year={2023}, month={Jun.}, pages={10113-10121} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26205/25977", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26205", + "pdf_size": 153743, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1090637829206989689&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.hzau.edu.cn; ;mail.hzau.edu.cn; ; ; ", + "email": "mail.hzau.edu.cn; ;mail.hzau.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1+2;3;0+1+2;4", + "aff_unique_norm": "Huazhong Agricultural University;Engineering Research Center of Intelligent Technology for Agriculture;Key Laboratory of Smart Farming for Agricultural Animals;Mohamed bin Zayed University of Artificial Intelligence;Ping An Property & Casualty Insurance Company", + "aff_unique_dep": "College of Science;Ministry of Education;;;", + "aff_unique_url": "http://www.hzau.edu.cn/;;;https://www.mbzuai.ac.ae;https://www.pingan.com", + "aff_unique_abbr": "HAU;;;MBZUAI;Ping An", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0+0;1;0+0+0;0", + 
"aff_country_unique": "China;United Arab Emirates" + }, + { + "id": "article-25894", + "title": "Stability-Based Generalization Analysis of the Asynchronous Decentralized SGD", + "track": "main", + "status": "Technical", + "abstract": "The generalization ability often determines the success of machine learning algorithms in practice. Therefore, it is of great theoretical and practical importance to understand and bound the generalization error of machine learning algorithms. In this paper, we provide the first generalization results of the popular stochastic gradient descent (SGD) algorithm in the distributed asynchronous decentralized setting. Our analysis is based on the uniform stability tool, where stable means that the learned model does not change much in small variations of the training set. Under some mild assumptions, we perform a comprehensive generalizability analysis of the asynchronous decentralized SGD, including generalization error and excess generalization error bounds for the strongly convex, convex, and non-convex cases. Our theoretical results reveal the effects of the learning rate, training data size, training iterations, decentralized communication topology, and asynchronous delay on the generalization performance of the asynchronous decentralized SGD. We also study the optimization error regarding the objective function values and investigate how the initial point affects the excess generalization error. 
Finally, we conduct extensive experiments on MNIST, CIFAR-10, CIFAR-100, and Tiny-ImageNet datasets to validate the theoretical findings.", + "primary_area": "machine learning i", + "author": "Xiaoge Deng; Tao Sun; Shengwei Li; Dongsheng Li", + "authorids": "", + "aff": "National Lab for Parallel and Distributed Processing (PDL), College of Computer, National University of Defense Technology, Changsha, Hunan, China; National Lab for Parallel and Distributed Processing (PDL), College of Computer, National University of Defense Technology, Changsha, Hunan, China; National Lab for Parallel and Distributed Processing (PDL), College of Computer, National University of Defense Technology, Changsha, Hunan, China; National Lab for Parallel and Distributed Processing (PDL), College of Computer, National University of Defense Technology, Changsha, Hunan, China", + "bibtex": "@article{Deng_Sun_Li_Li_2023, title={Stability-Based Generalization Analysis of the Asynchronous Decentralized SGD}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25894}, DOI={10.1609/aaai.v37i6.25894}, abstractNote={The generalization ability often determines the success of machine learning algorithms in practice. Therefore, it is of great theoretical and practical importance to understand and bound the generalization error of machine learning algorithms. In this paper, we provide the first generalization results of the popular stochastic gradient descent (SGD) algorithm in the distributed asynchronous decentralized setting. Our analysis is based on the uniform stability tool, where stable means that the learned model does not change much in small variations of the training set. Under some mild assumptions, we perform a comprehensive generalizability analysis of the asynchronous decentralized SGD, including generalization error and excess generalization error bounds for the strongly convex, convex, and non-convex cases. 
Our theoretical results reveal the effects of the learning rate, training data size, training iterations, decentralized communication topology, and asynchronous delay on the generalization performance of the asynchronous decentralized SGD. We also study the optimization error regarding the objective function values and investigate how the initial point affects the excess generalization error. Finally, we conduct extensive experiments on MNIST, CIFAR-10, CIFAR-100, and Tiny-ImageNet datasets to validate the theoretical findings.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deng, Xiaoge and Sun, Tao and Li, Shengwei and Li, Dongsheng}, year={2023}, month={Jun.}, pages={7340-7348} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25894/25666", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25894", + "pdf_size": 207010, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18026181575849272109&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "nudt.edu.cn;163.com;gmail.com;nudt.edu.cn", + "email": "nudt.edu.cn;163.com;gmail.com;nudt.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "National University of Defense Technology", + "aff_unique_dep": "College of Computer", + "aff_unique_url": "http://www.nudt.edu.cn", + "aff_unique_abbr": "NUDT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Changsha", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26303", + "title": "Stable Learning via Sparse Variable Independence", + "track": "main", + "status": "Technical", + "abstract": "The problem of covariate-shift generalization has attracted intensive research attention. 
Previous stable learning algorithms employ sample reweighting schemes to decorrelate the covariates when there is no explicit domain information about training data. However, with finite samples, it is difficult to achieve the desirable weights that ensure perfect independence to get rid of the unstable variables. Besides, decorrelating within stable variables may bring about high variance of learned models because of the over-reduced effective sample size. A tremendous sample size is required for these algorithms to work. In this paper, with theoretical justification, we propose SVI (Sparse Variable Independence) for the covariate-shift generalization problem. We introduce sparsity constraint to compensate for the imperfectness of sample reweighting under the finite-sample setting in previous methods. Furthermore, we organically combine independence-based sample reweighting and sparsity-based variable selection in an iterative way to avoid decorrelating within stable variables, increasing the effective sample size to alleviate variance inflation. Experiments on both synthetic and real-world datasets demonstrate the improvement of covariate-shift generalization performance brought by SVI.", + "primary_area": "machine learning iv", + "author": "Han Yu; Peng Cui; Yue He; Zheyan Shen; Yong Lin; Renzhe Xu; Xingxuan Zhang", + "authorids": "", + "aff": "Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Hong Kong University of Science and Technology; Tsinghua University; Tsinghua University", + "bibtex": "@article{Yu_Cui_He_Shen_Lin_Xu_Zhang_2023, title={Stable Learning via Sparse Variable Independence}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26303}, DOI={10.1609/aaai.v37i9.26303}, abstractNote={The problem of covariate-shift generalization has attracted intensive research attention. 
Previous stable learning algorithms employ sample reweighting schemes to decorrelate the covariates when there is no explicit domain information about training data. However, with finite samples, it is difficult to achieve the desirable weights that ensure perfect independence to get rid of the unstable variables. Besides, decorrelating within stable variables may bring about high variance of learned models because of the over-reduced effective sample size. A tremendous sample size is required for these algorithms to work. In this paper, with theoretical justification, we propose SVI (Sparse Variable Independence) for the covariate-shift generalization problem. We introduce sparsity constraint to compensate for the imperfectness of sample reweighting under the finite-sample setting in previous methods. Furthermore, we organically combine independence-based sample reweighting and sparsity-based variable selection in an iterative way to avoid decorrelating within stable variables, increasing the effective sample size to alleviate variance inflation. 
Experiments on both synthetic and real-world datasets demonstrate the improvement of covariate-shift generalization performance brought by SVI.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Han and Cui, Peng and He, Yue and Shen, Zheyan and Lin, Yong and Xu, Renzhe and Zhang, Xingxuan}, year={2023}, month={Jun.}, pages={10998-11006} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26303/26075", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26303", + "pdf_size": 410093, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7957566118677093473&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.tsinghua.edu.cn;tsinghua.edu.cn;mails.tsinghua.edu.cn;qq.com;connect.ust.hk;gmail.com;hotmail.com", + "email": "mails.tsinghua.edu.cn;tsinghua.edu.cn;mails.tsinghua.edu.cn;qq.com;connect.ust.hk;gmail.com;hotmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;0;0", + "aff_unique_norm": "Tsinghua University;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.ust.hk", + "aff_unique_abbr": "THU;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26213", + "title": "State-Conditioned Adversarial Subgoal Generation", + "track": "main", + "status": "Technical", + "abstract": "Hierarchical reinforcement learning (HRL) proposes to solve difficult tasks by performing decision-making and control at successively higher levels of temporal abstraction. However, off-policy HRL often suffers from the problem of a non-stationary high-level policy since the low-level policy is constantly changing. 
In this paper, we propose a novel HRL approach for mitigating the non-stationarity by adversarially enforcing the high-level policy to generate subgoals compatible with the current instantiation of the low-level policy. In practice, the adversarial learning is implemented by training a simple state conditioned discriminator network concurrently with the high-level policy which determines the compatibility level of subgoals. Comparison to state-of-the-art algorithms shows that our approach improves both learning efficiency and performance in challenging continuous control tasks.", + "primary_area": "machine learning iii", + "author": "Vivienne Huiling Wang; Joni Pajarinen; Tinghuai Wang; Joni-Kristian K\u00e4m\u00e4r\u00e4inen", + "authorids": "", + "aff": "Computing Sciences, Tampere University, Finland + Department of Electrical Engineering and Automation, Aalto University, Finland; Department of Electrical Engineering and Automation, Aalto University, Finland; Huawei Helsinki Research Center, Finland; Computing Sciences, Tampere University, Finland", + "bibtex": "@article{Wang_Pajarinen_Wang_K\u00e4m\u00e4r\u00e4inen_2023, title={State-Conditioned Adversarial Subgoal Generation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26213}, DOI={10.1609/aaai.v37i8.26213}, abstractNote={Hierarchical reinforcement learning (HRL) proposes to solve difficult tasks by performing decision-making and control at successively higher levels of temporal abstraction. However, off-policy HRL often suffers from the problem of a non-stationary high-level policy since the low-level policy is constantly changing. In this paper, we propose a novel HRL approach for mitigating the non-stationarity by adversarially enforcing the high-level policy to generate subgoals compatible with the current instantiation of the low-level policy. 
In practice, the adversarial learning is implemented by training a simple state conditioned discriminator network concurrently with the high-level policy which determines the compatibility level of subgoals. Comparison to state-of-the-art algorithms shows that our approach improves both learning efficiency and performance in challenging continuous control tasks.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Vivienne Huiling and Pajarinen, Joni and Wang, Tinghuai and K\u00e4m\u00e4r\u00e4inen, Joni-Kristian}, year={2023}, month={Jun.}, pages={10184-10191} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26213/25985", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26213", + "pdf_size": 14197868, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15066713962948466444&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff_domain": "tuni.fi;aalto.fi;huawei.com;tuni.fi", + "email": "tuni.fi;aalto.fi;huawei.com;tuni.fi", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;2;0", + "aff_unique_norm": "Tampere University;Aalto University;Huawei", + "aff_unique_dep": "Computing Sciences;Department of Electrical Engineering and Automation;Huawei Helsinki Research Center", + "aff_unique_url": "https://www.tuni.fi;https://www.aalto.fi;https://www.huawei.com", + "aff_unique_abbr": ";Aalto;Huawei", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Helsinki", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "Finland" + }, + { + "id": "article-25647", + "title": "Steganography of Steganographic Networks", + "track": "main", + "status": "Technical", + "abstract": "Steganography is a technique for covert communication between two parties. 
With the rapid development of deep neural networks (DNN), more and more steganographic networks are proposed recently, which are shown to be promising to achieve good performance. Unlike the traditional handcrafted steganographic tools, a steganographic network is relatively large in size. It raises concerns on how to covertly transmit the steganographic network in public channels, which is a crucial stage in the pipeline of steganography in real world applications. To address such an issue, we propose a novel scheme for steganography of steganographic networks in this paper. Unlike the existing steganographic schemes which focus on the subtle modification of the cover data to accommodate the secrets. We propose to disguise a steganographic network (termed as the secret DNN model) into a stego DNN model which performs an ordinary machine learning task (termed as the stego task). During the model disguising, we select and tune a subset of filters in the secret DNN model to preserve its function on the secret task, where the remaining filters are reactivated according to a partial optimization strategy to disguise the whole secret DNN model into a stego DNN model. The secret DNN model can be recovered from the stego DNN model when needed. Various experiments have been conducted to demonstrate the advantage of our proposed method for covert communication of steganographic networks as well as general DNN models.", + "primary_area": "domain s of application", + "author": "Guobiao Li; Sheng Li; Meiling Li; Xinpeng Zhang; Zhenxing Qian", + "authorids": "", + "aff": "Fudan University; Fudan University; Fudan University; Fudan University; Fudan University", + "bibtex": "@article{Li_Li_Li_Zhang_Qian_2023, title={Steganography of Steganographic Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25647}, DOI={10.1609/aaai.v37i4.25647}, abstractNote={Steganography is a technique for covert communication between two parties. 
With the rapid development of deep neural networks (DNN), more and more steganographic networks are proposed recently, which are shown to be promising to achieve good performance. Unlike the traditional handcrafted steganographic tools, a steganographic network is relatively large in size. It raises concerns on how to covertly transmit the steganographic network in public channels, which is a crucial stage in the pipeline of steganography in real world applications. To address such an issue, we propose a novel scheme for steganography of steganographic networks in this paper. Unlike the existing steganographic schemes which focus on the subtle modification of the cover data to accommodate the secrets. We propose to disguise a steganographic network (termed as the secret DNN model) into a stego DNN model which performs an ordinary machine learning task (termed as the stego task). During the model disguising, we select and tune a subset of filters in the secret DNN model to preserve its function on the secret task, where the remaining filters are reactivated according to a partial optimization strategy to disguise the whole secret DNN model into a stego DNN model. The secret DNN model can be recovered from the stego DNN model when needed. 
Various experiments have been conducted to demonstrate the advantage of our proposed method for covert communication of steganographic networks as well as general DNN models.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Guobiao and Li, Sheng and Li, Meiling and Zhang, Xinpeng and Qian, Zhenxing}, year={2023}, month={Jun.}, pages={5178-5186} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25647/25419", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25647", + "pdf_size": 381421, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12921205586685572710&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26050", + "title": "Stepdown SLOPE for Controlled Feature Selection", + "track": "main", + "status": "Technical", + "abstract": "Sorted L-One Penalized Estimation (SLOPE) has shown the nice theoretical property as well as empirical behavior recently on the false discovery rate (FDR) control of high-dimensional feature selection by adaptively imposing the non-increasing sequence of tuning parameters on the sorted L1 penalties. This paper goes beyond the previous concern limited to the FDR control by considering the stepdown-based SLOPE in order to control the probability of k or more false rejections (k-FWER) and the false discovery proportion (FDP). 
Two new SLOPEs, called k-SLOPE and F-SLOPE, are proposed to realize k-FWER and FDP control respectively, where the stepdown procedure is injected into the SLOPE scheme. For the proposed stepdown SLOPEs, we establish their theoretical guarantees on controlling k-FWER and FDP under the orthogonal design setting, and also provide an intuitive guideline for the choice of regularization parameter sequence in much general setting. Empirical evaluations on simulated data validate the effectiveness of our approaches on controlled feature selection and support our theoretical findings.", + "primary_area": "machine learning ii", + "author": "Jingxuan Liang; Xuelin Zhang; Hong Chen; Weifu Li; Xin Tang", + "authorids": "", + "aff": "College of Science, Huazhong Agricultural University; College of Informatics, Huazhong Agricultural University; College of Science, Huazhong Agricultural University + Engineering Research Center of Intelligent Technology for Agriculture, Ministry of Education + Key Laboratory of Smart Farming for Agricultural Animals + Hubei Engineering Technology Research Center of Agricultural Big Data; College of Science, Huazhong Agricultural University + Engineering Research Center of Intelligent Technology for Agriculture, Ministry of Education + Key Laboratory of Smart Farming for Agricultural Animals + Hubei Engineering Technology Research Center of Agricultural Big Data; Ping An Property & Casualty Insurance Company", + "bibtex": "@article{Liang_Zhang_Chen_Li_Tang_2023, title={Stepdown SLOPE for Controlled Feature Selection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26050}, DOI={10.1609/aaai.v37i7.26050}, abstractNote={Sorted L-One Penalized Estimation (SLOPE) has shown the nice theoretical property as well as empirical behavior recently on the false discovery rate (FDR) control of high-dimensional feature selection by adaptively imposing the non-increasing sequence of tuning parameters on the sorted L1 penalties. 
This paper goes beyond the previous concern limited to the FDR control by considering the stepdown-based SLOPE in order to control the probability of k or more false rejections (k-FWER) and the false discovery proportion (FDP). Two new SLOPEs, called k-SLOPE and F-SLOPE, are proposed to realize k-FWER and FDP control respectively, where the stepdown procedure is injected into the SLOPE scheme. For the proposed stepdown SLOPEs, we establish their theoretical guarantees on controlling k-FWER and FDP under the orthogonal design setting, and also provide an intuitive guideline for the choice of regularization parameter sequence in much general setting. Empirical evaluations on simulated data validate the effectiveness of our approaches on controlled feature selection and support our theoretical findings.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liang, Jingxuan and Zhang, Xuelin and Chen, Hong and Li, Weifu and Tang, Xin}, year={2023}, month={Jun.}, pages={8728-8736} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26050/25822", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26050", + "pdf_size": 358754, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:qsdmO2Hgj0QJ:scholar.google.com/&scioq=Stepdown+SLOPE+for+Controlled+Feature+Selection&hl=en&as_sdt=0,5", + "gs_version_total": 5, + "aff_domain": "mail.hzau.edu.cn; ;mail.hzau.edu.cn; ; ", + "email": "mail.hzau.edu.cn; ;mail.hzau.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1+2+3;0+1+2+3;4", + "aff_unique_norm": "Huazhong Agricultural University;Engineering Research Center of Intelligent Technology for Agriculture;Key Laboratory of Smart Farming for Agricultural Animals;Hubei Engineering Technology Research Center;Ping An Property & Casualty Insurance Company", + "aff_unique_dep": "College of Science;Ministry of Education;;Agricultural Big Data;", + 
"aff_unique_url": "http://www.hzau.edu.cn/;;;;https://www.pingan.com", + "aff_unique_abbr": "HAU;;;;Ping An", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0+0+0;0+0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25268", + "title": "StereoDistill: Pick the Cream from LiDAR for Distilling Stereo-Based 3D Object Detection", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we propose a cross-modal distillation method named StereoDistill to narrow the gap between the stereo and LiDAR-based approaches via distilling the stereo detectors from the superior LiDAR model at the response level, which is usually overlooked in 3D object detection distillation.\nThe key designs of StereoDistill are: the X-component Guided Distillation~(XGD) for regression and the Cross-anchor Logit Distillation~(CLD) for classification. In XGD, instead of empirically adopting a threshold to select the high-quality teacher predictions as soft targets, we decompose the predicted 3D box into \nsub-components and retain the corresponding part for distillation if the teacher component pilot is consistent with ground truth to largely boost the number of positive predictions and alleviate the mimicking difficulty of the student model. For CLD, we aggregate the probability distribution of all anchors at the same position to encourage the highest probability anchor rather than individually distill the distribution at the anchor level. 
\nFinally, our StereoDistill achieves state-of-the-art results for stereo-based 3D detection on the KITTI test benchmark and extensive experiments on KITTI and Argoverse Dataset validate the effectiveness.", + "primary_area": "computer vision ii", + "author": "Zhe Liu; Xiaoqing Ye; Xiao Tan; Errui Ding; Xiang Bai", + "authorids": "", + "aff": "Huazhong University of Science and Technology; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Huazhong University of Science and Technology", + "bibtex": "@article{Liu_Ye_Tan_Ding_Bai_2023, title={StereoDistill: Pick the Cream from LiDAR for Distilling Stereo-Based 3D Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25268}, DOI={10.1609/aaai.v37i2.25268}, abstractNote={In this paper, we propose a cross-modal distillation method named StereoDistill to narrow the gap between the stereo and LiDAR-based approaches via distilling the stereo detectors from the superior LiDAR model at the response level, which is usually overlooked in 3D object detection distillation.\nThe key designs of StereoDistill are: the X-component Guided Distillation~(XGD) for regression and the Cross-anchor Logit Distillation~(CLD) for classification. In XGD, instead of empirically adopting a threshold to select the high-quality teacher predictions as soft targets, we decompose the predicted 3D box into sub-components and retain the corresponding part for distillation if the teacher component pilot is consistent with ground truth to largely boost the number of positive predictions and alleviate the mimicking difficulty of the student model. For CLD, we aggregate the probability distribution of all anchors at the same position to encourage the highest probability anchor rather than individually distill the distribution at the anchor level. 
Finally, our StereoDistill achieves state-of-the-art results for stereo-based 3D detection on the KITTI test benchmark and extensive experiments on KITTI and Argoverse Dataset validate the effectiveness.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Zhe and Ye, Xiaoqing and Tan, Xiao and Ding, Errui and Bai, Xiang}, year={2023}, month={Jun.}, pages={1790-1798} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25268/25040", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25268", + "pdf_size": 1158482, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1263834407598498855&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff_domain": "hust.edu.cn;whu.edu.cn;gmail.com;baidu.com;hust.edu.cn", + "email": "hust.edu.cn;whu.edu.cn;gmail.com;baidu.com;hust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "Huazhong University of Science and Technology;Baidu Inc.", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hust.edu.cn;https://www.baidu.com", + "aff_unique_abbr": "HUST;Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26140", + "title": "Stochastic Contextual Bandits with Long Horizon Rewards", + "track": "main", + "status": "Technical", + "abstract": "The growing interest in complex decision-making and language modeling problems highlights the importance of sample-efficient learning over very long horizons. This work takes a step in this direction by investigating contextual linear bandits where the current reward depends on at most s prior actions and contexts (not necessarily consecutive), up to a time horizon of h. 
In order to avoid polynomial dependence on h, we propose new algorithms that leverage sparsity to discover the dependence pattern and arm parameters jointly. We consider both the data-poor (T<h) and data-rich (T>= h) regimes and derive respective regret upper bounds O(d square-root(sT) +min(q, T)) and O( square-root(sdT) ), with sparsity s, feature dimension d, total time horizon T, and q that is adaptive to the reward dependence pattern. Complementing upper bounds, we also show that learning over a single trajectory brings inherent challenges: While the dependence pattern and arm parameters form a rank-1 matrix, circulant matrices are not isometric over rank-1 manifolds and sample complexity indeed benefits from the sparse reward dependence structure. Our results necessitate a new analysis to address long-range temporal dependencies across data and avoid polynomial dependence on the reward horizon h. Specifically, we utilize connections to the restricted isometry property of circulant matrices formed by dependent sub-Gaussian vectors and establish new guarantees that are also of independent interest.", + "primary_area": "machine learning iii", + "author": "Yuzhen Qin; Yingcong Li; Fabio Pasqualetti; Maryam Fazel; Samet Oymak", + "authorids": "", + "aff": "University of California, Riverside; University of California, Riverside; University of California, Riverside; University of Washington; University of Michigan", + "bibtex": "@article{Qin_Li_Pasqualetti_Fazel_Oymak_2023, title={Stochastic Contextual Bandits with Long Horizon Rewards}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26140}, DOI={10.1609/aaai.v37i8.26140}, abstractNote={The growing interest in complex decision-making and language modeling problems highlights the importance of sample-efficient learning over very long horizons. 
This work takes a step in this direction by investigating contextual linear bandits where the current reward depends on at most s prior actions and contexts (not necessarily consecutive), up to a time horizon of h. In order to avoid polynomial dependence on h, we propose new algorithms that leverage sparsity to discover the dependence pattern and arm parameters jointly. We consider both the data-poor (T<h) and data-rich (T>= h) regimes and derive respective regret upper bounds O(d square-root(sT) +min(q, T)) and O( square-root(sdT) ), with sparsity s, feature dimension d, total time horizon T, and q that is adaptive to the reward dependence pattern. Complementing upper bounds, we also show that learning over a single trajectory brings inherent challenges: While the dependence pattern and arm parameters form a rank-1 matrix, circulant matrices are not isometric over rank-1 manifolds and sample complexity indeed benefits from the sparse reward dependence structure. Our results necessitate a new analysis to address long-range temporal dependencies across data and avoid polynomial dependence on the reward horizon h. 
Specifically, we utilize connections to the restricted isometry property of circulant matrices formed by dependent sub-Gaussian vectors and establish new guarantees that are also of independent interest.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Yuzhen and Li, Yingcong and Pasqualetti, Fabio and Fazel, Maryam and Oymak, Samet}, year={2023}, month={Jun.}, pages={9525-9533} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26140/25912", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26140", + "pdf_size": 1088361, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6636537124991681507&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "ucr.edu;ucr.edu;engr.ucr.edu;uw.edu;umich.edu", + "email": "ucr.edu;ucr.edu;engr.ucr.edu;uw.edu;umich.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;2", + "aff_unique_norm": "University of California, Riverside;University of Washington;University of Michigan", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucr.edu;https://www.washington.edu;https://www.umich.edu", + "aff_unique_abbr": "UCR;UW;UM", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Riverside;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25421", + "title": "Stop-Gradient Softmax Loss for Deep Metric Learning", + "track": "main", + "status": "Technical", + "abstract": "Deep metric learning aims to learn a feature space that models the similarity between images, and feature normalization is a critical step for boosting performance. However directly optimizing L2-normalized softmax loss cause the network to fail to converge. 
Therefore some SOTA approaches appends a scale layer after the inner product to relieve the convergence problem, but it incurs a new problem that it's difficult to learn the best scaling parameters. In this letter, we look into the characteristic of softmax-based approaches and propose a novel learning objective function Stop-Gradient Softmax Loss (SGSL) to solve the convergence problem in softmax-based deep metric learning with L2-normalization. In addition, we found a useful trick named Remove the last BN-ReLU (RBR). It removes the last BN-ReLU in the backbone to reduce the learning burden of the model.\nExperimental results on four fine-grained image retrieval benchmarks show that our proposed approach outperforms most existing approaches, i.e., our approach achieves 75.9% on CUB-200-2011, 94.7% on CARS196 and 83.1% on SOP which outperforms other approaches at least 1.7%, 2.9% and 1.7% on Recall@1.", + "primary_area": "computer vision iii", + "author": "Lu Yang; Peng Wang; Yanning Zhang", + "authorids": "", + "aff": "School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China+National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, China; School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China+National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, China; School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China+National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, China", + "bibtex": "@article{Yang_Wang_Zhang_2023, title={Stop-Gradient Softmax Loss for Deep Metric Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25421}, DOI={10.1609/aaai.v37i3.25421}, abstractNote={Deep metric learning aims to learn a feature space that models the similarity between images, and feature normalization is a critical step for 
boosting performance. However directly optimizing L2-normalized softmax loss cause the network to fail to converge. Therefore some SOTA approaches appends a scale layer after the inner product to relieve the convergence problem, but it incurs a new problem that it\u2019s difficult to learn the best scaling parameters. In this letter, we look into the characteristic of softmax-based approaches and propose a novel learning objective function Stop-Gradient Softmax Loss (SGSL) to solve the convergence problem in softmax-based deep metric learning with L2-normalization. In addition, we found a useful trick named Remove the last BN-ReLU (RBR). It removes the last BN-ReLU in the backbone to reduce the learning burden of the model.\nExperimental results on four fine-grained image retrieval benchmarks show that our proposed approach outperforms most existing approaches, i.e., our approach achieves 75.9% on CUB-200-2011, 94.7% on CARS196 and 83.1% on SOP which outperforms other approaches at least 1.7%, 2.9% and 1.7% on Recall@1.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Lu and Wang, Peng and Zhang, Yanning}, year={2023}, month={Jun.}, pages={3164-3172} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25421/25193", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25421", + "pdf_size": 687748, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16737300966105943342&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.nwpu.edu.cn;nwpu.edu.cn;nwpu.edu.cn", + "email": "mail.nwpu.edu.cn;nwpu.edu.cn;nwpu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "Northwestern Polytechnical University;National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": 
"https://www.nwpu.edu.cn;", + "aff_unique_abbr": "NPU;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25165", + "title": "Store and Fetch Immediately: Everything Is All You Need for Space-Time Video Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Existing space-time video super-resolution (ST-VSR) methods fail to achieve high-quality reconstruction since they fail to fully explore the spatial-temporal correlations, long-range components in particular. Although the recurrent structure for ST-VSR adopts bidirectional propagation to aggregate information from the entire video, collecting the temporal information between the past and future via one-stage representations inevitably loses the long-range relations. To alleviate the limitation, this paper proposes an immediate storeand-fetch network to promote long-range correlation learning, where the stored information from the past and future can be refetched to help the representation of the current frame. Specifically, the proposed network consists of two modules: a backward recurrent module (BRM) and a forward recurrent module (FRM). The former first performs backward inference from future to past, while storing future super-resolution (SR) information for each frame. Following that, the latter performs forward inference from past to future to super-resolve all frames, while storing past SR information for each frame. Since FRM inherits SR information from BRM, therefore, spatial and temporal information from the entire video sequence is immediately stored and fetched, which allows drastic improvement for ST-VSR. Extensive experiments both on ST-VSR and space video super-resolution (S-VSR) as well as time video super-resolution (T-VSR) have demonstrated the effectiveness of our proposed method over other state-of-the-art methods on public datasets. 
Code is available https://github.com/hhhhhumengshun/SFI-STVR", + "primary_area": "computer vision i", + "author": "Mengshun Hu; Kui Jiang; Zhixiang Nie; Jiahuan Zhou; Zheng Wang", + "authorids": "", + "aff": "School of Computer Science, Wuhan University; Huawei Technologies, Cloud BU; School of Computer Science, Wuhan University; Wangxuan Institute of Computer Technology, Peking University; School of Computer Science, Wuhan University", + "bibtex": "@article{Hu_Jiang_Nie_Zhou_Wang_2023, title={Store and Fetch Immediately: Everything Is All You Need for Space-Time Video Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25165}, DOI={10.1609/aaai.v37i1.25165}, abstractNote={Existing space-time video super-resolution (ST-VSR) methods fail to achieve high-quality reconstruction since they fail to fully explore the spatial-temporal correlations, long-range components in particular. Although the recurrent structure for ST-VSR adopts bidirectional propagation to aggregate information from the entire video, collecting the temporal information between the past and future via one-stage representations inevitably loses the long-range relations. To alleviate the limitation, this paper proposes an immediate storeand-fetch network to promote long-range correlation learning, where the stored information from the past and future can be refetched to help the representation of the current frame. Specifically, the proposed network consists of two modules: a backward recurrent module (BRM) and a forward recurrent module (FRM). The former first performs backward inference from future to past, while storing future super-resolution (SR) information for each frame. Following that, the latter performs forward inference from past to future to super-resolve all frames, while storing past SR information for each frame. 
Since FRM inherits SR information from BRM, therefore, spatial and temporal information from the entire video sequence is immediately stored and fetched, which allows drastic improvement for ST-VSR. Extensive experiments both on ST-VSR and space video super-resolution (S-VSR) as well as time video super-resolution (T-VSR) have demonstrated the effectiveness of our proposed method over other state-of-the-art methods on public datasets. Code is available https://github.com/hhhhhumengshun/SFI-STVR}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Mengshun and Jiang, Kui and Nie, Zhixiang and Zhou, Jiahuan and Wang, Zheng}, year={2023}, month={Jun.}, pages={863-871} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25165/24937", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25165", + "pdf_size": 447229, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7317037773272615321&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "whu.edu.cn;huawei.com;whu.edu.cn;pku.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;huawei.com;whu.edu.cn;pku.edu.cn;whu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;2;0", + "aff_unique_norm": "Wuhan University;Huawei Technologies;Peking University", + "aff_unique_dep": "School of Computer Science;Cloud BU;Wangxuan Institute of Computer Technology", + "aff_unique_url": "http://www.whu.edu.cn;https://www.huawei.com;http://www.pku.edu.cn", + "aff_unique_abbr": "WHU;Huawei;PKU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25709", + "title": "Strategic Facility Location with Clients That Minimize Total Waiting Time", + "track": "main", + "status": "Technical", + "abstract": "We study a non-cooperative two-sided facility location game in which facilities and 
clients behave strategically.\nThis is in contrast to many other facility location games in which clients simply visit their closest facility.\nFacility agents select a location on a graph to open a facility to attract as much purchasing power as possible, while client agents choose which facilities to patronize by strategically distributing their purchasing power in order to minimize their total waiting time. Here, the waiting time of a facility depends on its received total purchasing power. \nWe show that our client stage is an atomic splittable congestion game, which implies existence, uniqueness and efficient computation of a client equilibrium.\nTherefore, facility agents can efficiently predict client behavior and make strategic decisions accordingly.\nDespite that, we prove that subgame perfect equilibria do not exist in all instances of this game and that their existence is NP-hard to decide.\nOn the positive side, we provide a simple and efficient algorithm to compute 3-approximate subgame perfect equilibria.", + "primary_area": "game theory and economic paradigms", + "author": "Simon Krogmann; Pascal Lenzner; Alexander Skopalik", + "authorids": "", + "aff": "Hasso Plattner Institute, University of Potsdam; Hasso Plattner Institute, University of Potsdam; Department of Applied Mathematics, University of Twente", + "bibtex": "@article{Krogmann_Lenzner_Skopalik_2023, title={Strategic Facility Location with Clients That Minimize Total Waiting Time}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25709}, DOI={10.1609/aaai.v37i5.25709}, abstractNote={We study a non-cooperative two-sided facility location game in which facilities and clients behave strategically.\nThis is in contrast to many other facility location games in which clients simply visit their closest facility.\nFacility agents select a location on a graph to open a facility to attract as much purchasing power as possible, while client agents choose which facilities to patronize 
by strategically distributing their purchasing power in order to minimize their total waiting time. Here, the waiting time of a facility depends on its received total purchasing power. We show that our client stage is an atomic splittable congestion game, which implies existence, uniqueness and efficient computation of a client equilibrium.\nTherefore, facility agents can efficiently predict client behavior and make strategic decisions accordingly.\nDespite that, we prove that subgame perfect equilibria do not exist in all instances of this game and that their existence is NP-hard to decide.\nOn the positive side, we provide a simple and efficient algorithm to compute 3-approximate subgame perfect equilibria.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Krogmann, Simon and Lenzner, Pascal and Skopalik, Alexander}, year={2023}, month={Jun.}, pages={5714-5721} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25709/25481", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25709", + "pdf_size": 163121, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12668267648969322176&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 12, + "aff_domain": "hpi.de;hpi.de;utwente.nl", + "email": "hpi.de;hpi.de;utwente.nl", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Hasso Plattner Institute;University of Twente", + "aff_unique_dep": ";Department of Applied Mathematics", + "aff_unique_url": "https://www.hpi.de;https://www.utwente.nl", + "aff_unique_abbr": "HPI;UT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Potsdam;", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Germany;Netherlands" + }, + { + "id": "article-25694", + "title": "Strategyproofness and Proportionality in Party-Approval Multiwinner Elections", + "track": "main", + "status": "Technical", + "abstract": "In party-approval 
multiwinner elections the goal is to allocate the seats of a fixed-size committee to parties based on the approval ballots of the voters over the parties. In particular, each voter can approve multiple parties and each party can be assigned multiple seats. Two central requirements in this setting are proportional representation and strategyproofness. Intuitively, proportional representation requires that every sufficiently large group of voters with similar preferences is represented in the committee. Strategyproofness demands that no voter can benefit by misreporting her true preferences. We show that these two axioms are incompatible for anonymous party-approval multiwinner voting rules, thus proving a far-reaching impossibility theorem. The proof of this result is obtained by formulating the problem in propositional logic and then letting a SAT solver show that the formula is unsatisfiable. Additionally, we demonstrate how to circumvent this impossibility by considering a weakening of strategyproofness which requires that only voters who do not approve any elected party cannot manipulate. 
While most common voting rules fail even this weak notion of strategyproofness, we characterize Chamberlin-Courant approval voting within the class of Thiele rules based on this strategyproofness notion.", + "primary_area": "game theory and economic paradigms", + "author": "Th\u00e9o Delemazure; Tom Demeulemeester; Manuel Eberl; Jonas Israel; Patrick Lederer", + "authorids": "", + "aff": "Paris Dauphine University, PSL, CNRS, France; Research Center for Operations Research & Statistics, KU Leuven, Belgium; Computational Logic Group, University of Innsbruck, Austria; Research Group Efficient Algorithms, Technische Universit\u00e4t Berlin, Germany; Technical University of Munich, Germany", + "bibtex": "@article{Delemazure_Demeulemeester_Eberl_Israel_Lederer_2023, title={Strategyproofness and Proportionality in Party-Approval Multiwinner Elections}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25694}, DOI={10.1609/aaai.v37i5.25694}, abstractNote={In party-approval multiwinner elections the goal is to allocate the seats of a fixed-size committee to parties based on the approval ballots of the voters over the parties. In particular, each voter can approve multiple parties and each party can be assigned multiple seats. Two central requirements in this setting are proportional representation and strategyproofness. Intuitively, proportional representation requires that every sufficiently large group of voters with similar preferences is represented in the committee. Strategyproofness demands that no voter can benefit by misreporting her true preferences. We show that these two axioms are incompatible for anonymous party-approval multiwinner voting rules, thus proving a far-reaching impossibility theorem. The proof of this result is obtained by formulating the problem in propositional logic and then letting a SAT solver show that the formula is unsatisfiable. 
Additionally, we demonstrate how to circumvent this impossibility by considering a weakening of strategyproofness which requires that only voters who do not approve any elected party cannot manipulate. While most common voting rules fail even this weak notion of strategyproofness, we characterize Chamberlin-Courant approval voting within the class of Thiele rules based on this strategyproofness notion.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Delemazure, Th\u00e9o and Demeulemeester, Tom and Eberl, Manuel and Israel, Jonas and Lederer, Patrick}, year={2023}, month={Jun.}, pages={5591-5599} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25694/25466", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25694", + "pdf_size": 164351, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=124270978320617546&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 16, + "aff_domain": "dauphine.eu;kuleuven.be;uibk.ac.at;tu-berlin.de;in.tum.de", + "email": "dauphine.eu;kuleuven.be;uibk.ac.at;tu-berlin.de;in.tum.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "Paris Dauphine University;KU Leuven;University of Innsbruck;Technische Universit\u00e4t Berlin;Technical University of Munich", + "aff_unique_dep": ";Research Center for Operations Research & Statistics;Computational Logic Group;Research Group Efficient Algorithms;", + "aff_unique_url": "https://www.univ-paris-dauphine.fr;https://www.kuleuven.be;https://www.uibk.ac.at;https://www.tu-berlin.de;https://www.tum.de", + "aff_unique_abbr": "Paris Dauphine;KU Leuven;;TU Berlin;TUM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;3;3", + "aff_country_unique": "France;Belgium;Austria;Germany" + }, + { + "id": "article-25220", + "title": "Stroke Extraction of Chinese Character Based on Deep Structure Deformable 
Image Registration", + "track": "main", + "status": "Technical", + "abstract": "Stroke extraction of Chinese characters plays an important role in the field of character recognition and generation. The most existing character stroke extraction methods focus on image morphological features. These methods usually lead to errors of cross strokes extraction and stroke matching due to rarely using stroke semantics and prior information. In this paper, we propose a deep learning-based character stroke extraction method that takes semantic features and prior information of strokes into consideration. This method consists of three parts: image registration-based stroke registration that establishes the rough registration of the reference strokes and the target as prior information; image semantic segmentation-based stroke segmentation that preliminarily separates target strokes into seven categories; and high-precision extraction of single strokes. In the stroke registration, we propose a structure deformable image registration network to achieve structure-deformable transformation while maintaining the stable morphology of single strokes for character images with complex structures. In order to verify the effectiveness of the method, we construct two datasets respectively for calligraphy characters and regular handwriting characters. The experimental results show that our method strongly outperforms the baselines. 
Code is available at https://github.com/MengLi-l1/StrokeExtraction.", + "primary_area": "computer vision i", + "author": "Meng Li; Yahan Yu; Yi Yang; Guanghao Ren; Jian Wang", + "authorids": "", + "aff": "Institute of Automation, Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences + School of Artificial Intelligence, University of Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences; Institute of Automation, Chinese Academy of Sciences", + "bibtex": "@article{Li_Yu_Yang_Ren_Wang_2023, title={Stroke Extraction of Chinese Character Based on Deep Structure Deformable Image Registration}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25220}, DOI={10.1609/aaai.v37i1.25220}, abstractNote={Stroke extraction of Chinese characters plays an important role in the field of character recognition and generation. The most existing character stroke extraction methods focus on image morphological features. These methods usually lead to errors of cross strokes extraction and stroke matching due to rarely using stroke semantics and prior information. In this paper, we propose a deep learning-based character stroke extraction method that takes semantic features and prior information of strokes into consideration. This method consists of three parts: image registration-based stroke registration that establishes the rough registration of the reference strokes and the target as prior information; image semantic segmentation-based stroke segmentation that preliminarily separates target strokes into seven categories; and high-precision extraction of single strokes. In the stroke registration, we propose a structure deformable image registration network to achieve structure-deformable transformation while maintaining the stable morphology of single strokes for character images with complex structures. 
In order to verify the effectiveness of the method, we construct two datasets respectively for calligraphy characters and regular handwriting characters. The experimental results show that our method strongly outperforms the baselines. Code is available at https://github.com/MengLi-l1/StrokeExtraction.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Meng and Yu, Yahan and Yang, Yi and Ren, Guanghao and Wang, Jian}, year={2023}, month={Jun.}, pages={1360-1367} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25220/24992", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25220", + "pdf_size": 9252273, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1155725780755400388&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn;ia.ac.cn", + "github": "https://github.com/MengLi-l1/StrokeExtraction", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence", + "aff_unique_url": "http://www.ia.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26428", + "title": "Structurally Restricted Fragments of Numeric Planning \u2013 a Complexity Analysis", + "track": "main", + "status": "Technical", + "abstract": "Numeric planning is known to be undecidable even under severe restrictions. Prior work has investigated the decidability boundaries by restricting the expressiveness of the planning formalism in terms of the numeric functions allowed in conditions and effects. 
We study a well-known restricted form of Hoffmann's simple numeric planning, which is undecidable. We analyze the complexity by imposing restrictions on the causal structure, exploiting a novel method for bounding variable domain sizes. First, we show that plan existence for tasks where all numeric variables are root nodes in the causal graph is in PSPACE.\nSecond, we show that for tasks with only numeric leaf variables the problem is decidable, and that it is in PSPACE if the propositional state space has a fixed size. Our work lays a strong foundation for future investigations of structurally more complex tasks. From a practical perspective, our method allows to employ heuristics and methods that are geared towards finite variable domains (such as pattern database heuristics or decoupled search) to solve non-trivial families of numeric planning problems.", + "primary_area": "planning routing and scheduling", + "author": "Alexander Shleyfman; Daniel Gnad; Peter Jonsson", + "authorids": "", + "aff": "The Department of Computer Science, Bar-Ilan University, Ramat Gan, Israel; Department of Computer and Information Science, Link\u00f6ping University, Link\u00f6ping, Sweden; Department of Computer and Information Science, Link\u00f6ping University, Link\u00f6ping, Sweden", + "bibtex": "@article{Shleyfman_Gnad_Jonsson_2023, title={Structurally Restricted Fragments of Numeric Planning \u2013 a Complexity Analysis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26428}, DOI={10.1609/aaai.v37i10.26428}, abstractNote={Numeric planning is known to be undecidable even under severe restrictions. Prior work has investigated the decidability boundaries by restricting the expressiveness of the planning formalism in terms of the numeric functions allowed in conditions and effects. We study a well-known restricted form of Hoffmann\u2019s simple numeric planning, which is undecidable. 
We analyze the complexity by imposing restrictions on the causal structure, exploiting a novel method for bounding variable domain sizes. First, we show that plan existence for tasks where all numeric variables are root nodes in the causal graph is in PSPACE.\nSecond, we show that for tasks with only numeric leaf variables the problem is decidable, and that it is in PSPACE if the propositional state space has a fixed size. Our work lays a strong foundation for future investigations of structurally more complex tasks. From a practical perspective, our method allows to employ heuristics and methods that are geared towards finite variable domains (such as pattern database heuristics or decoupled search) to solve non-trivial families of numeric planning problems.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shleyfman, Alexander and Gnad, Daniel and Jonsson, Peter}, year={2023}, month={Jun.}, pages={12112-12119} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26428/26200", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26428", + "pdf_size": 167305, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1786904207834417828&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "biu.ac.il;liu.se;liu.se", + "email": "biu.ac.il;liu.se;liu.se", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Bar-Ilan University;Link\u00f6ping University", + "aff_unique_dep": "Department of Computer Science;Department of Computer and Information Science", + "aff_unique_url": "https://www.biu.ac.il;https://www.liu.se", + "aff_unique_abbr": "BIU;LiU", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Ramat Gan;Link\u00f6ping", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "Israel;Sweden" + }, + { + "id": "article-25595", + "title": "Structure Aware Incremental Learning with Personalized 
Imitation Weights for Recommender Systems", + "track": "main", + "status": "Technical", + "abstract": "Recommender systems now consume large-scale data and play a significant role in improving user experience. Graph Neural Networks (GNNs) have emerged as one of the most effective recommender system models because they model the rich relational information. The ever-growing volume of data can make training GNNs prohibitively expensive. To address this, previous attempts propose to train the GNN models incrementally as new data blocks arrive. \nFeature and structure knowledge distillation techniques have been explored to allow the GNN model to train in a fast incremental fashion while alleviating the catastrophic forgetting problem. \nHowever, preserving the same amount of the historical information for all users is sub-optimal since it fails to take into account the dynamics of each user's change of preferences. \nFor the users whose interests shift substantially, retaining too much of the old knowledge can overly constrain the model, preventing it from quickly adapting to the users\u2019 novel interests. \nIn contrast, for users who have static preferences, model performance can benefit greatly from preserving as much of the user's long-term preferences as possible.\nIn this work, we propose a novel training strategy that adaptively learns personalized imitation weights for each user to balance the contribution from the recent data and the amount of knowledge to be distilled from previous time periods.\nWe demonstrate the effectiveness of learning imitation weights via a comparison on five diverse datasets for three state-of-art structure distillation based recommender systems. 
The performance shows consistent improvement over competitive incremental learning techniques.", + "primary_area": "data mining and knowledge management", + "author": "Yuening Wang; Yingxue Zhang; Antonios Valkanas; Ruiming Tang; Chen Ma; Jianye Hao; Mark Coates", + "authorids": "", + "aff": "Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; McGill University; Huawei Noah\u2019s Ark Lab; City University of Hong Kong; Huawei Noah\u2019s Ark Lab + Tianjin University; McGill University", + "bibtex": "@article{Wang_Zhang_Valkanas_Tang_Ma_Hao_Coates_2023, title={Structure Aware Incremental Learning with Personalized Imitation Weights for Recommender Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25595}, DOI={10.1609/aaai.v37i4.25595}, abstractNote={Recommender systems now consume large-scale data and play a significant role in improving user experience. Graph Neural Networks (GNNs) have emerged as one of the most effective recommender system models because they model the rich relational information. The ever-growing volume of data can make training GNNs prohibitively expensive. To address this, previous attempts propose to train the GNN models incrementally as new data blocks arrive. Feature and structure knowledge distillation techniques have been explored to allow the GNN model to train in a fast incremental fashion while alleviating the catastrophic forgetting problem. However, preserving the same amount of the historical information for all users is sub-optimal since it fails to take into account the dynamics of each user\u2019s change of preferences. For the users whose interests shift substantially, retaining too much of the old knowledge can overly constrain the model, preventing it from quickly adapting to the users\u2019 novel interests. 
In contrast, for users who have static preferences, model performance can benefit greatly from preserving as much of the user\u2019s long-term preferences as possible.\nIn this work, we propose a novel training strategy that adaptively learns personalized imitation weights for each user to balance the contribution from the recent data and the amount of knowledge to be distilled from previous time periods.\nWe demonstrate the effectiveness of learning imitation weights via a comparison on five diverse datasets for three state-of-art structure distillation based recommender systems. The performance shows consistent improvement over competitive incremental learning techniques.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yuening and Zhang, Yingxue and Valkanas, Antonios and Tang, Ruiming and Ma, Chen and Hao, Jianye and Coates, Mark}, year={2023}, month={Jun.}, pages={4711-4719} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25595/25367", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25595", + "pdf_size": 1495498, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9105657750642374206&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "huawei.com;huawei.com;mail.mcgill.ca;huawei.com;cityu.edu.hk;huawei.com;mcgill.ca", + "email": "huawei.com;huawei.com;mail.mcgill.ca;huawei.com;cityu.edu.hk;huawei.com;mcgill.ca", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;2;0+3;1", + "aff_unique_norm": "Huawei;McGill University;City University of Hong Kong;Tianjin University", + "aff_unique_dep": "Noah\u2019s Ark Lab;;;", + "aff_unique_url": "https://www.huawei.com;https://www.mcgill.ca;https://www.cityu.edu.hk;http://www.tju.edu.cn", + "aff_unique_abbr": "Huawei;McGill;CityU;TJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0+0;1", + 
"aff_country_unique": "China;Canada" + }, + { + "id": "article-25441", + "title": "Structure Flow-Guided Network for Real Depth Super-resolution", + "track": "main", + "status": "Technical", + "abstract": "Real depth super-resolution (DSR), unlike synthetic settings, is a challenging task due to the structural distortion and the edge noise caused by the natural degradation in real-world low-resolution (LR) depth maps. These defeats result in significant structure inconsistency between the depth map and the RGB guidance, which potentially confuses the RGB-structure guidance and thereby degrades the DSR quality. In this paper, we propose a novel structure flow-guided DSR framework, where a cross-modality flow map is learned to guide the RGB-structure information transferring for precise depth upsampling. Specifically, our framework consists of a cross-modality flow-guided upsampling network (CFUNet) and a flow-enhanced pyramid edge attention network (PEANet). CFUNet contains a trilateral self-attention module combining both the geometric and semantic correlations for reliable cross-modality flow learning. Then, the learned flow maps are combined with the grid-sampling mechanism for coarse high-resolution (HR) depth prediction. PEANet targets at integrating the learned flow map as the edge attention into a pyramid network to hierarchically learn the edge-focused guidance feature for depth edge refinement. Extensive experiments on real and synthetic DSR datasets verify that our approach achieves excellent performance compared to state-of-the-art methods. 
Our code is available at: https://github.com/Yuanjiayii/DSR-SFG.", + "primary_area": "computer vision iii", + "author": "Jiayi Yuan; Haobo Jiang; Xiang Li; Jianjun Qian; Jun Li; Jian Yang", + "authorids": "", + "aff": "PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education; Jiangsu Key Lab of Image and Video Understanding for Social Security; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China; PCA Lab, Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education; Jiangsu Key Lab of Image and Video Understanding for Social Security; School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China", + "bibtex": "@article{Yuan_Jiang_Li_Qian_Li_Yang_2023, title={Structure Flow-Guided Network for Real Depth Super-resolution}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25441}, DOI={10.1609/aaai.v37i3.25441}, abstractNote={Real depth super-resolution (DSR), unlike synthetic settings, is a challenging task due to the structural distortion and the edge noise caused by the natural degradation in real-world low-resolution (LR) depth maps. These defeats result in significant structure inconsistency between the depth map and the RGB guidance, which potentially confuses the RGB-structure guidance and thereby degrades the DSR quality. In this paper, we propose a novel structure flow-guided DSR framework, where a cross-modality flow map is learned to guide the RGB-structure information transferring for precise depth upsampling. Specifically, our framework consists of a cross-modality flow-guided upsampling network (CFUNet) and a flow-enhanced pyramid edge attention network (PEANet). CFUNet contains a trilateral self-attention module combining both the geometric and semantic correlations for reliable cross-modality flow learning. 
Then, the learned flow maps are combined with the grid-sampling mechanism for coarse high-resolution (HR) depth prediction. PEANet targets at integrating the learned flow map as the edge attention into a pyramid network to hierarchically learn the edge-focused guidance feature for depth edge refinement. Extensive experiments on real and synthetic DSR datasets verify that our approach achieves excellent performance compared to state-of-the-art methods. Our code is available at: https://github.com/Yuanjiayii/DSR-SFG.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yuan, Jiayi and Jiang, Haobo and Li, Xiang and Qian, Jianjun and Li, Jun and Yang, Jian}, year={2023}, month={Jun.}, pages={3340-3348} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25441/25213", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25441", + "pdf_size": 1374733, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17147551460140490893&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn;njust.edu.cn", + "github": "https://github.com/Yuanjiayii/DSR-SFG", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;1;2", + "aff_unique_norm": "PCA Lab;Jiangsu Key Lab of Image and Video Understanding for Social Security;Nanjing University of Science and Technology", + "aff_unique_dep": "Key Lab of Intelligent Perception and Systems for High-Dimensional Information;Image and Video Understanding for Social Security;School of Computer Science and Engineering", + "aff_unique_url": ";;http://www.nust.edu.cn", + "aff_unique_abbr": ";;NUST", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Nanjing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25877", + 
"title": "Structured BFGS Method for Optimal Doubly Stochastic Matrix Approximation", + "track": "main", + "status": "Technical", + "abstract": "Doubly stochastic matrix plays an essential role in several areas such as statistics and machine learning. In this paper we consider the optimal approximation of a square matrix in the set of doubly stochastic matrices. A structured BFGS method is proposed to solve the dual of the primal problem. The resulting algorithm builds curvature information into the diagonal components of the true Hessian, so that it takes only additional linear cost to obtain the descent direction based on the gradient information without having to explicitly store the inverse Hessian approximation. The cost is substantially fewer than quadratic complexity of the classical BFGS algorithm. Meanwhile, a Newton-based line search method is presented for finding a suitable step size, which in practice uses the existing knowledge and takes only one iteration. The global convergence of our algorithm is established. We verify the advantages of our approach on both synthetic data and real data sets. The experimental results demonstrate that our algorithm outperforms the state-of-the-art solvers and enjoys outstanding scalability.", + "primary_area": "machine learning i", + "author": "Dejun Chu; Changshui Zhang; Shiliang Sun; Qing Tao", + "authorids": "", + "aff": "School of Software, Hefei University of Technology; Department of Automation, Tsinghua University; School of Computer Science and Technology, East China Normal University; Army Academy of Artillery and Air Defense", + "bibtex": "@article{Chu_Zhang_Sun_Tao_2023, title={Structured BFGS Method for Optimal Doubly Stochastic Matrix Approximation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25877}, DOI={10.1609/aaai.v37i6.25877}, abstractNote={Doubly stochastic matrix plays an essential role in several areas such as statistics and machine learning. 
In this paper we consider the optimal approximation of a square matrix in the set of doubly stochastic matrices. A structured BFGS method is proposed to solve the dual of the primal problem. The resulting algorithm builds curvature information into the diagonal components of the true Hessian, so that it takes only additional linear cost to obtain the descent direction based on the gradient information without having to explicitly store the inverse Hessian approximation. The cost is substantially fewer than quadratic complexity of the classical BFGS algorithm. Meanwhile, a Newton-based line search method is presented for finding a suitable step size, which in practice uses the existing knowledge and takes only one iteration. The global convergence of our algorithm is established. We verify the advantages of our approach on both synthetic data and real data sets. The experimental results demonstrate that our algorithm outperforms the state-of-the-art solvers and enjoys outstanding scalability.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chu, Dejun and Zhang, Changshui and Sun, Shiliang and Tao, Qing}, year={2023}, month={Jun.}, pages={7193-7201} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25877/25649", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25877", + "pdf_size": 244033, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:3hZeeJ0lUgkJ:scholar.google.com/&scioq=Structured+BFGS+Method+for+Optimal+Doubly+Stochastic+Matrix+Approximation&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "gmail.com;mail.tsinghua.edu.cn;cs.ecnu.edu.cn;gmail.com", + "email": "gmail.com;mail.tsinghua.edu.cn;cs.ecnu.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Hefei University of Technology;Tsinghua University;East China Normal University;Army Academy of Artillery and Air 
Defense", + "aff_unique_dep": "School of Software;Department of Automation;School of Computer Science and Technology;", + "aff_unique_url": "https://www.hfut.edu.cn;https://www.tsinghua.edu.cn;http://www.ecnu.edu.cn;", + "aff_unique_abbr": ";THU;ECNU;", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Hefei;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26476", + "title": "Structured Case-Based Reasoning for Inference-Time Adaptation of Text-to-SQL Parsers", + "track": "main", + "status": "Technical", + "abstract": "Inference-time adaptation methods for semantic parsing are useful for leveraging examples from newly-observed domains without repeated fine-tuning. Existing approaches typically bias the decoder by simply concatenating input-output example pairs (cases) from the new domain at the encoder\u2019s input in a Seq-to-Seq model. Such methods cannot adequately leverage the structure of logical forms in the case examples. We propose StructCBR, a structured case-based reasoning approach, which leverages subtree-level similarity between logical forms of cases and candidate outputs, resulting in better decoder decisions. For the task of adapting Text-to-SQL models to unseen schemas, we show that exploiting case examples in a structured manner via StructCBR offers consistent performance improvements over prior inference-time adaptation methods across five different databases. 
To the best of our knowledge, we are the first to attempt inference-time adaptation of Text-to-SQL models, and harness trainable structured similarity between subqueries.", + "primary_area": "speech natural language processing", + "author": "Abhijeet Awasthi; Soumen Chakrabarti; Sunita Sarawagi", + "authorids": "", + "aff": "Department of Computer Science and Engineering, Indian Institute of Technology Bombay, Mumbai, India; Department of Computer Science and Engineering, Indian Institute of Technology Bombay, Mumbai, India; Department of Computer Science and Engineering, Indian Institute of Technology Bombay, Mumbai, India", + "bibtex": "@article{Awasthi_Chakrabarti_Sarawagi_2023, title={Structured Case-Based Reasoning for Inference-Time Adaptation of Text-to-SQL Parsers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26476}, DOI={10.1609/aaai.v37i11.26476}, abstractNote={Inference-time adaptation methods for semantic parsing are useful for leveraging examples from newly-observed domains without repeated fine-tuning. Existing approaches typically bias the decoder by simply concatenating input-output example pairs (cases) from the new domain at the encoder\u2019s input in a Seq-to-Seq model. Such methods cannot adequately leverage the structure of logical forms in the case examples. We propose StructCBR, a structured case-based reasoning approach, which leverages subtree-level similarity between logical forms of cases and candidate outputs, resulting in better decoder decisions. For the task of adapting Text-to-SQL models to unseen schemas, we show that exploiting case examples in a structured manner via StructCBR offers consistent performance improvements over prior inference-time adaptation methods across five different databases. 
To the best of our knowledge, we are the first to attempt inference-time adaptation of Text-to-SQL models, and harness trainable structured similarity between subqueries.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Awasthi, Abhijeet and Chakrabarti, Soumen and Sarawagi, Sunita}, year={2023}, month={Jun.}, pages={12536-12544} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26476/26248", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26476", + "pdf_size": 369993, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17502737134899403842&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "cse.iitb.ac.in;cse.iitb.ac.in;cse.iitb.ac.in", + "email": "cse.iitb.ac.in;cse.iitb.ac.in;cse.iitb.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Bombay", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.iitb.ac.in", + "aff_unique_abbr": "IIT Bombay", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Mumbai", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25473", + "title": "Style-Content Metric Learning for Multidomain Remote Sensing Object Recognition", + "track": "main", + "status": "Technical", + "abstract": "Previous remote sensing recognition approaches predominantly perform well on the training-testing dataset. However, due to large style discrepancies not only among multidomain datasets but also within a single domain, they suffer from obvious performance degradation when applied to unseen domains. In this paper, we propose a style-content metric learning framework to address the generalizable remote sensing object recognition issue. 
Specifically, we firstly design an inter-class dispersion metric to encourage the model to make decision based on content rather than the style, which is achieved by dispersing predictions generated from the contents of both positive sample and negative sample and the style of input image. Secondly, we propose an intra-class compactness metric to force the model to be less style-biased by compacting classifier's predictions from the content of input image and the styles of positive sample and negative sample. Lastly, we design an intra-class interaction metric to improve model's recognition accuracy by pulling in classifier's predictions obtained from the input image and positive sample. Extensive experiments on four datasets show that our style-content metric learning achieves superior generalization performance against the state-of-the-art competitors. Code and model are available at: https://github.com/wdzhao123/TSCM.", + "primary_area": "computer vision iii", + "author": "Wenda Zhao; Ruikai Yang; Yu Liu; You He", + "authorids": "", + "aff": "Dalian University of Technology; Dalian University of Technology; Tsinghua University; Tsinghua University", + "bibtex": "@article{Zhao_Yang_Liu_He_2023, title={Style-Content Metric Learning for Multidomain Remote Sensing Object Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25473}, DOI={10.1609/aaai.v37i3.25473}, abstractNote={Previous remote sensing recognition approaches predominantly perform well on the training-testing dataset. However, due to large style discrepancies not only among multidomain datasets but also within a single domain, they suffer from obvious performance degradation when applied to unseen domains. In this paper, we propose a style-content metric learning framework to address the generalizable remote sensing object recognition issue. 
Specifically, we firstly design an inter-class dispersion metric to encourage the model to make decision based on content rather than the style, which is achieved by dispersing predictions generated from the contents of both positive sample and negative sample and the style of input image. Secondly, we propose an intra-class compactness metric to force the model to be less style-biased by compacting classifier\u2019s predictions from the content of input image and the styles of positive sample and negative sample. Lastly, we design an intra-class interaction metric to improve model\u2019s recognition accuracy by pulling in classifier\u2019s predictions obtained from the input image and positive sample. Extensive experiments on four datasets show that our style-content metric learning achieves superior generalization performance against the state-of-the-art competitors. Code and model are available at: https://github.com/wdzhao123/TSCM.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Wenda and Yang, Ruikai and Liu, Yu and He, You}, year={2023}, month={Jun.}, pages={3624-3632} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25473/25245", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25473", + "pdf_size": 1033641, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16495684272779954238&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 2, + "aff_domain": "dlut.edu.cn;mail.dlut.edu.cn;126.com;163.com", + "email": "dlut.edu.cn;mail.dlut.edu.cn;126.com;163.com", + "github": "https://github.com/wdzhao123/TSCM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "Dalian University of Technology;Tsinghua University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.dlut.edu.cn/;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "DUT;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25280", + "title": "StyleTalk: One-Shot Talking Head Generation with Controllable Speaking Styles", + "track": "main", + "status": "Technical", + "abstract": "Different people speak with diverse personalized speaking styles. Although existing one-shot talking head methods have made significant progress in lip sync, natural facial expressions, and stable head motions, they still cannot generate diverse speaking styles in the final talking head videos. To tackle this problem, we propose a one-shot style-controllable talking face generation framework. In a nutshell, we aim to attain a speaking style from an arbitrary reference speaking video and then drive the one-shot portrait to speak with the reference speaking style and another piece of audio. Specifically, we first develop a style encoder to extract dynamic facial motion patterns of a style reference video and then encode them into a style code. Afterward, we introduce a style-controllable decoder to synthesize stylized facial animations from the speech content and style code. In order to integrate the reference speaking style into generated videos, we design a style-aware adaptive transformer, which enables the encoded style code to adjust the weights of the feed-forward layers accordingly. Thanks to the style-aware adaptation mechanism, the reference speaking style can be better embedded into synthesized videos during decoding. Extensive experiments demonstrate that our method is capable of generating talking head videos with diverse speaking styles from only one portrait image and an audio clip while achieving authentic visual effects. 
Project Page: https://github.com/FuxiVirtualHuman/styletalk.", + "primary_area": "computer vision ii", + "author": "Yifeng Ma; Suzhen Wang; Zhipeng Hu; Changjie Fan; Tangjie Lv; Yu Ding; Zhidong Deng; Xin Yu", + "authorids": "", + "aff": "Department of Computer Science and Technology, BNRist, THUAI, State Key Laboratory of Intelligent Technology and Systems, Tsinghua University+Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab+Zhejiang University; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab; Virtual Human Group, Netease Fuxi AI Lab+Zhejiang University; Department of Computer Science and Technology, BNRist, THUAI, State Key Laboratory of Intelligent Technology and Systems, Tsinghua University; University of Technology Sydney", + "bibtex": "@article{Ma_Wang_Hu_Fan_Lv_Ding_Deng_Yu_2023, title={StyleTalk: One-Shot Talking Head Generation with Controllable Speaking Styles}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25280}, DOI={10.1609/aaai.v37i2.25280}, abstractNote={Different people speak with diverse personalized speaking styles. Although existing one-shot talking head methods have made significant progress in lip sync, natural facial expressions, and stable head motions, they still cannot generate diverse speaking styles in the final talking head videos. To tackle this problem, we propose a one-shot style-controllable talking face generation framework. In a nutshell, we aim to attain a speaking style from an arbitrary reference speaking video and then drive the one-shot portrait to speak with the reference speaking style and another piece of audio. Specifically, we first develop a style encoder to extract dynamic facial motion patterns of a style reference video and then encode them into a style code. 
Afterward, we introduce a style-controllable decoder to synthesize stylized facial animations from the speech content and style code. In order to integrate the reference speaking style into generated videos, we design a style-aware adaptive transformer, which enables the encoded style code to adjust the weights of the feed-forward layers accordingly. Thanks to the style-aware adaptation mechanism, the reference speaking style can be better embedded into synthesized videos during decoding. Extensive experiments demonstrate that our method is capable of generating talking head videos with diverse speaking styles from only one portrait image and an audio clip while achieving authentic visual effects. Project Page: https://github.com/FuxiVirtualHuman/styletalk.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ma, Yifeng and Wang, Suzhen and Hu, Zhipeng and Fan, Changjie and Lv, Tangjie and Ding, Yu and Deng, Zhidong and Yu, Xin}, year={2023}, month={Jun.}, pages={1896-1904} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25280/25052", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25280", + "pdf_size": 2725692, + "gs_citation": 90, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15670035772233396522&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.tsinghua.edu.cn;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;tsinghua.edu.cn;uts.edu.au", + "email": "mails.tsinghua.edu.cn;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;corp.netease.com;tsinghua.edu.cn;uts.edu.au", + "github": "https://github.com/FuxiVirtualHuman/styletalk", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1;1+2;1;1;1+2;0;3", + "aff_unique_norm": "Tsinghua University;Netease Fuxi AI Lab;Zhejiang University;University of Technology Sydney", + "aff_unique_dep": "Department of Computer Science and Technology;Virtual 
Human Group;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.netease.com;https://www.zju.edu.cn;https://www.uts.edu.au", + "aff_unique_abbr": "THU;Netease;ZJU;UTS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0+0;0;0;0+0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-25510", + "title": "Submodular Maximization under the Intersection of Matroid and Knapsack Constraints", + "track": "main", + "status": "Technical", + "abstract": "Submodular maximization arises in many applications, and has attracted a lot of research attentions from various areas such as artificial intelligence, finance and operations research. Previous studies mainly consider only one kind of constraint, while many real-world problems often involve several constraints. In this paper, we consider the problem of submodular maximization under the intersection of two commonly used constraints, i.e., k-matroid constraint and m-knapsack constraint, and propose a new algorithm SPROUT by incorporating partial enumeration into the simultaneous greedy framework. We prove that SPROUT can achieve a polynomial-time approximation guarantee better than the state-of-the-art algorithms. Then, we introduce the random enumeration and smooth techniques into SPROUT to improve its efficiency, resulting in the SPROUT++ algorithm, which can keep a similar approximation guarantee. 
Experiments on the applications of movie recommendation and weighted max-cut demonstrate the superiority of SPROUT++ in practice.", + "primary_area": "constraint satisfaction and optimization", + "author": "Yu-Ran Gu; Chao Bian; Chao Qian", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", + "bibtex": "@article{Gu_Bian_Qian_2023, title={Submodular Maximization under the Intersection of Matroid and Knapsack Constraints}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25510}, DOI={10.1609/aaai.v37i4.25510}, abstractNote={Submodular maximization arises in many applications, and has attracted a lot of research attentions from various areas such as artificial intelligence, finance and operations research. Previous studies mainly consider only one kind of constraint, while many real-world problems often involve several constraints. In this paper, we consider the problem of submodular maximization under the intersection of two commonly used constraints, i.e., k-matroid constraint and m-knapsack constraint, and propose a new algorithm SPROUT by incorporating partial enumeration into the simultaneous greedy framework. We prove that SPROUT can achieve a polynomial-time approximation guarantee better than the state-of-the-art algorithms. Then, we introduce the random enumeration and smooth techniques into SPROUT to improve its efficiency, resulting in the SPROUT++ algorithm, which can keep a similar approximation guarantee. 
Experiments on the applications of movie recommendation and weighted max-cut demonstrate the superiority of SPROUT++ in practice.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gu, Yu-Ran and Bian, Chao and Qian, Chao}, year={2023}, month={Jun.}, pages={3959-3967} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25510/25282", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25510", + "pdf_size": 238968, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9872216412591778814&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing University", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26384", + "title": "Subspace-Aware Exploration for Sparse-Reward Multi-Agent Tasks", + "track": "main", + "status": "Technical", + "abstract": "Exploration under sparse rewards is a key challenge for multi-agent reinforcement learning problems. One possible solution to this issue is to exploit inherent task structures for an acceleration of exploration. In this paper, we present a novel exploration approach, which encodes a special structural prior on the reward function into exploration, for sparse-reward multi-agent tasks. Specifically, a novel entropic exploration objective which encodes the structural prior is proposed to accelerate the discovery of rewards. 
By maximizing the lower bound of this objective, we then propose an algorithm with moderate computational cost, which can be applied to practical tasks. Under the sparse-reward setting, we show that the proposed algorithm significantly outperforms the state-of-the-art algorithms in the multiple-particle environment, the Google Research Football and StarCraft II micromanagement tasks. To the best of our knowledge, on some hard tasks (such as 27m_vs_30m}) which have relatively larger number of agents and need non-trivial strategies to defeat enemies, our method is the first to learn winning strategies under the sparse-reward setting.", + "primary_area": "multiagent systems", + "author": "Pei Xu; Junge Zhang; Qiyue Yin; Chao Yu; Yaodong Yang; Kaiqi Huang", + "authorids": "", + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences + CRISE, Institute of Automation, Chinese Academy of Sciences + CAS, Center for Excellence in Brain Science and Intelligence Technology; CRISE, Institute of Automation, Chinese Academy of Sciences; CRISE, Institute of Automation, Chinese Academy of Sciences; School of Computer Science and Engineering, Sun Yat-sen University; Beijing Institute for General AI + Institute for AI, Peking University; School of Artificial Intelligence, University of Chinese Academy of Sciences + CRISE, Institute of Automation, Chinese Academy of Sciences + CAS, Center for Excellence in Brain Science and Intelligence Technology", + "bibtex": "@article{Xu_Zhang_Yin_Yu_Yang_Huang_2023, title={Subspace-Aware Exploration for Sparse-Reward Multi-Agent Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26384}, DOI={10.1609/aaai.v37i10.26384}, abstractNote={Exploration under sparse rewards is a key challenge for multi-agent reinforcement learning problems. One possible solution to this issue is to exploit inherent task structures for an acceleration of exploration. 
In this paper, we present a novel exploration approach, which encodes a special structural prior on the reward function into exploration, for sparse-reward multi-agent tasks. Specifically, a novel entropic exploration objective which encodes the structural prior is proposed to accelerate the discovery of rewards. By maximizing the lower bound of this objective, we then propose an algorithm with moderate computational cost, which can be applied to practical tasks. Under the sparse-reward setting, we show that the proposed algorithm significantly outperforms the state-of-the-art algorithms in the multiple-particle environment, the Google Research Football and StarCraft II micromanagement tasks. To the best of our knowledge, on some hard tasks (such as 27m_vs_30m}) which have relatively larger number of agents and need non-trivial strategies to defeat enemies, our method is the first to learn winning strategies under the sparse-reward setting.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Pei and Zhang, Junge and Yin, Qiyue and Yu, Chao and Yang, Yaodong and Huang, Kaiqi}, year={2023}, month={Jun.}, pages={11717-11725} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26384/26156", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26384", + "pdf_size": 1384818, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14864643689612254089&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;mail.sysu.edu.cn;pku.edu.cn;nlpr.ia.ac.cn", + "email": "ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;mail.sysu.edu.cn;pku.edu.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+1;1;1;2;3+4;0+1+1", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;Sun Yat-sen University;Beijing Institute for General AI;Peking University", + "aff_unique_dep": 
"School of Artificial Intelligence;Institute of Automation;School of Computer Science and Engineering;;Institute for AI", + "aff_unique_url": "http://www.ucas.ac.cn;http://www.ia.cas.cn;http://www.sysu.edu.cn;http://www.general-ai.cn;http://www.pku.edu.cn", + "aff_unique_abbr": "UCAS;CAS;SYSU;;PKU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0;0;0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26318", + "title": "Substructure Aware Graph Neural Networks", + "track": "main", + "status": "Technical", + "abstract": "Despite the great achievements of Graph Neural Networks (GNNs) in graph learning, conventional GNNs struggle to break through the upper limit of the expressiveness of first-order Weisfeiler-Leman graph isomorphism test algorithm (1-WL) due to the consistency of the propagation paradigm of GNNs with the 1-WL.Based on the fact that it is easier to distinguish the original graph through subgraphs, we propose a novel framework neural network framework called Substructure Aware Graph Neural Networks (SAGNN) to address these issues. We first propose a Cut subgraph which can be obtained from the original graph by continuously and selectively removing edges. 
Specifically, our framework achieves a maximum performance improvement of 83% compared to the base models and 32% compared to the previous state-of-the-art methods.", + "primary_area": "machine learning iv", + "author": "DingYi Zeng; Wanlong Liu; Wenyu Chen; Li Zhou; Malu Zhang; Hong Qu", + "authorids": "", + "aff": "School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China; School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China", + "bibtex": "@article{Zeng_Liu_Chen_Zhou_Zhang_Qu_2023, title={Substructure Aware Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26318}, DOI={10.1609/aaai.v37i9.26318}, abstractNote={Despite the great achievements of Graph Neural Networks (GNNs) in graph learning, conventional GNNs struggle to break through the upper limit of the expressiveness of first-order Weisfeiler-Leman graph isomorphism test algorithm (1-WL) due to the consistency of the propagation paradigm of GNNs with the 1-WL.Based on the fact that it is easier to distinguish the original graph through subgraphs, we propose a novel framework neural network framework called Substructure Aware Graph Neural Networks (SAGNN) to address these issues. We first propose a Cut subgraph which can be obtained from the original graph by continuously and selectively removing edges. 
Then we extend the random walk encoding paradigm to the return probability of the rooted node on the subgraph to capture the structural information and use it as a node feature to improve the expressiveness of GNNs. We theoretically prove that our framework is more powerful than 1-WL, and is superior in structure perception. Our extensive experiments demonstrate the effectiveness of our framework, achieving state-of-the-art performance on a variety of well-proven graph tasks, and GNNs equipped with our framework perform flawlessly even in 3-WL failed graphs. Specifically, our framework achieves a maximum performance improvement of 83% compared to the base models and 32% compared to the previous state-of-the-art methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zeng, DingYi and Liu, Wanlong and Chen, Wenyu and Zhou, Li and Zhang, Malu and Qu, Hong}, year={2023}, month={Jun.}, pages={11129-11137} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26318/26090", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26318", + "pdf_size": 1219040, + "gs_citation": 62, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17991111400264893875&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff_domain": "std.uestc.edu.cn;std.uestc.edu.cn;uestc.edu.cn;std.uestc.edu.cn;uestc.edu.cn;uestc.edu.cn", + "email": "std.uestc.edu.cn;std.uestc.edu.cn;uestc.edu.cn;std.uestc.edu.cn;uestc.edu.cn;uestc.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.uestc.edu.cn", + "aff_unique_abbr": "UESTC", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Chengdu", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-27072", + "title": "Sudoku Assistant \u2013 an AI-Powered App to Help Solve Pen-and-Paper Sudokus", + "track": "demonstrations", + "status": "Technical", + "abstract": "The Sudoku Assistant app is an AI assistant that uses a combination of machine learning and constraint programming techniques, to interpret and explain a pen-and-paper Sudoku scanned with a smartphone.\nAlthough the demo is about Sudoku, the underlying techniques are equally applicable to other constraint solving problems like timetabling, scheduling, and vehicle routing.", + "primary_area": "", + "author": "Tias Guns; Emilio Gamba; Maxime Mulamba; Ignace Bleukx; Senne Berden; Milan Pesa", + "authorids": "", + "aff": "Vrije Universiteit Brussel+KU Leuven; Vrije Universiteit Brussel+KU Leuven; Vrije Universiteit Brussel+KU Leuven; KU Leuven; KU Leuven; KU Leuven", + "bibtex": "@article{Guns_Gamba_Mulamba_Bleukx_Berden_Pesa_2024, title={Sudoku Assistant \u2013 an AI-Powered App to Help Solve Pen-and-Paper Sudokus}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27072}, DOI={10.1609/aaai.v37i13.27072}, abstractNote={The Sudoku Assistant app is an AI assistant that uses a combination of machine learning and constraint programming techniques, to interpret and explain a pen-and-paper Sudoku scanned with a smartphone.\nAlthough the demo is about Sudoku, the underlying techniques are equally applicable to other constraint solving problems like timetabling, scheduling, and vehicle routing.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guns, Tias and Gamba, Emilio and Mulamba, Maxime and Bleukx, Ignace and Berden, Senne and Pesa, Milan}, year={2024}, month={Jul.}, pages={16440-16442} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27072/26844", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27072", + "pdf_size": 998971, + "gs_citation": 3, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=8423191048512490532&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 9, + "aff_domain": "kuleuven.be;vub.be;vub.be;kuleuven.be;kuleuven.be;kuleuven.be", + "email": "kuleuven.be;vub.be;vub.be;kuleuven.be;kuleuven.be;kuleuven.be", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;1;1;1", + "aff_unique_norm": "Vrije Universiteit Brussel;Katholieke Universiteit Leuven", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.vub.be;https://www.kuleuven.be", + "aff_unique_abbr": "VUB;KU Leuven", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Brussels;", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;0", + "aff_country_unique": "Belgium" + }, + { + "id": "article-26506", + "title": "SumREN: Summarizing Reported Speech about Events in News", + "track": "main", + "status": "Technical", + "abstract": "A primary objective of news articles is to establish the factual record for an event, frequently achieved by conveying both the details of the specified event (i.e., the 5 Ws; Who, What, Where, When and Why regarding the event) and how people reacted to it (i.e., reported statements). However, existing work on news summarization almost exclusively focuses on the event details. In this work, we propose the novel task of summarizing the reactions of different speakers, as expressed by their reported statements, to a given event. To this end, we create a new multi-document summarization benchmark, SumREN, comprising 745 summaries of reported statements from various public figures obtained from 633 news articles discussing 132 events. We propose an automatic silver-training data generation approach for our task, which helps smaller models like BART achieve GPT-3 level performance on this task. 
Finally, we introduce a pipeline-based framework for summarizing reported speech, which we empirically show to generate summaries that are more abstractive and factual than baseline query-focused summarization approaches.", + "primary_area": "speech natural language processing", + "author": "Revanth Gangi Reddy; Heba Elfardy; Hou Pong Chan; Kevin Small; Heng Ji", + "authorids": "", + "aff": "University of Illinois Urbana-Champaign; Amazon Alexa; University of Macau; Amazon Alexa; Amazon Alexa", + "bibtex": "@article{Gangi Reddy_Elfardy_Chan_Small_Ji_2023, title={SumREN: Summarizing Reported Speech about Events in News}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26506}, DOI={10.1609/aaai.v37i11.26506}, abstractNote={A primary objective of news articles is to establish the factual record for an event, frequently achieved by conveying both the details of the specified event (i.e., the 5 Ws; Who, What, Where, When and Why regarding the event) and how people reacted to it (i.e., reported statements). However, existing work on news summarization almost exclusively focuses on the event details. In this work, we propose the novel task of summarizing the reactions of different speakers, as expressed by their reported statements, to a given event. To this end, we create a new multi-document summarization benchmark, SumREN, comprising 745 summaries of reported statements from various public figures obtained from 633 news articles discussing 132 events. We propose an automatic silver-training data generation approach for our task, which helps smaller models like BART achieve GPT-3 level performance on this task. 
Finally, we introduce a pipeline-based framework for summarizing reported speech, which we empirically show to generate summaries that are more abstractive and factual than baseline query-focused summarization approaches.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gangi Reddy, Revanth and Elfardy, Heba and Chan, Hou Pong and Small, Kevin and Ji, Heng}, year={2023}, month={Jun.}, pages={12808-12817} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26506/26278", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26506", + "pdf_size": 142819, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9035084363416539442&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff_domain": "illinois.edu;amazon.com;amazon.com;amazon.com;um.edu.mo", + "email": "illinois.edu;amazon.com;amazon.com;amazon.com;um.edu.mo", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;1", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Amazon;University of Macau", + "aff_unique_dep": ";Amazon Alexa;", + "aff_unique_url": "https://illinois.edu;https://www.amazon.com/alexa;https://www.um.edu.mo", + "aff_unique_abbr": "UIUC;Amazon Alexa;UM", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "United States;Macau" + }, + { + "id": "article-26985", + "title": "Summarization Attack via Paraphrasing (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Many natural language processing models are perceived to be fragile on adversarial attacks. Recent work on adversarial attack has demonstrated a high success rate on sentiment analysis as well as classification models. However, attacks to summarization models have not been well studied. 
Summarization tasks are rarely influenced by word substitution, since advanced abstractive summary models utilize sentence level information. In this paper, we propose a paraphrasing-based attack method to attack summarization models. We first rank the sentences in the document according to their impacts to summarization. Then, we apply paraphrasing procedure to generate adversarial samples. Finally, we test our algorithm on benchmarks datasets against others methods. Our approach achieved the highest success rate and the lowest sentence substitution rate. In addition, the adversarial samples have high semantic similarity with the original sentences.", + "primary_area": "", + "author": "Jiyao Li; Wei Liu", + "authorids": "", + "aff": "University of Technology Sydney; University of Technology Sydney", + "bibtex": "@article{Li_Liu_2024, title={Summarization Attack via Paraphrasing (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26985}, DOI={10.1609/aaai.v37i13.26985}, abstractNote={Many natural language processing models are perceived to be fragile on adversarial attacks. Recent work on adversarial attack has demonstrated a high success rate on sentiment analysis as well as classification models. However, attacks to summarization models have not been well studied. Summarization tasks are rarely influenced by word substitution, since advanced abstractive summary models utilize sentence level information. In this paper, we propose a paraphrasing-based attack method to attack summarization models. We first rank the sentences in the document according to their impacts to summarization. Then, we apply paraphrasing procedure to generate adversarial samples. Finally, we test our algorithm on benchmarks datasets against others methods. Our approach achieved the highest success rate and the lowest sentence substitution rate. 
In addition, the adversarial samples have high semantic similarity with the original sentences.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Jiyao and Liu, Wei}, year={2024}, month={Jul.}, pages={16250-16251} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26985/26757", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26985", + "pdf_size": 87730, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3293149697135768937&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "student.uts.edu.au;uts.edu.au", + "email": "student.uts.edu.au;uts.edu.au", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Technology Sydney", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uts.edu.au", + "aff_unique_abbr": "UTS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25381", + "title": "Super-efficient Echocardiography Video Segmentation via Proxy- and Kernel-Based Semi-supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "Automatic segmentation of left ventricular endocardium in echocardiography videos is critical for assessing various cardiac functions and improving the diagnosis of cardiac diseases. It is yet a challenging task due to heavy speckle noise, significant shape variability of cardiac structure, and limited labeled data. Particularly, the real-time demand in clinical practice makes this task even harder. In this paper, we propose a novel proxy- and kernel-based semi-supervised segmentation network (PKEcho-Net) to comprehensively address these challenges. 
We first propose a multi-scale region proxy (MRP) mechanism to model the region-wise contexts, in which a learnable region proxy with an arbitrary shape is developed in each layer of the encoder, allowing the network to identify homogeneous semantics and hence alleviate the influence of speckle noise on segmentation. To sufficiently and efficiently exploit temporal consistency, different from traditional methods which only utilize the temporal contexts of two neighboring frames via feature warping or self-attention mechanism, we formulate the semi-supervised segmentation with a group of learnable kernels, which can naturally and uniformly encode the appearances of left ventricular endocardium, as well as extracting the inter-frame contexts across the whole video to resist the fast shape variability of cardiac structures. Extensive experiments have been conducted on two famous public echocardiography video datasets, EchoNet-Dynamic and CAMUS. Our model achieves the best performance-efficiency trade-off when compared with other state-of-the-art approaches, attaining comparative accuracy with a much faster speed. 
The code is available at https://github.com/JingyinLin/PKEcho-Net.", + "primary_area": "computer vision iii", + "author": "Huisi Wu; Jingyin Lin; Wende Xie; Jing Qin", + "authorids": "", + "aff": "College of Computer Science and Software Engineering, Shenzhen University; College of Computer Science and Software Engineering, Shenzhen University; College of Computer Science and Software Engineering, Shenzhen University; Centre for Smart Health, The Hong Kong Polytechnic University", + "bibtex": "@article{Wu_Lin_Xie_Qin_2023, title={Super-efficient Echocardiography Video Segmentation via Proxy- and Kernel-Based Semi-supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25381}, DOI={10.1609/aaai.v37i3.25381}, abstractNote={Automatic segmentation of left ventricular endocardium in echocardiography videos is critical for assessing various cardiac functions and improving the diagnosis of cardiac diseases. It is yet a challenging task due to heavy speckle noise, significant shape variability of cardiac structure, and limited labeled data. Particularly, the real-time demand in clinical practice makes this task even harder. In this paper, we propose a novel proxy- and kernel-based semi-supervised segmentation network (PKEcho-Net) to comprehensively address these challenges. We first propose a multi-scale region proxy (MRP) mechanism to model the region-wise contexts, in which a learnable region proxy with an arbitrary shape is developed in each layer of the encoder, allowing the network to identify homogeneous semantics and hence alleviate the influence of speckle noise on segmentation. 
To sufficiently and efficiently exploit temporal consistency, different from traditional methods which only utilize the temporal contexts of two neighboring frames via feature warping or self-attention mechanism, we formulate the semi-supervised segmentation with a group of learnable kernels, which can naturally and uniformly encode the appearances of left ventricular endocardium, as well as extracting the inter-frame contexts across the whole video to resist the fast shape variability of cardiac structures. Extensive experiments have been conducted on two famous public echocardiography video datasets, EchoNet-Dynamic and CAMUS. Our model achieves the best performance-efficiency trade-off when compared with other state-of-the-art approaches, attaining comparative accuracy with a much faster speed. The code is available at https://github.com/JingyinLin/PKEcho-Net.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Huisi and Lin, Jingyin and Xie, Wende and Qin, Jing}, year={2023}, month={Jun.}, pages={2803-2811} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25381/25153", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25381", + "pdf_size": 5864291, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8669298989836414752&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "szu.edu.cn; ; ; ", + "email": "szu.edu.cn; ; ; ", + "github": "https://github.com/JingyinLin/PKEcho-Net", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Shenzhen University;The Hong Kong Polytechnic University", + "aff_unique_dep": "College of Computer Science and Software Engineering;Centre for Smart Health", + "aff_unique_url": "https://www.szu.edu.cn;https://www.polyu.edu.hk", + "aff_unique_abbr": "SZU;PolyU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-25335", + "title": "Superpoint Transformer for 3D Scene Instance Segmentation", + "track": "main", + "status": "Technical", + "abstract": "Most existing methods realize 3D instance segmentation by extending those models used for 3D object detection or 3D semantic segmentation. However, these non-straightforward methods suffer from two drawbacks: 1) Imprecise bounding boxes or unsatisfactory semantic predictions limit the performance of the overall 3D instance segmentation framework. 2) Existing method requires a time-consuming intermediate step of aggregation. To address these issues, this paper proposes a novel end-to-end 3D instance segmentation method based on Superpoint Transformer, named as SPFormer. It groups potential features from point clouds into superpoints, and directly predicts instances through query vectors without relying on the results of object detection or semantic segmentation. The key step in this framework is a novel query decoder with transformers that can capture the instance information through the superpoint cross-attention mechanism and generate the superpoint masks of the instances. Through bipartite matching based on superpoint masks, SPFormer can implement the network training without the intermediate aggregation step, which accelerates the network. Extensive experiments on ScanNetv2 and S3DIS benchmarks verify that our method is concise yet efficient. Notably, SPFormer exceeds compared state-of-the-art methods by 4.3% on ScanNetv2 hidden test set in terms of mAP and keeps fast inference speed (247ms per frame) simultaneously. 
Code is available at https://github.com/sunjiahao1999/SPFormer.", + "primary_area": "computer vision ii", + "author": "Jiahao Sun; Chunmei Qing; Junpeng Tan; Xiangmin Xu", + "authorids": "", + "aff": "School of Electronic and Information Engineering, South China University of Technology, China; School of Electronic and Information Engineering, South China University of Technology, China; School of Electronic and Information Engineering, South China University of Technology, China; School of Future Technology, South China University of Technology, China", + "bibtex": "@article{Sun_Qing_Tan_Xu_2023, title={Superpoint Transformer for 3D Scene Instance Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25335}, DOI={10.1609/aaai.v37i2.25335}, abstractNote={Most existing methods realize 3D instance segmentation by extending those models used for 3D object detection or 3D semantic segmentation. However, these non-straightforward methods suffer from two drawbacks: 1) Imprecise bounding boxes or unsatisfactory semantic predictions limit the performance of the overall 3D instance segmentation framework. 2) Existing method requires a time-consuming intermediate step of aggregation. To address these issues, this paper proposes a novel end-to-end 3D instance segmentation method based on Superpoint Transformer, named as SPFormer. It groups potential features from point clouds into superpoints, and directly predicts instances through query vectors without relying on the results of object detection or semantic segmentation. The key step in this framework is a novel query decoder with transformers that can capture the instance information through the superpoint cross-attention mechanism and generate the superpoint masks of the instances. Through bipartite matching based on superpoint masks, SPFormer can implement the network training without the intermediate aggregation step, which accelerates the network. 
Extensive experiments on ScanNetv2 and S3DIS benchmarks verify that our method is concise yet efficient. Notably, SPFormer exceeds compared state-of-the-art methods by 4.3% on ScanNetv2 hidden test set in terms of mAP and keeps fast inference speed (247ms per frame) simultaneously. Code is available at https://github.com/sunjiahao1999/SPFormer.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Jiahao and Qing, Chunmei and Tan, Junpeng and Xu, Xiangmin}, year={2023}, month={Jun.}, pages={2393-2401} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25335/25107", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25335", + "pdf_size": 14338935, + "gs_citation": 113, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15182166888435253230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "mail.scut.edu.cn;scut.edu.cn;gmail.com;scut.edu.cn", + "email": "mail.scut.edu.cn;scut.edu.cn;gmail.com;scut.edu.cn", + "github": "https://github.com/sunjiahao1999/SPFormer", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School of Electronic and Information Engineering", + "aff_unique_url": "http://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25863", + "title": "Supervised Contrastive Few-Shot Learning for High-Frequency Time Series", + "track": "main", + "status": "Technical", + "abstract": "Significant progress has been made in representation learning, especially with recent success on self-supervised contrastive learning. However, for time series with less intuitive or semantic meaning, sampling bias may be inevitably encountered in unsupervised approaches. 
Although supervised contrastive learning has shown superior performance by leveraging label information, it may also suffer from class collapse. In this study, we consider a realistic scenario in industry with limited annotation information available. A supervised contrastive framework is developed for high-frequency time series representation and classification, wherein a novel variant of supervised contrastive loss is proposed to include multiple augmentations while induce spread within each class. Experiments on four mainstream public datasets as well as a series of sensitivity and ablation analyses demonstrate that the learned representations are effective and robust compared with the direct supervised learning and self-supervised learning, notably under the minimal few-shot situation.", + "primary_area": "machine learning i", + "author": "Xi Chen; Cheng Ge; Ming Wang; Jin Wang", + "authorids": "", + "aff": "Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group", + "bibtex": "@article{Chen_Ge_Wang_Wang_2023, title={Supervised Contrastive Few-Shot Learning for High-Frequency Time Series}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25863}, DOI={10.1609/aaai.v37i6.25863}, abstractNote={Significant progress has been made in representation learning, especially with recent success on self-supervised contrastive learning. However, for time series with less intuitive or semantic meaning, sampling bias may be inevitably encountered in unsupervised approaches. Although supervised contrastive learning has shown superior performance by leveraging label information, it may also suffer from class collapse. In this study, we consider a realistic scenario in industry with limited annotation information available. 
A supervised contrastive framework is developed for high-frequency time series representation and classification, wherein a novel variant of supervised contrastive loss is proposed to include multiple augmentations while induce spread within each class. Experiments on four mainstream public datasets as well as a series of sensitivity and ablation analyses demonstrate that the learned representations are effective and robust compared with the direct supervised learning and self-supervised learning, notably under the minimal few-shot situation.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Xi and Ge, Cheng and Wang, Ming and Wang, Jin}, year={2023}, month={Jun.}, pages={7069-7077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25863/25635", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25863", + "pdf_size": 2653239, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3415808522700876835&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;taobao.com;alipay.com", + "email": "alibaba-inc.com;alibaba-inc.com;taobao.com;alipay.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25833", + "title": "Sustaining Fairness via Incremental Learning", + "track": "main", + "status": "Technical", + "abstract": "Machine learning systems are often deployed for making critical decisions like credit lending, hiring, etc. While making decisions, such systems often encode the user's demographic information (like gender, age) in their intermediate representations. 
This can lead to decisions that are biased towards specific demographics. Prior work has focused on debiasing intermediate representations to ensure fair decisions. However, these approaches fail to remain fair with changes in the task or demographic distribution. To ensure fairness in the wild, it is important for a system to adapt to such changes as it accesses new data in an incremental fashion. In this work, we propose to address this issue by introducing the problem of learning fair representations in an incremental learning setting. To this end, we present Fairness-aware Incremental Representation Learning (FaIRL), a representation learning system that can sustain fairness while incrementally learning new tasks. FaIRL is able to achieve fairness and learn new tasks by controlling the rate-distortion function of the learned representations. Our empirical evaluations show that FaIRL is able to make fair decisions while achieving high performance on the target task, outperforming several baselines.", + "primary_area": "machine learning i", + "author": "Somnath Basu Roy Chowdhury; Snigdha Chaturvedi", + "authorids": "", + "aff": "University of North Carolina at Chapel Hill; University of North Carolina at Chapel Hill", + "bibtex": "@article{Basu Roy Chowdhury_Chaturvedi_2023, title={Sustaining Fairness via Incremental Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25833}, DOI={10.1609/aaai.v37i6.25833}, abstractNote={Machine learning systems are often deployed for making critical decisions like credit lending, hiring, etc. While making decisions, such systems often encode the user\u2019s demographic information (like gender, age) in their intermediate representations. This can lead to decisions that are biased towards specific demographics. Prior work has focused on debiasing intermediate representations to ensure fair decisions. However, these approaches fail to remain fair with changes in the task or demographic distribution. 
To ensure fairness in the wild, it is important for a system to adapt to such changes as it accesses new data in an incremental fashion. In this work, we propose to address this issue by introducing the problem of learning fair representations in an incremental learning setting. To this end, we present Fairness-aware Incremental Representation Learning (FaIRL), a representation learning system that can sustain fairness while incrementally learning new tasks. FaIRL is able to achieve fairness and learn new tasks by controlling the rate-distortion function of the learned representations. Our empirical evaluations show that FaIRL is able to make fair decisions while achieving high performance on the target task, outperforming several baselines.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Basu Roy Chowdhury, Somnath and Chaturvedi, Snigdha}, year={2023}, month={Jun.}, pages={6797-6805} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25833/25605", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25833", + "pdf_size": 2371706, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15617198122449767459&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.unc.edu;cs.unc.edu", + "email": "cs.unc.edu;cs.unc.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of North Carolina", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unc.edu", + "aff_unique_abbr": "UNC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chapel Hill", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25753", + "title": "SwiftAvatar: Efficient Auto-Creation of Parameterized Stylized Character on Arbitrary Avatar Engines", + "track": "main", + "status": "Technical", + "abstract": "The creation of a parameterized stylized character involves 
careful selection of numerous parameters, also known as the \"avatar vectors\" that can be interpreted by the avatar engine. Existing unsupervised avatar vector estimation methods that auto-create avatars for users, however, often fail to work because of the domain gap between realistic faces and stylized avatar images. To this end, we propose SwiftAvatar, a novel avatar auto-creation framework that is evidently superior to previous works. SwiftAvatar introduces dual-domain generators to create pairs of realistic faces and avatar images using shared latent codes. The latent codes can then be bridged with the avatar vectors as pairs, by performing GAN inversion on the avatar images rendered from the engine using avatar vectors. Through this way, we are able to synthesize paired data in high-quality as many as possible, consisting of avatar vectors and their corresponding realistic faces. We also propose semantic augmentation to improve the diversity of synthesis. Finally, a light-weight avatar vector estimator is trained on the synthetic pairs to implement efficient auto-creation. Our experiments demonstrate the effectiveness and efficiency of SwiftAvatar on two different avatar engines. 
The superiority and advantageous flexibility of SwiftAvatar are also verified in both subjective and objective evaluations.", + "primary_area": "humans and ai", + "author": "Shizun Wang; Weihong Zeng; Xu Wang; Hao Yang; Li Chen; Chuang Zhang; Ming Wu; Yi Yuan; Yunzhao Zeng; Min Zheng; Jing Liu", + "authorids": "", + "aff": "Beijing University of Posts and Telecommunications+Douyin Vision; Douyin Vision; Douyin Vision; Douyin Vision; Douyin Vision; Beijing University of Posts and Telecommunications+Douyin Vision; Beijing University of Posts and Telecommunications+Douyin Vision; Douyin Vision; Douyin Vision; Douyin Vision; Douyin Vision", + "bibtex": "@article{Wang_Zeng_Wang_Yang_Chen_Zhang_Wu_Yuan_Zeng_Zheng_Liu_2023, title={SwiftAvatar: Efficient Auto-Creation of Parameterized Stylized Character on Arbitrary Avatar Engines}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25753}, DOI={10.1609/aaai.v37i5.25753}, abstractNote={The creation of a parameterized stylized character involves careful selection of numerous parameters, also known as the \"avatar vectors\" that can be interpreted by the avatar engine. Existing unsupervised avatar vector estimation methods that auto-create avatars for users, however, often fail to work because of the domain gap between realistic faces and stylized avatar images. To this end, we propose SwiftAvatar, a novel avatar auto-creation framework that is evidently superior to previous works. SwiftAvatar introduces dual-domain generators to create pairs of realistic faces and avatar images using shared latent codes. The latent codes can then be bridged with the avatar vectors as pairs, by performing GAN inversion on the avatar images rendered from the engine using avatar vectors. Through this way, we are able to synthesize paired data in high-quality as many as possible, consisting of avatar vectors and their corresponding realistic faces. We also propose semantic augmentation to improve the diversity of synthesis. 
Finally, a light-weight avatar vector estimator is trained on the synthetic pairs to implement efficient auto-creation. Our experiments demonstrate the effectiveness and efficiency of SwiftAvatar on two different avatar engines. The superiority and advantageous flexibility of SwiftAvatar are also verified in both subjective and objective evaluations.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Shizun and Zeng, Weihong and Wang, Xu and Yang, Hao and Chen, Li and Zhang, Chuang and Wu, Ming and Yuan, Yi and Zeng, Yunzhao and Zheng, Min and Liu, Jing}, year={2023}, month={Jun.}, pages={6101-6109} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25753/25525", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25753", + "pdf_size": 3837008, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2444383006552160192&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "bupt.edu.cn;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bupt.edu.cn;bupt.edu.cn;bytedance.com;bytedance.com;bytedance.com;bytedance.com", + "email": "bupt.edu.cn;bytedance.com;bytedance.com;bytedance.com;bytedance.com;bupt.edu.cn;bupt.edu.cn;bytedance.com;bytedance.com;bytedance.com;bytedance.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0+1;1;1;1;1;0+1;0+1;1;1;1;1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Douyin Vision", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bupt.edu.cn/;", + "aff_unique_abbr": "BUPT;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0;0;0;0;0+0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25105", + "title": "SwinRDM: Integrate SwinRNN with Diffusion Model towards High-Resolution and High-Quality Weather Forecasting", + "track": "main", + "status": "Technical", + "abstract": 
"Data-driven medium-range weather forecasting has attracted much attention in recent years. However, the forecasting accuracy at high resolution is unsatisfactory currently. Pursuing high-resolution and high-quality weather forecasting, we develop a data-driven model SwinRDM which integrates an improved version of SwinRNN with a diffusion model. SwinRDM performs predictions at 0.25-degree resolution and achieves superior forecasting accuracy to IFS (Integrated Forecast System), the state-of-the-art operational NWP model, on representative atmospheric variables including 500 hPa geopotential (Z500), 850 hPa temperature (T850), 2-m temperature (T2M), and total precipitation (TP), at lead times of up to 5 days. We propose to leverage a two-step strategy to achieve high-resolution predictions at 0.25-degree considering the trade-off between computation memory and forecasting accuracy. Recurrent predictions for future atmospheric fields are firstly performed at 1.40625-degree resolution, and then a diffusion-based super-resolution model is leveraged to recover the high spatial resolution and finer-scale atmospheric details. SwinRDM pushes forward the performance and potential of data-driven models for a large margin towards operational applications.", + "primary_area": "computer vision i", + "author": "Lei Chen; Fei Du; Yuan Hu; Zhibin Wang; Fan Wang", + "authorids": "", + "aff": "Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group", + "bibtex": "@article{Chen_Du_Hu_Wang_Wang_2023, title={SwinRDM: Integrate SwinRNN with Diffusion Model towards High-Resolution and High-Quality Weather Forecasting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25105}, DOI={10.1609/aaai.v37i1.25105}, abstractNote={Data-driven medium-range weather forecasting has attracted much attention in recent years. However, the forecasting accuracy at high resolution is unsatisfactory currently. 
Pursuing high-resolution and high-quality weather forecasting, we develop a data-driven model SwinRDM which integrates an improved version of SwinRNN with a diffusion model. SwinRDM performs predictions at 0.25-degree resolution and achieves superior forecasting accuracy to IFS (Integrated Forecast System), the state-of-the-art operational NWP model, on representative atmospheric variables including 500 hPa geopotential (Z500), 850 hPa temperature (T850), 2-m temperature (T2M), and total precipitation (TP), at lead times of up to 5 days. We propose to leverage a two-step strategy to achieve high-resolution predictions at 0.25-degree considering the trade-off between computation memory and forecasting accuracy. Recurrent predictions for future atmospheric fields are firstly performed at 1.40625-degree resolution, and then a diffusion-based super-resolution model is leveraged to recover the high spatial resolution and finer-scale atmospheric details. SwinRDM pushes forward the performance and potential of data-driven models for a large margin towards operational applications.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Lei and Du, Fei and Hu, Yuan and Wang, Zhibin and Wang, Fan}, year={2023}, month={Jun.}, pages={322-330} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25105/24877", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25105", + "pdf_size": 6817352, + "gs_citation": 55, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17357922864886738706&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": 
"https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25816", + "title": "Symbolic Metamodels for Interpreting Black-Boxes Using Primitive Functions", + "track": "main", + "status": "Technical", + "abstract": "One approach for interpreting black-box machine learning models is to find a global approximation of the model using simple interpretable functions, which is called a metamodel (a model of the model). Approximating the black-box with\na metamodel can be used to 1) estimate instance-wise feature importance; 2) understand the functional form of the model; 3) analyze feature interactions. In this work, we propose a new method for finding interpretable metamodels. Our approach utilizes Kolmogorov superposition theorem, which expresses multivariate functions as a composition of univariate functions (our primitive parameterized\nfunctions). This composition can be represented in the form of a tree. Inspired by symbolic regression, we use a modified form of genetic programming to search over different tree configurations. Gradient descent (GD) is used to optimize the parameters of a given configuration. Our method is a novel memetic algorithm that uses GD not only for training numerical constants but also for the training\nof building blocks. Using several experiments, we show that our method outperforms recent metamodeling approaches suggested for interpreting black-boxes.", + "primary_area": "machine learning i", + "author": "Mahed Abroshan; Saumitra Mishra; Mohammad Mahdi Khalili", + "authorids": "", + "aff": "The Alan Turing Institute, London, UK; JP Morgan AI Research, London, UK + The Alan Turing Institute; Yahoo! 
Research, NYC, NY, USA + CSE Department, The Ohio State University, Columbus, Ohio, USA", + "bibtex": "@article{Abroshan_Mishra_Khalili_2023, title={Symbolic Metamodels for Interpreting Black-Boxes Using Primitive Functions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25816}, DOI={10.1609/aaai.v37i6.25816}, abstractNote={One approach for interpreting black-box machine learning models is to find a global approximation of the model using simple interpretable functions, which is called a metamodel (a model of the model). Approximating the black-box with\na metamodel can be used to 1) estimate instance-wise feature importance; 2) understand the functional form of the model; 3) analyze feature interactions. In this work, we propose a new method for finding interpretable metamodels. Our approach utilizes Kolmogorov superposition theorem, which expresses multivariate functions as a composition of univariate functions (our primitive parameterized\nfunctions). This composition can be represented in the form of a tree. Inspired by symbolic regression, we use a modified form of genetic programming to search over different tree configurations. Gradient descent (GD) is used to optimize the parameters of a given configuration. Our method is a novel memetic algorithm that uses GD not only for training numerical constants but also for the training\nof building blocks. 
Using several experiments, we show that our method outperforms recent metamodeling approaches suggested for interpreting black-boxes.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Abroshan, Mahed and Mishra, Saumitra and Khalili, Mohammad Mahdi}, year={2023}, month={Jun.}, pages={6649-6657} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25816/25588", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25816", + "pdf_size": 787777, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9816056948607982359&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "turing.ac.uk;jpmorgan.com;yahooinc.com", + "email": "turing.ac.uk;jpmorgan.com;yahooinc.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+0;2+3", + "aff_unique_norm": "The Alan Turing Institute;JP Morgan AI Research;Yahoo! Research;The Ohio State University", + "aff_unique_dep": ";AI Research;;CSE Department", + "aff_unique_url": "https://www.turing.ac.uk;https://www.jpmorgan.com/global/research;https://research.yahoo.com;https://www.osu.edu", + "aff_unique_abbr": "ATI;JPM AI;Yahoo! Res;OSU", + "aff_campus_unique_index": "0;0;2+3", + "aff_campus_unique": "London;;New York City;Columbus", + "aff_country_unique_index": "0;0+0;1+1", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "article-25208", + "title": "Symbolic Replay: Scene Graph as Prompt for Continual Learning on VQA Task", + "track": "main", + "status": "Technical", + "abstract": "VQA is an ambitious task aiming to answer any image-related question. However, in reality, it is hard to build such a system once for all since the needs of users are continuously updated, and the system has to implement new functions. Thus, Continual Learning (CL) ability is a must in developing advanced VQA systems. 
Recently, a pioneer work split a VQA dataset into disjoint answer sets to study this topic. However, CL on VQA involves not only the expansion of label sets (new Answer sets). It is crucial to study how to answer questions when deploying VQA systems to new environments (new Visual scenes) and how to answer questions requiring new functions (new Question types). Thus, we propose CLOVE, a benchmark for Continual Learning On Visual quEstion answering, which contains scene- and function-incremental settings for the two aforementioned CL scenarios. In terms of methodology, the main difference between CL on VQA and classification is that the former additionally involves expanding and preventing forgetting of reasoning mechanisms, while the latter focusing on class representation. Thus, we propose a real-data-free replay-based method tailored for CL on VQA, named Scene Graph as Prompt for Symbolic Replay. Using a piece of scene graph as a prompt, it replays pseudo scene graphs to represent the past images, along with correlated QA pairs. A unified VQA model is also proposed to utilize the current and replayed data to enhance its QA ability. Finally, experimental results reveal challenges in CLOVE and demonstrate the effectiveness of our method. 
Code and data\nare available at https://github.com/showlab/CLVQA.", + "primary_area": "computer vision i", + "author": "Stan Weixian Lei; Difei Gao; Jay Zhangjie Wu; Yuxuan Wang; Wei Liu; Mengmi Zhang; Mike Zheng Shou", + "authorids": "", + "aff": "Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore; Tencent Data Platform; CFAR and I2R, Agency for Science, Technology, and Research (A*STAR), Singapore; Show Lab, National University of Singapore", + "bibtex": "@article{Lei_Gao_Wu_Wang_Liu_Zhang_Shou_2023, title={Symbolic Replay: Scene Graph as Prompt for Continual Learning on VQA Task}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25208}, DOI={10.1609/aaai.v37i1.25208}, abstractNote={VQA is an ambitious task aiming to answer any image-related question. However, in reality, it is hard to build such a system once for all since the needs of users are continuously updated, and the system has to implement new functions. Thus, Continual Learning (CL) ability is a must in developing advanced VQA systems. Recently, a pioneer work split a VQA dataset into disjoint answer sets to study this topic. However, CL on VQA involves not only the expansion of label sets (new Answer sets). It is crucial to study how to answer questions when deploying VQA systems to new environments (new Visual scenes) and how to answer questions requiring new functions (new Question types). Thus, we propose CLOVE, a benchmark for Continual Learning On Visual quEstion answering, which contains scene- and function-incremental settings for the two aforementioned CL scenarios. In terms of methodology, the main difference between CL on VQA and classification is that the former additionally involves expanding and preventing forgetting of reasoning mechanisms, while the latter focusing on class representation. 
Thus, we propose a real-data-free replay-based method tailored for CL on VQA, named Scene Graph as Prompt for Symbolic Replay. Using a piece of scene graph as a prompt, it replays pseudo scene graphs to represent the past images, along with correlated QA pairs. A unified VQA model is also proposed to utilize the current and replayed data to enhance its QA ability. Finally, experimental results reveal challenges in CLOVE and demonstrate the effectiveness of our method. Code and data\nare available at https://github.com/showlab/CLVQA.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lei, Stan Weixian and Gao, Difei and Wu, Jay Zhangjie and Wang, Yuxuan and Liu, Wei and Zhang, Mengmi and Shou, Mike Zheng}, year={2023}, month={Jun.}, pages={1250-1259} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25208/24980", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25208", + "pdf_size": 2733919, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9148477166511628477&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "u.nus.edu;gmail.com;u.nus.edu;u.nus.edu;columbia.edu;i2r.a-star.edu.sg;gmail.com", + "email": "u.nus.edu;gmail.com;u.nus.edu;u.nus.edu;columbia.edu;i2r.a-star.edu.sg;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;2;0", + "aff_unique_norm": "National University of Singapore;Tencent;Agency for Science, Technology, and Research", + "aff_unique_dep": "Show Lab;Data Platform;CFAR and I2R", + "aff_unique_url": "https://www.nus.edu.sg;https://www.tencent.com;https://www.a-star.edu.sg", + "aff_unique_abbr": "NUS;Tencent;A*STAR", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Singapore;", + "aff_country_unique_index": "0;0;0;0;1;0;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "article-25173", + "title": "Symmetry-Aware Transformer-Based Mirror Detection", + 
"track": "main", + "status": "Technical", + "abstract": "Mirror detection aims to identify the mirror regions in the given input image. Existing works mainly focus on integrating the semantic features and structural features to mine specific relations between mirror and non-mirror regions, or introducing mirror properties like depth or chirality to help analyze the existence of mirrors. In this work, we observe that a real object typically forms a loose symmetry relationship with its corresponding reflection in the mirror, which is beneficial in distinguishing mirrors from real objects. Based on this observation, we propose a dual-path Symmetry-Aware Transformer-based mirror detection Network (SATNet), which includes two novel modules: Symmetry-Aware Attention Module (SAAM) and Contrast and Fusion Decoder Module (CFDM). Specifically, we first adopt a transformer backbone to model global information aggregation in images, extracting multi-scale features in two paths. We then feed the high-level dual-path features to SAAMs to capture the symmetry relations. Finally, we fuse the dual-path features and refine our prediction maps progressively with CFDMs to obtain the final mirror mask. Experimental results show that SATNet outperforms both RGB and RGB-D mirror detection methods on all available mirror detection datasets.", + "primary_area": "computer vision i", + "author": "Tianyu Huang; Bowen Dong; Jiaying Lin; Xiaohui Liu; Rynson W.H. Lau; Wangmeng Zuo", + "authorids": "", + "aff": "Harbin Institute of Technology+Peng Cheng Laboratory; Harbin Institute of Technology; City University of Hong Kong; Harbin Institute of Technology; City University of Hong Kong; Harbin Institute of Technology+Peng Cheng Laboratory", + "bibtex": "@article{Huang_Dong_Lin_Liu_W.H. 
Lau_Zuo_2023, title={Symmetry-Aware Transformer-Based Mirror Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25173}, DOI={10.1609/aaai.v37i1.25173}, abstractNote={Mirror detection aims to identify the mirror regions in the given input image. Existing works mainly focus on integrating the semantic features and structural features to mine specific relations between mirror and non-mirror regions, or introducing mirror properties like depth or chirality to help analyze the existence of mirrors. In this work, we observe that a real object typically forms a loose symmetry relationship with its corresponding reflection in the mirror, which is beneficial in distinguishing mirrors from real objects. Based on this observation, we propose a dual-path Symmetry-Aware Transformer-based mirror detection Network (SATNet), which includes two novel modules: Symmetry-Aware Attention Module (SAAM) and Contrast and Fusion Decoder Module (CFDM). Specifically, we first adopt a transformer backbone to model global information aggregation in images, extracting multi-scale features in two paths. We then feed the high-level dual-path features to SAAMs to capture the symmetry relations. Finally, we fuse the dual-path features and refine our prediction maps progressively with CFDMs to obtain the final mirror mask. Experimental results show that SATNet outperforms both RGB and RGB-D mirror detection methods on all available mirror detection datasets.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Tianyu and Dong, Bowen and Lin, Jiaying and Liu, Xiaohui and W.H. 
Lau, Rynson and Zuo, Wangmeng}, year={2023}, month={Jun.}, pages={935-943} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25173/24945", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25173", + "pdf_size": 7007006, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13029556534855938975&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com;my.cityu.edu.hk;gmail.com;cityu.edu.hk;hit.edu.cn", + "email": "gmail.com;gmail.com;my.cityu.edu.hk;gmail.com;cityu.edu.hk;hit.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;2;0;2;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Laboratory;City University of Hong Kong", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.hit.edu.cn/;http://www.pcl.ac.cn;https://www.cityu.edu.hk", + "aff_unique_abbr": "HIT;PCL;CityU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0+0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26233", + "title": "Symphony in the Latent Space: Provably Integrating High-Dimensional Techniques with Non-linear Machine Learning Models", + "track": "main", + "status": "Technical", + "abstract": "This paper revisits building machine learning algorithms that involve interactions between entities, such as those between financial assets in an actively managed portfolio, or interactions between users in a social network. Our goal is to forecast the future evolution of ensembles of multivariate time series in such applications (e.g., the future return of a financial asset or the future popularity of a Twitter account). Designing ML algorithms for such systems requires addressing the challenges of high-dimensional interactions and non-linearity. 
Existing approaches usually adopt an ad-hoc approach to integrating high-dimensional techniques into non-linear models and recent studies have shown these approaches have questionable efficacy in time-evolving interacting systems. \n\n\nTo this end, we propose a novel framework, which we dub as the additive influence model. Under our modeling assumption, we show that it is possible to decouple the learning of high-dimensional interactions from the learning of non-linear feature interactions. To learn the high-dimensional interactions, we leverage kernel-based techniques, with provable guarantees, to embed the entities in a low-dimensional latent space. To learn the non-linear feature-response interactions, we generalize prominent machine learning techniques, including designing a new statistically sound non-parametric method and an ensemble learning algorithm optimized for vector regressions. \nExtensive experiments on two common applications demonstrate that our new algorithms deliver significantly stronger forecasting power compared to standard and recently proposed methods.", + "primary_area": "machine learning iv", + "author": "Qiong Wu; Jian Li; Zhenming Liu; Yanhua Li; Mihai Cucuringu", + "authorids": "", + "aff": "College of William & Mary; Tsinghua University; College of William & Mary; Worcester Polytechnic Institute; University of Oxford + The Alan Turing Institute", + "bibtex": "@article{Wu_Li_Liu_Li_Cucuringu_2023, title={Symphony in the Latent Space: Provably Integrating High-Dimensional Techniques with Non-linear Machine Learning Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26233}, DOI={10.1609/aaai.v37i9.26233}, abstractNote={This paper revisits building machine learning algorithms that involve interactions between entities, such as those between financial assets in an actively managed portfolio, or interactions between users in a social network. 
Our goal is to forecast the future evolution of ensembles of multivariate time series in such applications (e.g., the future return of a financial asset or the future popularity of a Twitter account). Designing ML algorithms for such systems requires addressing the challenges of high-dimensional interactions and non-linearity. Existing approaches usually adopt an ad-hoc approach to integrating high-dimensional techniques into non-linear models and recent studies have shown these approaches have questionable efficacy in time-evolving interacting systems. To this end, we propose a novel framework, which we dub as the additive influence model. Under our modeling assumption, we show that it is possible to decouple the learning of high-dimensional interactions from the learning of non-linear feature interactions. To learn the high-dimensional interactions, we leverage kernel-based techniques, with provable guarantees, to embed the entities in a low-dimensional latent space. To learn the non-linear feature-response interactions, we generalize prominent machine learning techniques, including designing a new statistically sound non-parametric method and an ensemble learning algorithm optimized for vector regressions. 
Extensive experiments on two common applications demonstrate that our new algorithms deliver significantly stronger forecasting power compared to standard and recently proposed methods.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Qiong and Li, Jian and Liu, Zhenming and Li, Yanhua and Cucuringu, Mihai}, year={2023}, month={Jun.}, pages={10361-10369} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26233/26005", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26233", + "pdf_size": 628238, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3399229674728123656&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "wm.edu;tsinghua.edu.cn;wm.edu;wpi.edu;turing.ac.uk", + "email": "wm.edu;tsinghua.edu.cn;wm.edu;wpi.edu;turing.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;2;3+4", + "aff_unique_norm": "College of William & Mary;Tsinghua University;Worcester Polytechnic Institute;University of Oxford;The Alan Turing Institute", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.wm.edu;https://www.tsinghua.edu.cn;https://www.wpi.edu;https://www.ox.ac.uk;https://www.turing.ac.uk", + "aff_unique_abbr": "WM;THU;WPI;Oxford;ATI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;2+2", + "aff_country_unique": "United States;China;United Kingdom" + }, + { + "id": "article-26361", + "title": "Synchronization and Diversity of Solutions", + "track": "main", + "status": "Technical", + "abstract": "A central computational problem in the realm of automata theory is the problem of determining whether a finite automaton A has a synchronizing word. This problem has found applications in a variety of subfields of artificial intelligence, including planning, robotics, and multi-agent systems. 
In this work, we study this problem within the framework of diversity of solutions, an up-and-coming trend in the field of artificial intelligence where the goal is to compute a set of solutions that are sufficiently distinct from one another. We define a notion of diversity of solutions that is suitable for contexts were solutions are strings that may have distinct lengths. Using our notion of diversity, we show that for each fixed r \u2208 N, each fixed finite automaton A, and each finite automaton B given at the input, the problem of determining the existence of a diverse set {w1,w2, . . . ,wr} \u2286 L(B) of words that are synchronizing for A can be solved in polynomial time. Finally, we generalize this result to the realm of conformant planning, where the goal is to devise plans that achieve a goal irrespectively of initial conditions and of nondeterminism that may occur during their execution.", + "primary_area": "multiagent systems", + "author": "Emmanuel Arrighi; Henning Fernau; Mateus de Oliveira Oliveira; Petra Wolf", + "authorids": "", + "aff": "University of Bergen, Bergen, Norway; University of Trier, Trier, Germany; University of Bergen, Bergen, Norway + Stockholm University, Stockholm, Sweden; University of Bergen, Bergen, Norway + University of Trier, Trier, Germany", + "bibtex": "@article{Arrighi_Fernau_de Oliveira Oliveira_Wolf_2023, title={Synchronization and Diversity of Solutions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26361}, DOI={10.1609/aaai.v37i10.26361}, abstractNote={A central computational problem in the realm of automata theory is the problem of determining whether a finite automaton A has a synchronizing word. This problem has found applications in a variety of subfields of artificial intelligence, including planning, robotics, and multi-agent systems. 
In this work, we study this problem within the framework of diversity of solutions, an up-and-coming trend in the field of artificial intelligence where the goal is to compute a set of solutions that are sufficiently distinct from one another. We define a notion of diversity of solutions that is suitable for contexts were solutions are strings that may have distinct lengths. Using our notion of diversity, we show that for each fixed r \u2208 N, each fixed finite automaton A, and each finite automaton B given at the input, the problem of determining the existence of a diverse set {w1,w2, . . . ,wr} \u2286 L(B) of words that are synchronizing for A can be solved in polynomial time. Finally, we generalize this result to the realm of conformant planning, where the goal is to devise plans that achieve a goal irrespectively of initial conditions and of nondeterminism that may occur during their execution.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Arrighi, Emmanuel and Fernau, Henning and de Oliveira Oliveira, Mateus and Wolf, Petra}, year={2023}, month={Jun.}, pages={11516-11524} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26361/26133", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26361", + "pdf_size": 178414, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14638241831774874473&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 5, + "aff_domain": "uib.no;uni-trier.de;dsv.su.se;wolfp.net", + "email": "uib.no;uni-trier.de;dsv.su.se;wolfp.net", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0+2;0+1", + "aff_unique_norm": "University of Bergen;University of Trier;Stockholm University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uib.no;https://www.uni-trier.de;https://www.su.se", + "aff_unique_abbr": "uib;UT;SU", + "aff_campus_unique_index": "0;1;0+2;0+1", + "aff_campus_unique": "Bergen;Trier;Stockholm", 
+ "aff_country_unique_index": "0;1;0+2;0+1", + "aff_country_unique": "Norway;Germany;Sweden" + }, + { + "id": "article-25388", + "title": "Synthetic Data Can Also Teach: Synthesizing Effective Data for Unsupervised Visual Representation Learning", + "track": "main", + "status": "Technical", + "abstract": "Contrastive learning (CL), a self-supervised learning approach, can effectively learn visual representations from unlabeled data. Given the CL training data, generative models can be trained to generate synthetic data to supplement the real data. Using both synthetic and real data for CL training has the potential to improve the quality of learned representations. However, synthetic data usually has lower quality than real data, and using synthetic data may not improve CL compared with using real data. To tackle this problem, we propose a data generation framework with two methods to improve CL training by joint sample generation and contrastive learning. The first approach generates hard samples for the main model. The generator is jointly learned with the main model to dynamically customize hard samples based on the training state of the main model. Besides, a pair of data generators are proposed to generate similar but distinct samples as positive pairs. In joint learning, the hardness of a positive pair is progressively increased by decreasing their similarity. Experimental results on multiple datasets show superior accuracy and data efficiency of the proposed data generation methods applied to CL. For example, about 4.0%, 3.5%, and 2.6% accuracy improvements for linear classification are observed on ImageNet-100, CIFAR-100, and CIFAR-10, respectively. 
Besides, up to 2\u00d7 data efficiency for linear classification and up to 5\u00d7 data efficiency for transfer learning are achieved.", + "primary_area": "computer vision iii", + "author": "Yawen Wu; Zhepeng Wang; Dewen Zeng; Yiyu Shi; Jingtong Hu", + "authorids": "", + "aff": "University of Pittsburgh; George Mason University; University of Notre Dame; University of Notre Dame; University of Pittsburgh", + "bibtex": "@article{Wu_Wang_Zeng_Shi_Hu_2023, title={Synthetic Data Can Also Teach: Synthesizing Effective Data for Unsupervised Visual Representation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25388}, DOI={10.1609/aaai.v37i3.25388}, abstractNote={Contrastive learning (CL), a self-supervised learning approach, can effectively learn visual representations from unlabeled data. Given the CL training data, generative models can be trained to generate synthetic data to supplement the real data. Using both synthetic and real data for CL training has the potential to improve the quality of learned representations. However, synthetic data usually has lower quality than real data, and using synthetic data may not improve CL compared with using real data. To tackle this problem, we propose a data generation framework with two methods to improve CL training by joint sample generation and contrastive learning. The first approach generates hard samples for the main model. The generator is jointly learned with the main model to dynamically customize hard samples based on the training state of the main model. Besides, a pair of data generators are proposed to generate similar but distinct samples as positive pairs. In joint learning, the hardness of a positive pair is progressively increased by decreasing their similarity. Experimental results on multiple datasets show superior accuracy and data efficiency of the proposed data generation methods applied to CL. 
For example, about 4.0%, 3.5%, and 2.6% accuracy improvements for linear classification are observed on ImageNet-100, CIFAR-100, and CIFAR-10, respectively. Besides, up to 2\u00d7 data efficiency for linear classification and up to 5\u00d7 data efficiency for transfer learning are achieved.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yawen and Wang, Zhepeng and Zeng, Dewen and Shi, Yiyu and Hu, Jingtong}, year={2023}, month={Jun.}, pages={2866-2874} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25388/25160", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25388", + "pdf_size": 1146723, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=871214303485256754&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "pitt.edu;gmu.edu;nd.edu;nd.edu;pitt.edu", + "email": "pitt.edu;gmu.edu;nd.edu;nd.edu;pitt.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;0", + "aff_unique_norm": "University of Pittsburgh;George Mason University;University of Notre Dame", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.pitt.edu;https://www.gmu.edu;https://www.nd.edu", + "aff_unique_abbr": "Pitt;GMU;Notre Dame", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26284", + "title": "T-distributed Spherical Feature Representation for Imbalanced Classification", + "track": "main", + "status": "Technical", + "abstract": "Real-world classification tasks often show an extremely imbalanced problem. The extreme imbalance will cause a strong bias that the decision boundary of the classifier is completely dominated by the categories with abundant samples, which are also called the head categories. 
Current methods have alleviated the imbalanced impact from mainly three aspects: class re-balance, decoupling and domain adaptation. However, the existing criterion with the winner-take-all strategy still leads to the crowding problem in the eigenspace. The head categories with many samples can extract features more accurately, but occupy most of the eigenspace. The tail categories sharing the rest of the narrow eigenspace are too crowded together to accurately extract features. Above these issues, we propose a novel T-distributed spherical metric for equalized eigenspace in the imbalanced classification, which has the following innovations: 1) We design the T-distributed spherical metric, which has the characteristics of high kurtosis. Instead of the winner-take-all strategy, the T-distributed spherical metric produces a high logit only when the extracted feature is close enough to the category center, without a strong bias against other categories. 2) The T-distributed spherical metric is integrated into the classifier, which is able to equalize the eigenspace for alleviating the crowding issue in the imbalanced problem. The equalized eigenspace by the T-distributed spherical classifier is capable of improving the accuracy of the tail categories while maintaining the accuracy of the head, which significantly promotes the intraclass compactness and interclass separability of features. Extensive experiments on large-scale imbalanced datasets verify our method, which shows superior results in the long-tailed CIFAR-100/-10 with the imbalanced ratio IR = 100/50. Our method also achieves excellent results on the large-scale ImageNet-LT dataset and the iNaturalist dataset with various backbones. In addition, we provide a case study of the real clinical classification of pancreatic tumor subtypes with 6 categories. Among them, the largest number of PDAC accounts for 315 cases, and the least CP has only 8 cases. 
After 4-fold cross-validation, we achieved a top-1 accuracy of 69.04%.", + "primary_area": "machine learning iv", + "author": "Xiaoyu Yang; Yufei Chen; Xiaodong Yue; Shaoxun Xu; Chao Ma", + "authorids": "", + "aff": "College of Electronics and Information Engineering, Tongji University, Shanghai, China; College of Electronics and Information Engineering, Tongji University, Shanghai, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China+Artificial Intelligence Institute of Shanghai University, Shanghai, China+VLN Lab, NAVI MedTech Co., Ltd. Shanghai, China; College of Electronics and Information Engineering, Tongji University, Shanghai, China; Department of Radiology, Changhai Hospital of Shanghai, Shanghai, China", + "bibtex": "@article{Yang_Chen_Yue_Xu_Ma_2023, title={T-distributed Spherical Feature Representation for Imbalanced Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26284}, DOI={10.1609/aaai.v37i9.26284}, abstractNote={Real-world classification tasks often show an extremely imbalanced problem. The extreme imbalance will cause a strong bias that the decision boundary of the classifier is completely dominated by the categories with abundant samples, which are also called the head categories. Current methods have alleviated the imbalanced impact from mainly three aspects: class re-balance, decoupling and domain adaptation. However, the existing criterion with the winner-take-all strategy still leads to the crowding problem in the eigenspace. The head categories with many samples can extract features more accurately, but occupy most of the eigenspace. The tail categories sharing the rest of the narrow eigenspace are too crowded together to accurately extract features. 
Above these issues, we propose a novel T-distributed spherical metric for equalized eigenspace in the imbalanced classification, which has the following innovations: 1) We design the T-distributed spherical metric, which has the characteristics of high kurtosis. Instead of the winner-take-all strategy, the T-distributed spherical metric produces a high logit only when the extracted feature is close enough to the category center, without a strong bias against other categories. 2) The T-distributed spherical metric is integrated into the classifier, which is able to equalize the eigenspace for alleviating the crowding issue in the imbalanced problem. The equalized eigenspace by the T-distributed spherical classifier is capable of improving the accuracy of the tail categories while maintaining the accuracy of the head, which significantly promotes the intraclass compactness and interclass separability of features. Extensive experiments on large-scale imbalanced datasets verify our method, which shows superior results in the long-tailed CIFAR-100/-10 with the imbalanced ratio IR = 100/50. Our method also achieves excellent results on the large-scale ImageNet-LT dataset and the iNaturalist dataset with various backbones. In addition, we provide a case study of the real clinical classification of pancreatic tumor subtypes with 6 categories. Among them, the largest number of PDAC accounts for 315 cases, and the least CP has only 8 cases. 
After 4-fold cross-validation, we achieved a top-1 accuracy of 69.04%.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Xiaoyu and Chen, Yufei and Yue, Xiaodong and Xu, Shaoxun and Ma, Chao}, year={2023}, month={Jun.}, pages={10825-10833} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26284/26056", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26284", + "pdf_size": 2334817, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2295376393126753858&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "tongji.edu.cn; ; ; ; ", + "email": "tongji.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1+1+2;0;3", + "aff_unique_norm": "Tongji University;Shanghai University;NAVI MedTech Co., Ltd.;Changhai Hospital of Shanghai", + "aff_unique_dep": "College of Electronics and Information Engineering;School of Computer Engineering and Science;VLN Lab;Department of Radiology", + "aff_unique_url": "http://www.tongji.edu.cn;https://www.shu.edu.cn;;", + "aff_unique_abbr": "Tongji;SHU;;", + "aff_campus_unique_index": "0;0;0+0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0+0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25553", + "title": "T2-GNN: Graph Neural Networks for Graphs with Incomplete Features and Structure via Teacher-Student Distillation", + "track": "main", + "status": "Technical", + "abstract": "Graph Neural Networks (GNNs) have been a prevailing technique for tackling various analysis tasks on graph data. A key premise for the remarkable performance of GNNs relies on complete and trustworthy initial graph descriptions (i.e., node features and graph structure), which is often not satisfied since real-world graphs are often incomplete due to various unavoidable factors. 
In particular, GNNs face greater challenges when both node features and graph structure are incomplete at the same time. The existing methods either focus on feature completion or structure completion. They usually rely on the matching relationship between features and structure, or employ joint learning of node representation and feature (or structure) completion in the hope of achieving mutual benefit. However, recent studies confirm that the mutual interference between features and structure leads to the degradation of GNN performance. When both features and structure are incomplete, the mismatch between features and structure caused by the missing randomness exacerbates the interference between the two, which may trigger incorrect completions that negatively affect node representation. To this end, in this paper we propose a general GNN framework based on teacher-student distillation to improve the performance of GNNs on incomplete graphs, namely T2-GNN. To avoid the interference between features and structure, we separately design feature-level and structure-level teacher models to provide targeted guidance for student model (base GNNs, such as GCN) through distillation. Then we design two personalized methods to obtain well-trained feature and structure teachers. To ensure that the knowledge of the teacher model is comprehensively and effectively distilled to the student model, we further propose a dual distillation mode to enable the student to acquire as much expert knowledge as possible. Extensive experiments on eight benchmark datasets demonstrate the effectiveness and robustness of the new framework on graphs with incomplete features and structure.", + "primary_area": "data mining and knowledge management", + "author": "Cuiying Huo; Di Jin; Yawen Li; Dongxiao He; Yu-Bin Yang; Lingfei Wu", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin, P.R. 
China; College of Intelligence and Computing, Tianjin University, Tianjin, P.R. China + State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, P.R. China; School of Economics and Management, Beijing University of Posts and Telecommunications, Beijing, P.R. China; College of Intelligence and Computing, Tianjin University, Tianjin, P.R. China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, P.R. China; Pinterest, New York, USA", + "bibtex": "@article{Huo_Jin_Li_He_Yang_Wu_2023, title={T2-GNN: Graph Neural Networks for Graphs with Incomplete Features and Structure via Teacher-Student Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25553}, DOI={10.1609/aaai.v37i4.25553}, abstractNote={Graph Neural Networks (GNNs) have been a prevailing technique for tackling various analysis tasks on graph data. A key premise for the remarkable performance of GNNs relies on complete and trustworthy initial graph descriptions (i.e., node features and graph structure), which is often not satisfied since real-world graphs are often incomplete due to various unavoidable factors. In particular, GNNs face greater challenges when both node features and graph structure are incomplete at the same time. The existing methods either focus on feature completion or structure completion. They usually rely on the matching relationship between features and structure, or employ joint learning of node representation and feature (or structure) completion in the hope of achieving mutual benefit. However, recent studies confirm that the mutual interference between features and structure leads to the degradation of GNN performance. When both features and structure are incomplete, the mismatch between features and structure caused by the missing randomness exacerbates the interference between the two, which may trigger incorrect completions that negatively affect node representation. 
To this end, in this paper we propose a general GNN framework based on teacher-student distillation to improve the performance of GNNs on incomplete graphs, namely T2-GNN. To avoid the interference between features and structure, we separately design feature-level and structure-level teacher models to provide targeted guidance for student model (base GNNs, such as GCN) through distillation. Then we design two personalized methods to obtain well-trained feature and structure teachers. To ensure that the knowledge of the teacher model is comprehensively and effectively distilled to the student model, we further propose a dual distillation mode to enable the student to acquire as much expert knowledge as possible. Extensive experiments on eight benchmark datasets demonstrate the effectiveness and robustness of the new framework on graphs with incomplete features and structure.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huo, Cuiying and Jin, Di and Li, Yawen and He, Dongxiao and Yang, Yu-Bin and Wu, Lingfei}, year={2023}, month={Jun.}, pages={4339-4346} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25553/25325", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25553", + "pdf_size": 275379, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5591465699033747650&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "tju.edu.cn;tju.edu.cn;bupt.edu.cn;tju.edu.cn;nju.edu.cn;email.wm.edu", + "email": "tju.edu.cn;tju.edu.cn;bupt.edu.cn;tju.edu.cn;nju.edu.cn;email.wm.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;2;0;1;3", + "aff_unique_norm": "Tianjin University;Nanjing University;Beijing University of Posts and Telecommunications;Pinterest", + "aff_unique_dep": "College of Intelligence and Computing;State Key Laboratory for Novel Software Technology;School of Economics and Management;", + 
"aff_unique_url": "http://www.tju.edu.cn;http://www.nju.edu.cn;http://www.bupt.edu.cn;https://www.pinterest.com", + "aff_unique_abbr": "Tianjin University;Nanjing U;BUPT;Pinterest", + "aff_campus_unique_index": "0;0+1;2;0;1;3", + "aff_campus_unique": "Tianjin;Nanjing;Beijing;New York", + "aff_country_unique_index": "0;0+0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26272", + "title": "T2G-FORMER: Organizing Tabular Features into Relation Graphs Promotes Heterogeneous Feature Interaction", + "track": "main", + "status": "Technical", + "abstract": "Recent development of deep neural networks (DNNs) for tabular learning has largely benefited from the capability of DNNs for automatic feature interaction. However, the heterogeneity nature of tabular features makes such features relatively independent, and developing effective methods to promote tabular feature interaction still remains an open problem. In this paper, we propose a novel Graph Estimator, which automatically estimates the relations among tabular features and builds graphs by assigning edges between related features. Such relation graphs organize independent tabular features into a kind of graph data such that interaction of nodes (tabular features) can be conducted in an orderly fashion. Based on our proposed Graph Estimator, we present a bespoke Transformer network tailored for tabular learning, called T2G-Former, which processes tabular data by performing tabular feature interaction guided by the relation graphs. A specific Cross-level Readout collects salient features predicted by the layers in T2G-Former across different levels, and attains global semantics for final prediction. Comprehensive experiments show that our T2G-Former achieves superior performance among DNNs and is competitive with non-deep Gradient Boosted Decision Tree models. 
The code and detailed results are available at https://github.com/jyansir/t2g-former.", + "primary_area": "machine learning iv", + "author": "Jiahuan Yan; Jintai Chen; Yixuan Wu; Danny Z. Chen; Jian Wu", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; School of Medicine, Zhejiang University; Department of Computer Science and Engineering, University of Notre Dame; The First Affiliated Hospital, and Department of Public Health, Zhejiang University School of Medicine", + "bibtex": "@article{Yan_Chen_Wu_Chen_Wu_2023, title={T2G-FORMER: Organizing Tabular Features into Relation Graphs Promotes Heterogeneous Feature Interaction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26272}, DOI={10.1609/aaai.v37i9.26272}, abstractNote={Recent development of deep neural networks (DNNs) for tabular learning has largely benefited from the capability of DNNs for automatic feature interaction. However, the heterogeneity nature of tabular features makes such features relatively independent, and developing effective methods to promote tabular feature interaction still remains an open problem. In this paper, we propose a novel Graph Estimator, which automatically estimates the relations among tabular features and builds graphs by assigning edges between related features. Such relation graphs organize independent tabular features into a kind of graph data such that interaction of nodes (tabular features) can be conducted in an orderly fashion. Based on our proposed Graph Estimator, we present a bespoke Transformer network tailored for tabular learning, called T2G-Former, which processes tabular data by performing tabular feature interaction guided by the relation graphs. A specific Cross-level Readout collects salient features predicted by the layers in T2G-Former across different levels, and attains global semantics for final prediction. 
Comprehensive experiments show that our T2G-Former achieves superior performance among DNNs and is competitive with non-deep Gradient Boosted Decision Tree models. The code and detailed results are available at https://github.com/jyansir/t2g-former.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Jiahuan and Chen, Jintai and Wu, Yixuan and Chen, Danny Z. and Wu, Jian}, year={2023}, month={Jun.}, pages={10720-10728} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26272/26044", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26272", + "pdf_size": 639258, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15825741608900590895&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;gmail.com;zju.edu.cn;nd.edu;zju.edu.cn", + "email": "zju.edu.cn;gmail.com;zju.edu.cn;nd.edu;zju.edu.cn", + "github": "https://github.com/jyansir/t2g-former", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;2", + "aff_unique_norm": "Zhejiang University;University of Notre Dame;Zhejiang University School of Medicine", + "aff_unique_dep": "College of Computer Science and Technology;Department of Computer Science and Engineering;Department of Public Health", + "aff_unique_url": "http://www.zju.edu.cn;https://www.nd.edu;http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU;Notre Dame;Zhejiang University", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-27027", + "title": "TA-DA: Topic-Aware Domain Adaptation for Scientific Keyphrase Identification and Classification (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Keyphrase identification and classification is a Natural Language Processing and Information Retrieval task that involves extracting 
relevant groups of words from a given text related to the main topic. In this work, we focus on extracting keyphrases from scientific documents. We introduce TA-DA, a Topic-Aware Domain Adaptation framework for keyphrase extraction that integrates Multi-Task Learning with Adversarial Training and Domain Adaptation. Our approach improves performance over baseline models by up to 5% in the exact match of the F1-score.", + "primary_area": "", + "author": "R\u0103zvan-Alexandru Sm\u0103du; George-Eduard Zaharia; Andrei-Marius Avram; Dumitru-Clementin Cercel; Mihai Dascalu; Florin Pop", + "authorids": "", + "aff": "Faculty of Automatic Control and Computers, University Politehnica of Bucharest; Faculty of Automatic Control and Computers, University Politehnica of Bucharest; Faculty of Automatic Control and Computers, University Politehnica of Bucharest; Faculty of Automatic Control and Computers, University Politehnica of Bucharest; Faculty of Automatic Control and Computers, University Politehnica of Bucharest; Faculty of Automatic Control and Computers, University Politehnica of Bucharest + National Institute for Research and Development in Informatics - ICI Bucharest, Romania", + "bibtex": "@article{Sm\u0103du_Zaharia_Avram_Cercel_Dascalu_Pop_2024, title={TA-DA: Topic-Aware Domain Adaptation for Scientific Keyphrase Identification and Classification (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27027}, DOI={10.1609/aaai.v37i13.27027}, abstractNote={Keyphrase identification and classification is a Natural Language Processing and Information Retrieval task that involves extracting relevant groups of words from a given text related to the main topic. In this work, we focus on extracting keyphrases from scientific documents. We introduce TA-DA, a Topic-Aware Domain Adaptation framework for keyphrase extraction that integrates Multi-Task Learning with Adversarial Training and Domain Adaptation. 
Our approach improves performance over baseline models by up to 5% in the exact match of the F1-score.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sm\u0103du, R\u0103zvan-Alexandru and Zaharia, George-Eduard and Avram, Andrei-Marius and Cercel, Dumitru-Clementin and Dascalu, Mihai and Pop, Florin}, year={2024}, month={Jul.}, pages={16334-16335} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27027/26799", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27027", + "pdf_size": 233514, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14795098239239385811&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "stud.acs.upb.ro;stud.acs.upb.ro;stud.acs.upb.ro;upb.ro;upb.ro;upb.ro", + "email": "stud.acs.upb.ro;stud.acs.upb.ro;stud.acs.upb.ro;upb.ro;upb.ro;upb.ro", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0+1", + "aff_unique_norm": "University Politehnica of Bucharest;National Institute for Research and Development in Informatics", + "aff_unique_dep": "Faculty of Automatic Control and Computers;ICI", + "aff_unique_url": "https://www.upb.ro;", + "aff_unique_abbr": "UPB;ICI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "Romania" + }, + { + "id": "article-25868", + "title": "TC-DWA:Text Clustering with Dual Word-Level Augmentation", + "track": "main", + "status": "Technical", + "abstract": "The pre-trained language models, e.g., ELMo and BERT, have recently achieved promising performance improvement in a wide range of NLP tasks, because they can output strong contextualized embedded features of words. Inspired by their great success, in this paper we target at fine-tuning them to effectively handle the text clustering task, i.e., a classic and fundamental challenge in machine learning. 
Accordingly, we propose a novel BERT-based method, namely Text Clustering with Dual Word-level Augmentation (TCDWA). To be specific, we formulate a self-training objective and enhance it with a dual word-level augmentation technique. First, we suppose that each text contains several most informative words, called anchor words, supporting the full text semantics. We use the embedded features of anchor words as augmented data, which are selected by ranking the norm-based attention weights of words. Second, we formulate an expectation form of word augmentation, which is equivalent to generating infinite augmented features, and further suggest a tractable approximation of Taylor expansion for efficient optimization. To evaluate the effectiveness of TCDWA, we conduct extensive experiments on several benchmark text datasets. The results demonstrate that TCDWA consistently outperforms the state-of-the-art baseline methods. Code available: https://github.com/BoCheng-96/TC-DWA.", + "primary_area": "machine learning i", + "author": "Bo Cheng; Ximing Li; Yi Chang", + "authorids": "", + "aff": "School of Artificial Intelligence, Jilin University, China+International Center of Future Science, Jilin University, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China; School of Artificial Intelligence, Jilin University, China+International Center of Future Science, Jilin University, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China", + "bibtex": "@article{Cheng_Li_Chang_2023, title={TC-DWA:Text Clustering with Dual Word-Level Augmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25868}, DOI={10.1609/aaai.v37i6.25868}, abstractNote={The pre-trained language models, e.g., ELMo 
and BERT, have recently achieved promising performance improvement in a wide range of NLP tasks, because they can output strong contextualized embedded features of words. Inspired by their great success, in this paper we target at fine-tuning them to effectively handle the text clustering task, i.e., a classic and fundamental challenge in machine learning. Accordingly, we propose a novel BERT-based method, namely Text Clustering with Dual Word-level Augmentation (TCDWA). To be specific, we formulate a self-training objective and enhance it with a dual word-level augmentation technique. First, we suppose that each text contains several most informative words, called anchor words, supporting the full text semantics. We use the embedded features of anchor words as augmented data, which are selected by ranking the norm-based attention weights of words. Second, we formulate an expectation form of word augmentation, which is equivalent to generating infinite augmented features, and further suggest a tractable approximation of Taylor expansion for efficient optimization. To evaluate the effectiveness of TCDWA, we conduct extensive experiments on several benchmark text datasets. The results demonstrate that TCDWA consistently outperforms the state-of-the-art baseline methods. 
Code available: https://github.com/BoCheng-96/TC-DWA.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Bo and Li, Ximing and Chang, Yi}, year={2023}, month={Jun.}, pages={7113-7121} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25868/25640", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25868", + "pdf_size": 706931, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8318401904844200661&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;gmail.com;jlu.edu.cn", + "email": "gmail.com;gmail.com;jlu.edu.cn", + "github": "https://github.com/BoCheng-96/TC-DWA", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0+1;0+0;0+0+1", + "aff_unique_norm": "Jilin University;Engineering Research Center of Knowledge-Driven Human-Machine Intelligence", + "aff_unique_dep": "School of Artificial Intelligence;Ministry of Education", + "aff_unique_url": "http://www.jlu.edu.cn;", + "aff_unique_abbr": "JLU;", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26848", + "title": "THMA: Tencent HD Map AI System for Creating HD Map Annotations", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Nowadays, autonomous vehicle technology is becoming more and more mature. Critical to progress and safety, high-definition (HD) maps, a type of centimeter-level map collected using a laser sensor, provide accurate descriptions of the surrounding environment. The key challenge of HD map production is efficient, high-quality collection and annotation of large-volume datasets. Due to the demand for high quality, HD map production requires significant manual human effort to create annotations, a very time-consuming and costly process for the map industry. 
In order to reduce manual annotation burdens, many artificial intelligence (AI) algorithms have been developed to pre-label the HD maps. However, there still exists a large gap between AI algorithms and the traditional manual HD map production pipelines in accuracy and robustness. Furthermore, it is also very resource-costly to build large-scale annotated datasets and advanced machine learning algorithms for AI-based HD map automatic labeling systems. In this paper, we introduce the Tencent HD Map AI (THMA) system, an innovative end-to-end, AI-based, active learning HD map labeling system capable of producing and labeling HD maps with a scale of hundreds of thousands of kilometers. In THMA, we train AI models directly from massive HD map datasets via supervised, self-supervised, and weakly supervised learning to achieve high accuracy and efficiency required by downstream users. THMA has been deployed by the Tencent Map team to provide services to downstream companies and users, serving over 1,000 labeling workers and producing more than 30,000 kilometers of HD map data per day at most. 
More than 90 percent of the HD map data in Tencent Map is labeled automatically by THMA, accelerating the traditional HD map labeling process by more than ten times.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Kun Tang; Xu Cao; Zhipeng Cao; Tong Zhou; Erlong Li; Ao Liu; Shengtao Zou; Chang Liu; Shuqi Mei; Elena Sizikova; Chao Zheng", + "authorids": "", + "aff": "T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China + Center of Data Science, New York University, New York, United States; T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China; T Lab, Tencent, Beijing, China; Center of Data Science, New York University, New York, United States; T Lab, Tencent, Beijing, China", + "bibtex": "@article{Tang_Cao_Cao_Zhou_Li_Liu_Zou_Liu_Mei_Sizikova_Zheng_2024, title={THMA: Tencent HD Map AI System for Creating HD Map Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26848}, DOI={10.1609/aaai.v37i13.26848}, abstractNote={Nowadays, autonomous vehicle technology is becoming more and more mature. Critical to progress and safety, high-definition (HD) maps, a type of centimeter-level map collected using a laser sensor, provide accurate descriptions of the surrounding environment. The key challenge of HD map production is efficient, high-quality collection and annotation of large-volume datasets. Due to the demand for high quality, HD map production requires significant manual human effort to create annotations, a very time-consuming and costly process for the map industry. In order to reduce manual annotation burdens, many artificial intelligence (AI) algorithms have been developed to pre-label the HD maps. However, there still exists a large gap between AI algorithms and the traditional manual HD map production pipelines in accuracy and robustness. 
Furthermore, it is also very resource-costly to build large-scale annotated datasets and advanced machine learning algorithms for AI-based HD map automatic labeling systems. In this paper, we introduce the Tencent HD Map AI (THMA) system, an innovative end-to-end, AI-based, active learning HD map labeling system capable of producing and labeling HD maps with a scale of hundreds of thousands of kilometers. In THMA, we train AI models directly from massive HD map datasets via supervised, self-supervised, and weakly supervised learning to achieve high accuracy and efficiency required by downstream users. THMA has been deployed by the Tencent Map team to provide services to downstream companies and users, serving over 1,000 labeling workers and producing more than 30,000 kilometers of HD map data per day at most. More than 90 percent of the HD map data in Tencent Map is labeled automatically by THMA, accelerating the traditional HD map labeling process by more than ten times.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tang, Kun and Cao, Xu and Cao, Zhipeng and Zhou, Tong and Li, Erlong and Liu, Ao and Zou, Shengtao and Liu, Chang and Mei, Shuqi and Sizikova, Elena and Zheng, Chao}, year={2024}, month={Jul.}, pages={15585-15593} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26848/26620", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26848", + "pdf_size": 14712559, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12315681077561813406&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": "nyu.edu;tencent.com; ; ; ; ; ; ; ; ; ", + "email": "nyu.edu;tencent.com; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0+1;0;0;0;0;0;0;0;1;0", + "aff_unique_norm": "Tencent;New York University", + "aff_unique_dep": "T Lab;Center of Data Science", + "aff_unique_url": 
"https://www.tencent.com;https://www.nyu.edu", + "aff_unique_abbr": "Tencent;NYU", + "aff_campus_unique_index": "0;0+1;0;0;0;0;0;0;0;1;0", + "aff_campus_unique": "Beijing;New York", + "aff_country_unique_index": "0;0+1;0;0;0;0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25614", + "title": "TOT\uff1aTopology-Aware Optimal Transport for Multimodal Hate Detection", + "track": "main", + "status": "Technical", + "abstract": "Multimodal hate detection, which aims to identify the harmful content online such as memes, is crucial for building a wholesome internet environment. Previous work has made enlightening exploration in detecting explicit hate remarks. However, most of their approaches neglect the analysis of implicit harm, which is particularly challenging as explicit text markers and demographic visual cues are often twisted or missing. The leveraged cross-modal attention mechanisms also suffer from the distributional modality gap and lack logical interpretability. To address these \nsemantic gap issues, we propose TOT: a topology-aware optimal transport framework to decipher the implicit harm in memes scenario, which formulates the cross-modal aligning problem as solutions for optimal transportation plans. Specifically, we leverage an optimal transport kernel method to capture complementary information from multiple modalities. The kernel embedding provides a non-linear transformation ability to reproduce a kernel Hilbert space (RKHS), which reflects significance for eliminating the distributional modality gap. Moreover, we perceive the topology information based on aligned representations to conduct bipartite graph path reasoning. 
The newly achieved state-of-the-art performance on two publicly available benchmark datasets, together with further visual analysis, demonstrate the superiority of TOT in capturing implicit cross-modal alignment.", + "primary_area": "data mining and knowledge management", + "author": "Linhao Zhang; Li Jin; Xian Sun; Guangluan Xu; Zequn Zhang; Xiaoyu Li; Nayu Liu; Qing Liu; Shiyao Yan", + "authorids": "", + "aff": "Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute + School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute + School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System 
Technology (NIST), Aerospace Information Research Institute; Aerospace Information Research Institute, Chinese Academy of Sciences + Key Laboratory of Network Information System Technology (NIST), Aerospace Information Research Institute + School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences", + "bibtex": "@article{Zhang_Jin_Sun_Xu_Zhang_Li_Liu_Liu_Yan_2023, title={TOT\uff1aTopology-Aware Optimal Transport for Multimodal Hate Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25614}, DOI={10.1609/aaai.v37i4.25614}, abstractNote={Multimodal hate detection, which aims to identify the harmful content online such as memes, is crucial for building a wholesome internet environment. Previous work has made enlightening exploration in detecting explicit hate remarks. However, most of their approaches neglect the analysis of implicit harm, which is particularly challenging as explicit text markers and demographic visual cues are often twisted or missing. The leveraged cross-modal attention mechanisms also suffer from the distributional modality gap and lack logical interpretability. To address these semantic gap issues, we propose TOT: a topology-aware optimal transport framework to decipher the implicit harm in memes scenario, which formulates the cross-modal aligning problem as solutions for optimal transportation plans. Specifically, we leverage an optimal transport kernel method to capture complementary information from multiple modalities. The kernel embedding provides a non-linear transformation ability to reproduce a kernel Hilbert space (RKHS), which reflects significance for eliminating the distributional modality gap. Moreover, we perceive the topology information based on aligned representations to conduct bipartite graph path reasoning. 
The newly achieved state-of-the-art performance on two publicly available benchmark datasets, together with further visual analysis, demonstrate the superiority of TOT in capturing implicit cross-modal alignment.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Linhao and Jin, Li and Sun, Xian and Xu, Guangluan and Zhang, Zequn and Li, Xiaoyu and Liu, Nayu and Liu, Qing and Yan, Shiyao}, year={2023}, month={Jun.}, pages={4884-4892} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25614/25386", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25614", + "pdf_size": 11185230, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18321807882655643323&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.ucas.ac.cn;gmail.com; ; ; ; ; ; ; ", + "email": "mails.ucas.ac.cn;gmail.com; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1+2;0+1;0+1;0+1;0+1;0+1;0+1+2;0+1;0+1+2", + "aff_unique_norm": "Chinese Academy of Sciences;Aerospace Information Research Institute;University of Chinese Academy of Sciences", + "aff_unique_dep": "Aerospace Information Research Institute;Key Laboratory of Network Information System Technology (NIST);School of Electronic, Electrical and Communication Engineering", + "aff_unique_url": "http://www.cas.ac.cn;;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;;UCAS", + "aff_campus_unique_index": ";;;;;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0+0;0+0;0+0;0+0;0+0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-27063", + "title": "TUTORING: Instruction-Grounded Conversational Agent for Language Learners", + "track": "demonstrations", + "status": "Technical", + "abstract": "In this paper, we propose Tutoring bot, a generative chatbot trained on a large scale of tutor-student conversations for English-language learning. 
To mimic a human tutor's behavior in language education, the tutor bot leverages diverse educational instructions and grounds to each instruction as additional input context for the tutor response generation. As a single instruction generally involves multiple dialogue turns to give the student sufficient speaking practice, the tutor bot is required to monitor and capture when the current instruction should be kept or switched to the next instruction. For that, the tutor bot is learned to not only generate responses but also infer its teaching action and progress on the current conversation simultaneously by a multi-task learning scheme. Our Tutoring bot is deployed under a non-commercial use license at https://tutoringai.com.", + "primary_area": "", + "author": "Hyungjoo Chae; Minjin Kim; Chaehyeong Kim; Wonseok Jeong; Hyejoong Kim; Junmyung Lee; Jinyoung Yeo", + "authorids": "", + "aff": "Department of Computer Science, Yonsei University + Tutoring, Market Designers Inc.; Department of Artificial Intelligence, Yonsei University; Department of Artificial Intelligence, Yonsei University; Tutoring, Market Designers Inc.; Tutoring, Market Designers Inc.; Tutoring, Market Designers Inc.; Department of Computer Science, Yonsei University + Department of Artificial Intelligence, Yonsei University + Tutoring, Market Designers Inc.", + "bibtex": "@article{Chae_Kim_Kim_Jeong_Kim_Lee_Yeo_2024, title={TUTORING: Instruction-Grounded Conversational Agent for Language Learners}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27063}, DOI={10.1609/aaai.v37i13.27063}, abstractNote={In this paper, we propose Tutoring bot, a generative chatbot trained on a large scale of tutor-student conversations for English-language learning. To mimic a human tutor\u2019s behavior in language education, the tutor bot leverages diverse educational instructions and grounds to each instruction as additional input context for the tutor response generation. 
As a single instruction generally involves multiple dialogue turns to give the student sufficient speaking practice, the tutor bot is required to monitor and capture when the current instruction should be kept or switched to the next instruction. For that, the tutor bot is learned to not only generate responses but also infer its teaching action and progress on the current conversation simultaneously by a multi-task learning scheme. Our Tutoring bot is deployed under a non-commercial use license at https://tutoringai.com.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chae, Hyungjoo and Kim, Minjin and Kim, Chaehyeong and Jeong, Wonseok and Kim, Hyejoong and Lee, Junmyung and Yeo, Jinyoung}, year={2024}, month={Jul.}, pages={16413-16415} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27063/26835", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27063", + "pdf_size": 217185, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9060809317679071879&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;tutoring.co.kr;tutoring.co.kr;tutoring.co.kr;yonsei.ac.kr", + "email": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;tutoring.co.kr;tutoring.co.kr;tutoring.co.kr;yonsei.ac.kr", + "github": "", + "project": "https://tutoringai.com", + "author_num": 7, + "aff_unique_index": "0+1;0;0;1;1;1;0+0+1", + "aff_unique_norm": "Yonsei University;Market Designers Inc.", + "aff_unique_dep": "Department of Computer Science;Tutoring", + "aff_unique_url": "https://www.yonsei.ac.kr;", + "aff_unique_abbr": "Yonsei;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;1;1;1;0+0+1", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "article-25286", + "title": "TaCo: Textual Attribute Recognition via Contrastive Learning", + "track": "main", + "status": "Technical", + 
"abstract": "As textual attributes like font are core design elements of document format and page style, automatic attributes recognition favor comprehensive practical applications. Existing approaches already yield satisfactory performance in differentiating disparate attributes, but they still suffer in distinguishing similar attributes with only subtle difference. Moreover, their performance drop severely in real-world scenarios where unexpected and obvious imaging distortions appear. In this paper, we aim to tackle these problems by proposing TaCo, a contrastive framework for textual attribute recognition tailored toward the most common document scenes. Specifically, TaCo leverages contrastive learning to dispel the ambiguity trap arising from vague and open-ended attributes. \nTo realize this goal, we design the learning paradigm from three perspectives: 1) generating attribute views, 2) extracting subtle but crucial details, and 3) exploiting valued view pairs for learning, to fully unlock the pre-training potential. \nExtensive experiments show that TaCo surpasses the supervised counterparts and advances the state-of-the-art remarkably on multiple attribute recognition tasks. Online services of TaCo will be made available.", + "primary_area": "computer vision ii", + "author": "Chang Nie; Yiqing Hu; Yanqiu Qu; Hao Liu; Deqiang Jiang; Bo Ren", + "authorids": "", + "aff": "Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab", + "bibtex": "@article{Nie_Hu_Qu_Liu_Jiang_Ren_2023, title={TaCo: Textual Attribute Recognition via Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25286}, DOI={10.1609/aaai.v37i2.25286}, abstractNote={As textual attributes like font are core design elements of document format and page style, automatic attributes recognition favor comprehensive practical applications. 
Existing approaches already yield satisfactory performance in differentiating disparate attributes, but they still suffer in distinguishing similar attributes with only subtle difference. Moreover, their performance drop severely in real-world scenarios where unexpected and obvious imaging distortions appear. In this paper, we aim to tackle these problems by proposing TaCo, a contrastive framework for textual attribute recognition tailored toward the most common document scenes. Specifically, TaCo leverages contrastive learning to dispel the ambiguity trap arising from vague and open-ended attributes. To realize this goal, we design the learning paradigm from three perspectives: 1) generating attribute views, 2) extracting subtle but crucial details, and 3) exploiting valued view pairs for learning, to fully unlock the pre-training potential. Extensive experiments show that TaCo surpasses the supervised counterparts and advances the state-of-the-art remarkably on multiple attribute recognition tasks. 
Online services of TaCo will be made available.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nie, Chang and Hu, Yiqing and Qu, Yanqiu and Liu, Hao and Jiang, Deqiang and Ren, Bo}, year={2023}, month={Jun.}, pages={1949-1956} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25286/25058", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25286", + "pdf_size": 2978245, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:cLNzs_h-qpIJ:scholar.google.com/&scioq=TaCo:+Textual+Attribute+Recognition+via+Contrastive+Learning&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "email": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "YouTu Lab", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25891", + "title": "Tackling Data Heterogeneity in Federated Learning with Class Prototypes", + "track": "main", + "status": "Technical", + "abstract": "Data heterogeneity across clients in federated learning (FL) settings is a widely acknowledged challenge. In response, personalized federated learning (PFL) emerged as a framework to curate local models for clients' tasks. In PFL, a common strategy is to develop local and global models jointly - the global model (for generalization) informs the local models, and the local models (for personalization) are aggregated to update the global model. 
A key observation is that if we can improve the generalization ability of local models, then we can improve the generalization of global models, which in turn builds better personalized models. In this work, we consider class imbalance, an overlooked type of data heterogeneity, in the classification setting. We propose FedNH, a novel method that improves the local models' performance for both personalization and generalization by combining the uniformity and semantics of class prototypes. FedNH initially distributes class prototypes uniformly in the latent space and smoothly infuses the class semantics into class prototypes. We show that imposing uniformity helps to combat prototype collapse while infusing class semantics improves local models. Extensive experiments were conducted on popular classification datasets under the cross-device setting. Our results demonstrate the effectiveness and stability of our method over recent works.", + "primary_area": "machine learning i", + "author": "Yutong Dai; Zeyuan Chen; Junnan Li; Shelby Heinecke; Lichao Sun; Ran Xu", + "authorids": "", + "aff": "Lehigh University; Salesforce Research; Salesforce Research; Salesforce Research; Lehigh University; Salesforce Research", + "bibtex": "@article{Dai_Chen_Li_Heinecke_Sun_Xu_2023, title={Tackling Data Heterogeneity in Federated Learning with Class Prototypes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25891}, DOI={10.1609/aaai.v37i6.25891}, abstractNote={Data heterogeneity across clients in federated learning (FL) settings is a widely acknowledged challenge. In response, personalized federated learning (PFL) emerged as a framework to curate local models for clients\u2019 tasks. In PFL, a common strategy is to develop local and global models jointly - the global model (for generalization) informs the local models, and the local models (for personalization) are aggregated to update the global model. 
A key observation is that if we can improve the generalization ability of local models, then we can improve the generalization of global models, which in turn builds better personalized models. In this work, we consider class imbalance, an overlooked type of data heterogeneity, in the classification setting. We propose FedNH, a novel method that improves the local models\u2019 performance for both personalization and generalization by combining the uniformity and semantics of class prototypes. FedNH initially distributes class prototypes uniformly in the latent space and smoothly infuses the class semantics into class prototypes. We show that imposing uniformity helps to combat prototype collapse while infusing class semantics improves local models. Extensive experiments were conducted on popular classification datasets under the cross-device setting. Our results demonstrate the effectiveness and stability of our method over recent works.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dai, Yutong and Chen, Zeyuan and Li, Junnan and Heinecke, Shelby and Sun, Lichao and Xu, Ran}, year={2023}, month={Jun.}, pages={7314-7322} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25891/25663", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25891", + "pdf_size": 721505, + "gs_citation": 95, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6576732418036748254&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "lehigh.edu;salesforce.com;salesforce.com;salesforce.com;lehigh.edu;salesforce.com", + "email": "lehigh.edu;salesforce.com;salesforce.com;salesforce.com;lehigh.edu;salesforce.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;1", + "aff_unique_norm": "Lehigh University;Salesforce", + "aff_unique_dep": ";Salesforce Research", + "aff_unique_url": "https://www.lehigh.edu;https://research.salesforce.com", + "aff_unique_abbr": 
"Lehigh;Salesforce", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27041", + "title": "Tackling Safe and Efficient Multi-Agent Reinforcement Learning via Dynamic Shielding (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Multi-agent Reinforcement Learning (MARL) has been increasingly used in safety-critical applications but has no safety guarantees, especially during training. In this paper, we propose dynamic shielding, a novel decentralized MARL framework to ensure safety in both training and deployment phases. Our framework leverages Shield, a reactive system running in parallel with the reinforcement learning algorithm to monitor and correct agents' behavior. In our algorithm, shields dynamically split and merge according to the environment state in order to maintain decentralization and avoid conservative behaviors while enjoying formal safety guarantees. We demonstrate the effectiveness of MARL with dynamic shielding in the mobile navigation scenario.", + "primary_area": "", + "author": "Wenli Xiao; Yiwei Lyu; John M. Dolan", + "authorids": "", + "aff": "School of Science and Engineering, The Chinese University of Hong Kong, Shenzhen, China; Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA; Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA", + "bibtex": "@article{Xiao_Lyu_Dolan_2024, title={Tackling Safe and Efficient Multi-Agent Reinforcement Learning via Dynamic Shielding (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27041}, DOI={10.1609/aaai.v37i13.27041}, abstractNote={Multi-agent Reinforcement Learning (MARL) has been increasingly used in safety-critical applications but has no safety guarantees, especially during training. 
In this paper, we propose dynamic shielding, a novel decentralized MARL framework to ensure safety in both training and deployment phases. Our framework leverages Shield, a reactive system running in parallel with the reinforcement learning algorithm to monitor and correct agents\u2019 behavior. In our algorithm, shields dynamically split and merge according to the environment state in order to maintain decentralization and avoid conservative behaviors while enjoying formal safety guarantees. We demonstrate the effectiveness of MARL with dynamic shielding in the mobile navigation scenario.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiao, Wenli and Lyu, Yiwei and Dolan, John M.}, year={2024}, month={Jul.}, pages={16362-16363} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27041/26813", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27041", + "pdf_size": 2206676, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:HW4J7FO7ahgJ:scholar.google.com/&scioq=Tackling+Safe+and+Efficient+Multi-Agent+Reinforcement+Learning+via+Dynamic+Shielding+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "link.cuhk.edu.cn;andrew.cmu.edu;andrew.cmu.edu", + "email": "link.cuhk.edu.cn;andrew.cmu.edu;andrew.cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "The Chinese University of Hong Kong;Carnegie Mellon University", + "aff_unique_dep": "School of Science and Engineering;Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.cuhk.edu.cn;https://www.cmu.edu", + "aff_unique_abbr": "CUHK;CMU", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Shenzhen;Pittsburgh", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25113", + "title": "Tagging before Alignment: Integrating Multi-Modal 
Tags for Video-Text Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Vision-language alignment learning for video-text retrieval arouses a lot of attention in recent years. Most of the existing methods either transfer the knowledge of image-text pretraining model to video-text retrieval task without fully exploring the multi-modal information of videos, or simply fuse multi-modal features in a brute force manner without explicit guidance. In this paper, we integrate multi-modal information in an explicit manner by tagging, and use the tags as the anchors for better video-text alignment. Various pretrained experts are utilized for extracting the information of multiple modalities, including object, person, motion, audio, etc. To take full advantage of these information, we propose the TABLE (TAgging Before aLignmEnt) network, which consists of a visual encoder, a tag encoder, a text encoder, and a tag-guiding cross-modal encoder for jointly encoding multi-frame visual features and multi-modal tags information. Furthermore, to strengthen the interaction between video and text, we build a joint cross-modal encoder with the triplet input of [vision, tag, text] and perform two additional supervised tasks, Video Text Matching (VTM) and Masked Language Modeling (MLM). 
Extensive experimental results demonstrate that the TABLE model is capable of achieving State-Of-The-Art (SOTA) performance on various video-text retrieval benchmarks, including MSR-VTT, MSVD, LSMDC and DiDeMo.", + "primary_area": "computer vision i", + "author": "Yizhen Chen; Jie Wang; Lijian Lin; Zhongang Qi; Jin Ma; Ying Shan", + "authorids": "", + "aff": "IPS Search, Tencent PCG; IPS Search, Tencent PCG; ARC Lab, Tencent PCG; ARC Lab, Tencent PCG; IPS Search, Tencent PCG; ARC Lab, Tencent PCG", + "bibtex": "@article{Chen_Wang_Lin_Qi_Ma_Shan_2023, title={Tagging before Alignment: Integrating Multi-Modal Tags for Video-Text Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25113}, DOI={10.1609/aaai.v37i1.25113}, abstractNote={Vision-language alignment learning for video-text retrieval arouses a lot of attention in recent years. Most of the existing methods either transfer the knowledge of image-text pretraining model to video-text retrieval task without fully exploring the multi-modal information of videos, or simply fuse multi-modal features in a brute force manner without explicit guidance. In this paper, we integrate multi-modal information in an explicit manner by tagging, and use the tags as the anchors for better video-text alignment. Various pretrained experts are utilized for extracting the information of multiple modalities, including object, person, motion, audio, etc. To take full advantage of these information, we propose the TABLE (TAgging Before aLignmEnt) network, which consists of a visual encoder, a tag encoder, a text encoder, and a tag-guiding cross-modal encoder for jointly encoding multi-frame visual features and multi-modal tags information. Furthermore, to strengthen the interaction between video and text, we build a joint cross-modal encoder with the triplet input of [vision, tag, text] and perform two additional supervised tasks, Video Text Matching (VTM) and Masked Language Modeling (MLM). 
Extensive experimental results demonstrate that the TABLE model is capable of achieving State-Of-The-Art (SOTA) performance on various video-text retrieval benchmarks, including MSR-VTT, MSVD, LSMDC and DiDeMo.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Yizhen and Wang, Jie and Lin, Lijian and Qi, Zhongang and Ma, Jin and Shan, Ying}, year={2023}, month={Jun.}, pages={396-404} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25113/24885", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25113", + "pdf_size": 4219380, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6615571488331751827&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "tencent.com;163.com;stu.xmu.edu.cn;tencent.com;mail.ustc.edu.cn;tencent.com", + "email": "tencent.com;163.com;stu.xmu.edu.cn;tencent.com;mail.ustc.edu.cn;tencent.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "Tencent PCG", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25106", + "title": "Take Your Model Further: A General Post-refinement Network for Light Field Disparity Estimation via BadPix Correction", + "track": "main", + "status": "Technical", + "abstract": "Most existing light field (LF) disparity estimation algorithms focus on handling occlusion, texture-less or other areas that harm LF structure to improve accuracy, while ignoring other potential modeling ideas. In this paper, we propose a novel idea called Bad Pixel (BadPix) correction for method modeling, then implement a general post-refinement network for LF disparity estimation: Bad-pixel Correction Network (BpCNet). 
Given an initial disparity map generated by a specific algorithm, we assume that all BadPixs on it are in a small range. Then BpCNet is modeled as a fine-grained search strategy, and a more accurate result can be obtained by evaluating the consistency of LF images in this limited range. Due to the assumption and the consistency between input and output, BpCNet can perform as a general post-refinement network, and can work on almost all existing algorithms iteratively. We demonstrate the feasibility of our theory through extensive experiments, and achieve remarkable performance on the HCI 4D Light Field Benchmark.", + "primary_area": "computer vision i", + "author": "Rongshan Chen; Hao Sheng; Da Yang; Sizhe Wang; Zhenglong Cui; Ruixuan Cong", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University+Beihang Hangzhou Innovation Institute Yuhang; School of Computer Science and Engineering, Beihang University+Beihang Hangzhou Innovation Institute Yuhang+Faculty of Applied Sciences, Macao Polytechnic University; School of Computer Science and Engineering, Beihang University+Beihang Hangzhou Innovation Institute Yuhang; School of Computer Science and Engineering, Beihang University+Beihang Hangzhou Innovation Institute Yuhang; School of Computer Science and Engineering, Beihang University+Beihang Hangzhou Innovation Institute Yuhang; School of Computer Science and Engineering, Beihang University+Beihang Hangzhou Innovation Institute Yuhang", + "bibtex": "@article{Chen_Sheng_Yang_Wang_Cui_Cong_2023, title={Take Your Model Further: A General Post-refinement Network for Light Field Disparity Estimation via BadPix Correction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25106}, DOI={10.1609/aaai.v37i1.25106}, abstractNote={Most existing light field (LF) disparity estimation algorithms focus on handling occlusion, texture-less or other areas that harm LF structure to improve accuracy, while ignoring other potential 
modeling ideas. In this paper, we propose a novel idea called Bad Pixel (BadPix) correction for method modeling, then implement a general post-refinement network for LF disparity estimation: Bad-pixel Correction Network (BpCNet). Given an initial disparity map generated by a specific algorithm, we assume that all BadPixs on it are in a small range. Then BpCNet is modeled as a fine-grained search strategy, and a more accurate result can be obtained by evaluating the consistency of LF images in this limited range. Due to the assumption and the consistency between input and output, BpCNet can perform as a general post-refinement network, and can work on almost all existing algorithms iteratively. We demonstrate the feasibility of our theory through extensive experiments, and achieve remarkable performance on the HCI 4D Light Field Benchmark.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Rongshan and Sheng, Hao and Yang, Da and Wang, Sizhe and Cui, Zhenglong and Cong, Ruixuan}, year={2023}, month={Jun.}, pages={331-339} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25106/24878", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25106", + "pdf_size": 6342438, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5028211059881651150&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0+1;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Beihang University;Macao Polytechnic University", + "aff_unique_dep": "School of Computer Science and Engineering;Faculty of Applied Sciences", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.mp.polyu.edu.hk", + "aff_unique_abbr": "BUAA;MPU", + "aff_campus_unique_index": 
"1;1+2;1;1;1;1", + "aff_campus_unique": ";Hangzhou;Macao", + "aff_country_unique_index": "0+0;0+0+1;0+0;0+0;0+0;0+0", + "aff_country_unique": "China;Macau" + }, + { + "id": "article-26602", + "title": "Taming Continuous Posteriors for Latent Variational Dialogue Policies", + "track": "main", + "status": "Technical", + "abstract": "Utilizing amortized variational inference for latent-action reinforcement learning (RL) has been shown to be an effective approach in Task-oriented Dialogue (ToD) systems for optimizing dialogue success.Until now, categorical posteriors have been argued to be one of the main drivers of performance. In this work we revisit Gaussian variational posteriors for latent-action RL and show that they can yield even better performance than categoricals. We achieve this by introducing an improved variational inference objective for learning continuous representations without auxiliary learning objectives, which streamlines the training procedure. Moreover, we propose ways to regularize the latent dialogue policy, which helps to retain good response coherence. 
Using continuous latent representations our model achieves state of the art dialogue success rate on the MultiWOZ benchmark, and also compares well to categorical latent methods in response coherence.", + "primary_area": "speech natural language processing", + "author": "Marin Vlastelica; Patrick Ernst; Gyuri Szarvas", + "authorids": "", + "aff": "Autonomous Learning Group, Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Amazon Development Center Germany GmbH, Berlin, Germany; Amazon Development Center Germany GmbH, Berlin, Germany", + "bibtex": "@article{Vlastelica_Ernst_Szarvas_2023, title={Taming Continuous Posteriors for Latent Variational Dialogue Policies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26602}, DOI={10.1609/aaai.v37i11.26602}, abstractNote={Utilizing amortized variational inference for latent-action reinforcement learning (RL) has been shown to be an effective approach in Task-oriented Dialogue (ToD) systems for optimizing dialogue success.Until now, categorical posteriors have been argued to be one of the main drivers of performance. In this work we revisit Gaussian variational posteriors for latent-action RL and show that they can yield even better performance than categoricals. We achieve this by introducing an improved variational inference objective for learning continuous representations without auxiliary learning objectives, which streamlines the training procedure. Moreover, we propose ways to regularize the latent dialogue policy, which helps to retain good response coherence. 
Using continuous latent representations our model achieves state of the art dialogue success rate on the MultiWOZ benchmark, and also compares well to categorical latent methods in response coherence.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vlastelica, Marin and Ernst, Patrick and Szarvas, Gyuri}, year={2023}, month={Jun.}, pages={13673-13681} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26602/26374", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26602", + "pdf_size": 3218366, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12737891862874053853&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "tuebingen.mpg.de;amazon.de;amazon.de", + "email": "tuebingen.mpg.de;amazon.de;amazon.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Max Planck Institute for Intelligent Systems;Amazon Development Center Germany GmbH", + "aff_unique_dep": "Autonomous Learning Group;", + "aff_unique_url": "https://www.mpi-is.mpg.de;https://www.amazon.de", + "aff_unique_abbr": "MPI-IS;", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "T\u00fcbingen;Berlin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25155", + "title": "Target-Aware Tracking with Long-Term Context Attention", + "track": "main", + "status": "Technical", + "abstract": "Most deep trackers still follow the guidance of the siamese paradigms and use a template that contains only the target without any contextual information, which makes it difficult for the tracker to cope with large appearance changes, rapid target movement, and attraction from similar objects. 
To alleviate the above problem, we propose a long-term context attention (LCA) module that can perform extensive information fusion on the target and its context from long-term frames, and calculate the target correlation while enhancing target features. The complete contextual information contains the location of the target as well as the state around the target. LCA uses the target state from the previous frame to exclude the interference of similar objects and complex backgrounds, thus accurately locating the target and enabling the tracker to obtain higher robustness and regression accuracy. By embedding the LCA module in Transformer, we build a powerful online tracker with a target-aware backbone, termed as TATrack. In addition, we propose a dynamic online update algorithm based on the classification confidence of historical information without additional calculation burden. Our tracker achieves state-of-the-art performance on multiple benchmarks, with 71.1% AUC, 89.3% NP, and 73.0% AO on LaSOT, TrackingNet, and GOT-10k. 
The code and trained models are available on https://github.com/hekaijie123/TATrack.", + "primary_area": "computer vision i", + "author": "Kaijie He; Canlong Zhang; Sheng Xie; Zhixin Li; Zhiwen Wang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Guangxi Normal University, China+Guangxi key Lab of Multi-source Information Mining and Security, China; School of Computer Science and Engineering, Guangxi Normal University, China+Guangxi key Lab of Multi-source Information Mining and Security, China; School of Computer Science and Engineering, Guangxi Normal University, China; School of Computer Science and Engineering, Guangxi Normal University, China+Guangxi key Lab of Multi-source Information Mining and Security, China; School of Computer Science and Technology, Guangxi University of Science and Technology, China", + "bibtex": "@article{He_Zhang_Xie_Li_Wang_2023, title={Target-Aware Tracking with Long-Term Context Attention}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25155}, DOI={10.1609/aaai.v37i1.25155}, abstractNote={Most deep trackers still follow the guidance of the siamese paradigms and use a template that contains only the target without any contextual information, which makes it difficult for the tracker to cope with large appearance changes, rapid target movement, and attraction from similar objects. To alleviate the above problem, we propose a long-term context attention (LCA) module that can perform extensive information fusion on the target and its context from long-term frames, and calculate the target correlation while enhancing target features. The complete contextual information contains the location of the target as well as the state around the target. LCA uses the target state from the previous frame to exclude the interference of similar objects and complex backgrounds, thus accurately locating the target and enabling the tracker to obtain higher robustness and regression accuracy. 
By embedding the LCA module in Transformer, we build a powerful online tracker with a target-aware backbone, termed as TATrack. In addition, we propose a dynamic online update algorithm based on the classification confidence of historical information without additional calculation burden. Our tracker achieves state-of-the-art performance on multiple benchmarks, with 71.1% AUC, 89.3% NP, and 73.0% AO on LaSOT, TrackingNet, and GOT-10k. The code and trained models are available on https://github.com/hekaijie123/TATrack.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Kaijie and Zhang, Canlong and Xie, Sheng and Li, Zhixin and Wang, Zhiwen}, year={2023}, month={Jun.}, pages={773-780} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25155/24927", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25155", + "pdf_size": 2282486, + "gs_citation": 68, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12572109909968074319&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "outlook.com;gxnu.edu.cn; ; ; ", + "email": "outlook.com;gxnu.edu.cn; ; ; ", + "github": "https://github.com/hekaijie123/TATrack", + "project": "", + "author_num": 5, + "aff_unique_index": "1;1;1;2", + "aff_unique_norm": ";Guangxi University;Guangxi University of Science and Technology", + "aff_unique_dep": ";Key Lab of Multi-source Information Mining and Security;School of Computer Science and Technology", + "aff_unique_url": ";;", + "aff_unique_abbr": ";;", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1;1;1", + "aff_country_unique": ";China" + }, + { + "id": "article-25134", + "title": "Target-Free Text-Guided Image Manipulation", + "track": "main", + "status": "Technical", + "abstract": "We tackle the problem of target-free text-guided image manipulation, which requires one to modify the input reference image based on the given text instruction, 
while no ground truth target image is observed during training. To address this challenging task, we propose a Cyclic-Manipulation GAN (cManiGAN) in this paper, which is able to realize where and how to edit the image regions of interest. Specifically, the image editor in cManiGAN learns to identify and complete the input image, while cross-modal interpreter and reasoner are deployed to verify the semantic correctness of the output image based on the input instruction. While the former utilizes factual/counterfactual description learning for authenticating the image semantics, the latter predicts the \"undo\" instruction and provides pixel-level supervision for the training of cManiGAN. With the above operational cycle-consistency, our cManiGAN can be trained in the above weakly supervised setting. We conduct extensive experiments on the datasets of CLEVR and COCO datasets, and the effectiveness and generalizability of our proposed method can be successfully verified. Project page: sites.google.com/view/wancyuanfan/projects/cmanigan.", + "primary_area": "computer vision i", + "author": "Wan-Cyuan Fan; Cheng-Fu Yang; Chiao-An Yang; Yu-Chiang Frank Wang", + "authorids": "", + "aff": "National Taiwan University; UCLA; Purdue University; NVIDIA", + "bibtex": "@article{Fan_Yang_Yang_Wang_2023, title={Target-Free Text-Guided Image Manipulation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25134}, DOI={10.1609/aaai.v37i1.25134}, abstractNote={We tackle the problem of target-free text-guided image manipulation, which requires one to modify the input reference image based on the given text instruction, while no ground truth target image is observed during training. To address this challenging task, we propose a Cyclic-Manipulation GAN (cManiGAN) in this paper, which is able to realize where and how to edit the image regions of interest. 
Specifically, the image editor in cManiGAN learns to identify and complete the input image, while cross-modal interpreter and reasoner are deployed to verify the semantic correctness of the output image based on the input instruction. While the former utilizes factual/counterfactual description learning for authenticating the image semantics, the latter predicts the "undo" instruction and provides pixel-level supervision for the training of cManiGAN. With the above operational cycle-consistency, our cManiGAN can be trained in the above weakly supervised setting. We conduct extensive experiments on the datasets of CLEVR and COCO datasets, and the effectiveness and generalizability of our proposed method can be successfully verified. Project page: sites.google.com/view/wancyuanfan/projects/cmanigan.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fan, Wan-Cyuan and Yang, Cheng-Fu and Yang, Chiao-An and Wang, Yu-Chiang Frank}, year={2023}, month={Jun.}, pages={588-596} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25134/24906", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25134", + "pdf_size": 1443899, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2991826763670832439&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ntu.edu.tw; ; ; ", + "email": "ntu.edu.tw; ; ; ", + "github": "", + "project": "sites.google.com/view/wancyuanfan/projects/cmanigan", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "National Taiwan University;University of California, Los Angeles;Purdue University;NVIDIA Corporation", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ntu.edu.tw;https://www.ucla.edu;https://www.purdue.edu;https://www.nvidia.com", + "aff_unique_abbr": "NTU;UCLA;Purdue;NVIDIA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;1;1;1", + 
"aff_country_unique": "Taiwan, China;United States" + }, + { + "id": "article-26805", + "title": "Targeted Knowledge Infusion To Make Conversational AI Explainable and Safe", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Conversational Systems (CSys) represent practical and tangible outcomes of advances in NLP and AI. CSys see continuous improvements through unsupervised training of large language models (LLMs) on a humongous amount of generic training data. However, when these CSys are suggested for use in domains like Mental Health, they fail to match the acceptable standards of clinical care, such as the clinical process in Patient Health Questionnaire (PHQ-9). The talk will present, Knowledge-infused Learning (KiL), a paradigm within NeuroSymbolic AI that focuses on making machine/deep learning models (i) learn over knowledge-enriched data, (ii) learn to follow guidelines in process-oriented tasks for safe and reasonable generation, and (iii) learn to leverage multiple contexts and stratified knowledge to yield user-level explanations. KiL established Knowledge-Intensive Language Understanding, a set of tasks for assessing safety, explainability, and conceptual flow in CSys.", + "primary_area": "", + "author": "Manas Gaur", + "authorids": "", + "aff": "Department of Computer Science and Electrical Engineering, University of Maryland Baltimore County", + "bibtex": "@article{Gaur_2024, title={Targeted Knowledge Infusion To Make Conversational AI Explainable and Safe}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26805}, DOI={10.1609/aaai.v37i13.26805}, abstractNote={Conversational Systems (CSys) represent practical and tangible outcomes of advances in NLP and AI. CSys see continuous improvements through unsupervised training of large language models (LLMs) on a humongous amount of generic training data. 
However, when these CSys are suggested for use in domains like Mental Health, they fail to match the acceptable standards of clinical care, such as the clinical process in Patient Health Questionnaire (PHQ-9). The talk will present, Knowledge-infused Learning (KiL), a paradigm within NeuroSymbolic AI that focuses on making machine/deep learning models (i) learn over knowledge-enriched data, (ii) learn to follow guidelines in process-oriented tasks for safe and reasonable generation, and (iii) learn to leverage multiple contexts and stratified knowledge to yield user-level explanations. KiL established Knowledge-Intensive Language Understanding, a set of tasks for assessing safety, explainability, and conceptual flow in CSys.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gaur, Manas}, year={2024}, month={Jul.}, pages={15438-15438} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26805/26577", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26805", + "pdf_size": 5178926, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2869178141175894646&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "umbc.edu", + "email": "umbc.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Maryland, Baltimore County", + "aff_unique_dep": "Department of Computer Science and Electrical Engineering", + "aff_unique_url": "https://www.umbc.edu", + "aff_unique_abbr": "UMBC", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Baltimore County", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26761", + "title": "Task and Model Agnostic Adversarial Attack on Graph Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Adversarial attacks on Graph Neural Networks (GNNs) reveal their security 
vulnerabilities, limiting their adoption in safety-critical applications. However, existing attack strategies rely on the knowledge of either the GNN model being used or the predictive task being attacked. Is this knowledge necessary? For example, a graph may be used for multiple downstream tasks unknown to a practical attacker. It is thus important to test the vulnerability of GNNs to adversarial perturbations in a model and task-agnostic setting. In this work, we study this problem and show that Gnns remain vulnerable even when the downstream task and model are unknown. The proposed algorithm, TANDIS (Targeted Attack via Neighborhood DIStortion) shows that distortion of node neighborhoods is effective in drastically compromising prediction performance. Although neighborhood distortion is an NP-hard problem, TANDIS designs an effective heuristic through a novel combination of Graph Isomorphism Network with deep Q-learning. Extensive experiments on real datasets show that, on average, TANDIS is up to 50% more effective than state-of-the-art techniques, while being more than 1000 times faster.", + "primary_area": "safe and robust ai", + "author": "Kartik Sharma; Samidha Verma; Sourav Medya; Arnab Bhattacharya; Sayan Ranu", + "authorids": "", + "aff": "Georgia Institute of Technology, Atlanta, USA; Indian Institute of Technology, Delhi, India; University of Illinois, Chicago, USA; Indian Institute of Technology, Kanpur, India; Indian Institute of Technology, Delhi, India", + "bibtex": "@article{Sharma_Verma_Medya_Bhattacharya_Ranu_2023, title={Task and Model Agnostic Adversarial Attack on Graph Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26761}, DOI={10.1609/aaai.v37i12.26761}, abstractNote={Adversarial attacks on Graph Neural Networks (GNNs) reveal their security vulnerabilities, limiting their adoption in safety-critical applications. 
However, existing attack strategies rely on the knowledge of either the GNN model being used or the predictive task being attacked. Is this knowledge necessary? For example, a graph may be used for multiple downstream tasks unknown to a practical attacker. It is thus important to test the vulnerability of GNNs to adversarial perturbations in a model and task-agnostic setting. In this work, we study this problem and show that Gnns remain vulnerable even when the downstream task and model are unknown. The proposed algorithm, TANDIS (Targeted Attack via Neighborhood DIStortion) shows that distortion of node neighborhoods is effective in drastically compromising prediction performance. Although neighborhood distortion is an NP-hard problem, TANDIS designs an effective heuristic through a novel combination of Graph Isomorphism Network with deep Q-learning. Extensive experiments on real datasets show that, on average, TANDIS is up to 50% more effective than state-of-the-art techniques, while being more than 1000 times faster.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sharma, Kartik and Verma, Samidha and Medya, Sourav and Bhattacharya, Arnab and Ranu, Sayan}, year={2023}, month={Jun.}, pages={15091-15099} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26761/26533", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26761", + "pdf_size": 9335940, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13443762043436206631&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "gatech.edu;iitd.ac.in;uic.edu;iitk.ac.in;iitd.ac.in", + "email": "gatech.edu;iitd.ac.in;uic.edu;iitk.ac.in;iitd.ac.in", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;1", + "aff_unique_norm": "Georgia Institute of Technology;Indian Institute of Technology Delhi;University of Illinois at Chicago;Indian Institute of Technology Kanpur", + 
"aff_unique_dep": ";;;", + "aff_unique_url": "https://www.gatech.edu;https://www.iitdelhi.ac.in;https://www.uic.edu;https://www.iitk.ac.in", + "aff_unique_abbr": "Georgia Tech;IIT Delhi;UIC;IIT Kanpur", + "aff_campus_unique_index": "0;1;2;3;1", + "aff_campus_unique": "Atlanta;Delhi;Chicago;Kanpur", + "aff_country_unique_index": "0;1;0;1;1", + "aff_country_unique": "United States;India" + }, + { + "id": "article-26680", + "title": "Task-Adaptive Meta-Learning Framework for Advancing Spatial Generalizability", + "track": "aaai special track", + "status": "Technical", + "abstract": "Spatio-temporal machine learning is critically needed for a variety of societal applications, such as agricultural monitoring, hydrological forecast, and traffic management. These applications greatly rely on regional features that characterize spatial and temporal differences. However, spatio-temporal data often exhibit complex patterns and significant data variability across different locations. The labels in many real-world applications can also be limited, which makes it difficult to separately train independent models for different locations. Although meta learning has shown promise in model adaptation with small samples, existing meta learning methods remain limited in handling a large number of heterogeneous tasks, e.g., a large number of locations with varying data patterns. To bridge the gap, we propose task-adaptive formulations and a model-agnostic meta-learning framework that transforms regionally heterogeneous data into location-sensitive meta tasks. We conduct task adaptation following an easy-to-hard task hierarchy in which different meta models are adapted to tasks of different difficulty levels. One major advantage of our proposed method is that it improves the model adaptation to a large number of heterogeneous tasks. It also enhances the model generalization by automatically adapting the meta model of the corresponding difficulty level to any new tasks. 
We demonstrate the superiority of our proposed framework over a diverse set of baselines and state-of-the-art meta-learning frameworks. Our extensive experiments on real crop yield data show the effectiveness of the proposed method in handling spatial-related heterogeneous tasks in real societal applications.", + "primary_area": "ai for social impact", + "author": "Zhexiong Liu; Licheng Liu; Yiqun Xie; Zhenong Jin; Xiaowei Jia", + "authorids": "", + "aff": "Department of Computer Science, University of Pittsburgh, Pennsylvania 15260 USA; The University of Minnesota, Twin City, Minnesota 55108 USA; The University of Maryland, College Park, Maryland 20742 USA; The University of Minnesota, Twin City, Minnesota 55108 USA; Department of Computer Science, University of Pittsburgh, Pennsylvania 15260 USA", + "bibtex": "@article{Liu_Liu_Xie_Jin_Jia_2023, title={Task-Adaptive Meta-Learning Framework for Advancing Spatial Generalizability}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26680}, DOI={10.1609/aaai.v37i12.26680}, abstractNote={Spatio-temporal machine learning is critically needed for a variety of societal applications, such as agricultural monitoring, hydrological forecast, and traffic management. These applications greatly rely on regional features that characterize spatial and temporal differences. However, spatio-temporal data often exhibit complex patterns and significant data variability across different locations. The labels in many real-world applications can also be limited, which makes it difficult to separately train independent models for different locations. Although meta learning has shown promise in model adaptation with small samples, existing meta learning methods remain limited in handling a large number of heterogeneous tasks, e.g., a large number of locations with varying data patterns. 
To bridge the gap, we propose task-adaptive formulations and a model-agnostic meta-learning framework that transforms regionally heterogeneous data into location-sensitive meta tasks. We conduct task adaptation following an easy-to-hard task hierarchy in which different meta models are adapted to tasks of different difficulty levels. One major advantage of our proposed method is that it improves the model adaptation to a large number of heterogeneous tasks. It also enhances the model generalization by automatically adapting the meta model of the corresponding difficulty level to any new tasks. We demonstrate the superiority of our proposed framework over a diverse set of baselines and state-of-the-art meta-learning frameworks. Our extensive experiments on real crop yield data show the effectiveness of the proposed method in handling spatial-related heterogeneous tasks in real societal applications.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Zhexiong and Liu, Licheng and Xie, Yiqun and Jin, Zhenong and Jia, Xiaowei}, year={2023}, month={Jun.}, pages={14365-14373} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26680/26452", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26680", + "pdf_size": 1212515, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9351750235543149158&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "pitt.edu;umn.edu;umd.edu;umn.edu;pitt.edu", + "email": "pitt.edu;umn.edu;umd.edu;umn.edu;pitt.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;0", + "aff_unique_norm": "University of Pittsburgh;University of Minnesota;University of Maryland", + "aff_unique_dep": "Department of Computer Science;;", + "aff_unique_url": "https://www.pitt.edu;https://www.minnesota.edu;https://www/umd.edu", + "aff_unique_abbr": "Pitt;UMN;UMD", + "aff_campus_unique_index": "1;2;1", + 
"aff_campus_unique": ";Twin City;College Park", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25322", + "title": "Task-Specific Scene Structure Representations", + "track": "main", + "status": "Technical", + "abstract": "Understanding the informative structures of scenes is essential for low-level vision tasks. Unfortunately, it is difficult to obtain a concrete visual definition of the informative structures because influences of visual features are task-specific. In this paper, we propose a single general neural network architecture for extracting task-specific structure guidance for scenes.\nTo do this, we first analyze traditional spectral clustering methods, which computes a set of eigenvectors to model a segmented graph forming small compact structures on image domains. We then unfold the traditional graph-partitioning problem into a learnable network, named Scene Structure Guidance Network (SSGNet), to represent the task-specific informative structures. The SSGNet yields a set of coefficients of eigenvectors that produces explicit feature representations of image structures. In addition, our SSGNet is light-weight (56K parameters), and can be used as a plug-and-play module for off-the-shelf architectures. We optimize the SSGNet without any supervision by proposing two novel training losses that enforce task-specific scene structure generation during training. Our main contribution is to show that such a simple network can achieve state-of-the-art results for several low-level vision applications including joint upsampling and image denoising. We also demonstrate that our SSGNet generalizes well on unseen datasets, compared to existing methods which use structural embedding frameworks. 
Our source codes are available at https://github.com/jsshin98/SSGNet.", + "primary_area": "computer vision ii", + "author": "Jisu Shin; Seunghyun Shin; Hae-Gon Jeon", + "authorids": "", + "aff": "AI Graduate School, GIST, South Korea; AI Graduate School, GIST, South Korea; AI Graduate School, GIST, South Korea", + "bibtex": "@article{Shin_Shin_Jeon_2023, title={Task-Specific Scene Structure Representations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25322}, DOI={10.1609/aaai.v37i2.25322}, abstractNote={Understanding the informative structures of scenes is essential for low-level vision tasks. Unfortunately, it is difficult to obtain a concrete visual definition of the informative structures because influences of visual features are task-specific. In this paper, we propose a single general neural network architecture for extracting task-specific structure guidance for scenes.\nTo do this, we first analyze traditional spectral clustering methods, which computes a set of eigenvectors to model a segmented graph forming small compact structures on image domains. We then unfold the traditional graph-partitioning problem into a learnable network, named Scene Structure Guidance Network (SSGNet), to represent the task-specific informative structures. The SSGNet yields a set of coefficients of eigenvectors that produces explicit feature representations of image structures. In addition, our SSGNet is light-weight (56K parameters), and can be used as a plug-and-play module for off-the-shelf architectures. We optimize the SSGNet without any supervision by proposing two novel training losses that enforce task-specific scene structure generation during training. Our main contribution is to show that such a simple network can achieve state-of-the-art results for several low-level vision applications including joint upsampling and image denoising. 
We also demonstrate that our SSGNet generalizes well on unseen datasets, compared to existing methods which use structural embedding frameworks. Our source codes are available at https://github.com/jsshin98/SSGNet.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shin, Jisu and Shin, Seunghyun and Jeon, Hae-Gon}, year={2023}, month={Jun.}, pages={2272-2281} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25322/25094", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25322", + "pdf_size": 1572417, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14376001799973924207&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "gm.gist.ac.kr;gm.gist.ac.kr;gist.ac.kr", + "email": "gm.gist.ac.kr;gm.gist.ac.kr;gist.ac.kr", + "github": "https://github.com/jsshin98/SSGNet", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "GIST", + "aff_unique_dep": "AI Graduate School", + "aff_unique_url": "https://www.gist.ac.kr", + "aff_unique_abbr": "GIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-27086", + "title": "Task2KB: A Public Task-Oriented Knowledge Base", + "track": "demonstrations", + "status": "Technical", + "abstract": "Search engines and conversational assistants are commonly used to help users complete their every day tasks such as booking travel, cooking, etc. While there are some existing datasets that can be used for this purpose, their coverage is limited to very few domains. In this paper, we propose a novel knowledge base, \u2018Task2KB\u2019, which is constructed using data crawled from WikiHow, an online knowledge resource offering instructional articles on a wide range of tasks. 
Task2KB encapsulates various types of task-related information and attributes, such as requirements, detailed step description, and available methods to complete tasks. Due to its higher coverage compared to existing related knowledge graphs, Task2KB can be highly useful in the development of general purpose task completion assistants.", + "primary_area": "", + "author": "Procheta Sen; Xi Wang; Ruiqing Xu; Emine Yilmaz", + "authorids": "", + "aff": "University of Liverpool; University College London; University of Manchester; University College London", + "bibtex": "@article{Sen_Wang_Xu_Yilmaz_2024, title={Task2KB: A Public Task-Oriented Knowledge Base}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27086}, DOI={10.1609/aaai.v37i13.27086}, abstractNote={Search engines and conversational assistants are commonly used to help users complete their every day tasks such as booking travel, cooking, etc. While there are some existing datasets that can be used for this purpose, their coverage is limited to very few domains. In this paper, we propose a novel knowledge base, \u2018Task2KB\u2019, which is constructed using data crawled from WikiHow, an online knowledge resource offering instructional articles on a wide range of tasks. Task2KB encapsulates various types of task-related information and attributes, such as requirements, detailed step description, and available methods to complete tasks. 
Due to its higher coverage compared to existing related knowledge graphs, Task2KB can be highly useful in the development of general purpose task completion assistants.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sen, Procheta and Wang, Xi and Xu, Ruiqing and Yilmaz, Emine}, year={2024}, month={Jul.}, pages={16482-16484} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27086/26858", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27086", + "pdf_size": 117387, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5383035959212955536&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "liverpool.ac.uk;ucl.ac.uk;outlook.com;ucl.ac.uk", + "email": "liverpool.ac.uk;ucl.ac.uk;outlook.com;ucl.ac.uk", + "github": "", + "project": "https://www.task2kb.uk", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "University of Liverpool;University College London;University of Manchester", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.liverpool.ac.uk;https://www.ucl.ac.uk;https://www.manchester.ac.uk", + "aff_unique_abbr": "Liv Uni;UCL;UoM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26670", + "title": "Taxonomizing and Measuring Representational Harms: A Look at Image Tagging", + "track": "aaai special track", + "status": "Technical", + "abstract": "In this paper, we examine computational approaches for measuring the \"fairness\" of image tagging systems, finding that they cluster into five distinct categories, each with its own analytic foundation. We also identify a range of normative concerns that are often collapsed under the terms \"unfairness,\" \"bias,\" or even \"discrimination\" when discussing problematic cases of image tagging. 
Specifically, we identify four types of representational harms that can be caused by image tagging systems, providing concrete examples of each. We then consider how different computational measurement approaches map to each of these types, demonstrating that there is not a one-to-one mapping. Our findings emphasize that no single measurement approach will be definitive and that it is not possible to infer from the use of a particular measurement approach which type of harm was intended to be measured. Lastly, equipped with this more granular understanding of the types of representational harms that can be caused by image tagging systems, we show that attempts to mitigate some of these types of harms may be in tension with one another.", + "primary_area": "ai for social impact", + "author": "Jared Katzman; Angelina Wang; Morgan Scheuerman; Su Lin Blodgett; Kristen Laird; Hanna Wallach; Solon Barocas", + "authorids": "", + "aff": "University of Michigan; Princeton University; University of Colorado, Boulder; Microsoft Research; Microsoft; Microsoft Research; Microsoft Research", + "bibtex": "@article{Katzman_Wang_Scheuerman_Blodgett_Laird_Wallach_Barocas_2023, title={Taxonomizing and Measuring Representational Harms: A Look at Image Tagging}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26670}, DOI={10.1609/aaai.v37i12.26670}, abstractNote={In this paper, we examine computational approaches for measuring the "fairness" of image tagging systems, finding that they cluster into five distinct categories, each with its own analytic foundation. We also identify a range of normative concerns that are often collapsed under the terms "unfairness," "bias," or even "discrimination" when discussing problematic cases of image tagging. Specifically, we identify four types of representational harms that can be caused by image tagging systems, providing concrete examples of each. 
We then consider how different computational measurement approaches map to each of these types, demonstrating that there is not a one-to-one mapping. Our findings emphasize that no single measurement approach will be definitive and that it is not possible to infer from the use of a particular measurement approach which type of harm was intended to be measured. Lastly, equipped with this more granular understanding of the types of representational harms that can be caused by image tagging systems, we show that attempts to mitigate some of these types of harms may be in tension with one another.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Katzman, Jared and Wang, Angelina and Scheuerman, Morgan and Blodgett, Su Lin and Laird, Kristen and Wallach, Hanna and Barocas, Solon}, year={2023}, month={Jun.}, pages={14277-14285} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26670/26442", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26670", + "pdf_size": 145547, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5569605836901273922&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;3;3;3", + "aff_unique_norm": "University of Michigan;Princeton University;University of Colorado;Microsoft Corporation", + "aff_unique_dep": ";;;Microsoft Research", + "aff_unique_url": "https://www.umich.edu;https://www.princeton.edu;https://www.colorado.edu;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "UM;Princeton;CU;MSR", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Boulder", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25735", + "title": "Teaching to Learn: Sequential Teaching of Learners with Internal States", + "track": "main", + "status": 
"Technical", + "abstract": "In sequential machine teaching, a teacher\u2019s objective is to provide the optimal sequence of inputs to sequential learners in order to guide them towards the best model. However, this teaching objective considers a restricted class of learners with fixed inductive biases. In this paper, we extend the machine teaching framework to learners that can improve their inductive biases, represented as latent internal states, in order to generalize to new datasets.\nWe introduce a novel framework in which learners\u2019 inductive biases may change with the teaching interaction, which affects the learning performance in future tasks. In order to teach such learners, we propose a multi-objective control approach that takes the future performance of the learner after teaching into account. This framework provides tools for modelling learners with internal states, humans and meta-learning algorithms alike. Furthermore, we distinguish manipulative teaching, which can be done by effectively hiding data and also used for indoctrination, from teaching to learn which aims to help the learner become better at learning from new datasets in the absence of a teacher. 
Our empirical results demonstrate that our framework is able to reduce the number of required tasks for online meta-learning, and increases independent learning performance of simulated human users in future tasks.", + "primary_area": "humans and ai", + "author": "Mustafa Mert \u00c7elikok; Pierre-Alexandre Murena; Samuel Kaski", + "authorids": "", + "aff": "Aalto University; Aalto University; Aalto University + The University of Manchester", + "bibtex": "@article{\u00c7elikok_Murena_Kaski_2023, title={Teaching to Learn: Sequential Teaching of Learners with Internal States}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25735}, DOI={10.1609/aaai.v37i5.25735}, abstractNote={In sequential machine teaching, a teacher\u2019s objective is to provide the optimal sequence of inputs to sequential learners in order to guide them towards the best model. However, this teaching objective considers a restricted class of learners with fixed inductive biases. In this paper, we extend the machine teaching framework to learners that can improve their inductive biases, represented as latent internal states, in order to generalize to new datasets.\nWe introduce a novel framework in which learners\u2019 inductive biases may change with the teaching interaction, which affects the learning performance in future tasks. In order to teach such learners, we propose a multi-objective control approach that takes the future performance of the learner after teaching into account. This framework provides tools for modelling learners with internal states, humans and meta-learning algorithms alike. Furthermore, we distinguish manipulative teaching, which can be done by effectively hiding data and also used for indoctrination, from teaching to learn which aims to help the learner become better at learning from new datasets in the absence of a teacher. 
Our empirical results demonstrate that our framework is able to reduce the number of required tasks for online meta-learning, and increases independent learning performance of simulated human users in future tasks.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={\u00c7elikok, Mustafa Mert and Murena, Pierre-Alexandre and Kaski, Samuel}, year={2023}, month={Jun.}, pages={5939-5947} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25735/25507", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25735", + "pdf_size": 276472, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6883569139214168208&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 5, + "aff_domain": "aalto.fi;aalto.fi;aalto.fi", + "email": "aalto.fi;aalto.fi;aalto.fi", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Aalto University;The University of Manchester", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.aalto.fi;https://www.manchester.ac.uk", + "aff_unique_abbr": "Aalto;UoM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1", + "aff_country_unique": "Finland;United Kingdom" + }, + { + "id": "article-25601", + "title": "Temporal Knowledge Graph Reasoning with Historical Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "Temporal knowledge graph, serving as an effective way to store and model dynamic relations, shows promising prospects in event forecasting. However, most temporal knowledge graph reasoning methods are highly dependent on the recurrence or periodicity of events, which brings challenges to inferring future events related to entities that lack historical interaction. In fact, the current moment is often the combined effect of a small part of historical information and those unobserved underlying factors. 
To this end, we propose a new event forecasting model called Contrastive Event Network (CENET), based on a novel training framework of historical contrastive learning. CENET learns both the historical and non-historical dependency to distinguish the most potential entities that can best match the given query. Simultaneously, it trains representations of queries to investigate whether the current moment depends more on historical or non-historical events by launching contrastive learning. The representations further help train a binary classifier whose output is a boolean mask to indicate related entities in the search space. During the inference process, CENET employs a mask-based strategy to generate the final results. We evaluate our proposed model on five benchmark graphs. The results demonstrate that CENET significantly outperforms all existing methods in most metrics, achieving at least 8.3% relative improvement of Hits@1 over previous state-of-the-art baselines on event-based datasets.", + "primary_area": "data mining and knowledge management", + "author": "Yi Xu; Junjie Ou; Hui Xu; Luoyi Fu", + "authorids": "", + "aff": "Department of Computer Science and Engineering; Department of Computer Science and Engineering; Department of Computer Science and Engineering; Department of Computer Science and Engineering", + "bibtex": "@article{Xu_Ou_Xu_Fu_2023, title={Temporal Knowledge Graph Reasoning with Historical Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25601}, DOI={10.1609/aaai.v37i4.25601}, abstractNote={Temporal knowledge graph, serving as an effective way to store and model dynamic relations, shows promising prospects in event forecasting. However, most temporal knowledge graph reasoning methods are highly dependent on the recurrence or periodicity of events, which brings challenges to inferring future events related to entities that lack historical interaction. 
In fact, the current moment is often the combined effect of a small part of historical information and those unobserved underlying factors. To this end, we propose a new event forecasting model called Contrastive Event Network (CENET), based on a novel training framework of historical contrastive learning. CENET learns both the historical and non-historical dependency to distinguish the most potential entities that can best match the given query. Simultaneously, it trains representations of queries to investigate whether the current moment depends more on historical or non-historical events by launching contrastive learning. The representations further help train a binary classifier whose output is a boolean mask to indicate related entities in the search space. During the inference process, CENET employs a mask-based strategy to generate the final results. We evaluate our proposed model on five benchmark graphs. The results demonstrate that CENET significantly outperforms all existing methods in most metrics, achieving at least 8.3% relative improvement of Hits@1 over previous state-of-the-art baselines on event-based datasets.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Yi and Ou, Junjie and Xu, Hui and Fu, Luoyi}, year={2023}, month={Jun.}, pages={4765-4773} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25601/25373", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25601", + "pdf_size": 361024, + "gs_citation": 116, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11230165725309336538&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of California, San Diego", + "aff_unique_dep": "Department of Computer Science and 
Engineering", + "aff_unique_url": "https://cse.ucsd.edu", + "aff_unique_abbr": "UCSD CSE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26072", + "title": "Temporal-Frequency Co-training for Time Series Semi-supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "Semi-supervised learning (SSL) has been actively studied due to its ability to alleviate the reliance of deep learning models on labeled data. Although existing SSL methods based on pseudo-labeling strategies have made great progress, they rarely consider time-series data's intrinsic properties (e.g., temporal dependence). Learning representations by mining the inherent properties of time series has recently gained much attention. Nonetheless, how to utilize feature representations to design SSL paradigms for time series has not been explored. To this end, we propose a Time Series SSL framework via Temporal-Frequency Co-training (TS-TFC), leveraging the complementary information from two distinct views for unlabeled data learning. In particular, TS-TFC employs time-domain and frequency-domain views to train two deep neural networks simultaneously, and each view's pseudo-labels generated by label propagation in the representation space are adopted to guide the training of the other view's classifier. To enhance the discriminative of representations between categories, we propose a temporal-frequency supervised contrastive learning module, which integrates the learning difficulty of categories to improve the quality of pseudo-labels. Through co-training the pseudo-labels obtained from temporal-frequency representations, the complementary information in the two distinct views is exploited to enable the model to better learn the distribution of categories. 
Extensive experiments on 106 UCR datasets show that TS-TFC outperforms state-of-the-art methods, demonstrating the effectiveness and robustness of our proposed model.", + "primary_area": "machine learning ii", + "author": "Zhen Liu; Qianli Ma; Peitian Ma; Linghao Wang", + "authorids": "", + "aff": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China + Key Laboratory of Big Data and Intelligent Robot (South China University of Technology), Ministry of Education; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China", + "bibtex": "@article{Liu_Ma_Ma_Wang_2023, title={Temporal-Frequency Co-training for Time Series Semi-supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26072}, DOI={10.1609/aaai.v37i7.26072}, abstractNote={Semi-supervised learning (SSL) has been actively studied due to its ability to alleviate the reliance of deep learning models on labeled data. Although existing SSL methods based on pseudo-labeling strategies have made great progress, they rarely consider time-series data\u2019s intrinsic properties (e.g., temporal dependence). Learning representations by mining the inherent properties of time series has recently gained much attention. Nonetheless, how to utilize feature representations to design SSL paradigms for time series has not been explored. To this end, we propose a Time Series SSL framework via Temporal-Frequency Co-training (TS-TFC), leveraging the complementary information from two distinct views for unlabeled data learning. 
In particular, TS-TFC employs time-domain and frequency-domain views to train two deep neural networks simultaneously, and each view\u2019s pseudo-labels generated by label propagation in the representation space are adopted to guide the training of the other view\u2019s classifier. To enhance the discriminative of representations between categories, we propose a temporal-frequency supervised contrastive learning module, which integrates the learning difficulty of categories to improve the quality of pseudo-labels. Through co-training the pseudo-labels obtained from temporal-frequency representations, the complementary information in the two distinct views is exploited to enable the model to better learn the distribution of categories. Extensive experiments on 106 UCR datasets show that TS-TFC outperforms state-of-the-art methods, demonstrating the effectiveness and robustness of our proposed model.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Zhen and Ma, Qianli and Ma, Peitian and Wang, Linghao}, year={2023}, month={Jun.}, pages={8923-8931} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26072/25844", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26072", + "pdf_size": 487252, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2123660070794997741&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mail.scut.edu.cn;scut.edu.cn;163.com;gmail.com", + "email": "mail.scut.edu.cn;scut.edu.cn;163.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+0;0;0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0;0+0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-26067", + "title": "Tensor Compressive Sensing Fused Low-Rankness and Local-Smoothness", + "track": "main", + "status": "Technical", + "abstract": "A plethora of previous studies indicates that making full use of multifarious intrinsic properties of primordial data is a valid pathway to recover original images from their degraded observations. Typically, both low-rankness and local-smoothness broadly exist in real-world tensor data such as hyperspectral images and videos. Modeling based on both properties has received a great deal of attention, whereas most studies concentrate on experimental performance, and theoretical investigations are still lacking. In this paper, we study the tensor compressive sensing problem based on the tensor correlated total variation, which is a new regularizer used to simultaneously capture both properties existing in the same dataset. The new regularizer has the outstanding advantage of not using a trade-off parameter to balance the two properties. The obtained theories provide a robust recovery guarantee, where the error bound shows that our model certainly benefits from both properties in ground-truth data adaptively. Moreover, based on the ADMM update procedure, we design an algorithm with a global convergence guarantee to solve this model. At last, we carry out experiments to apply our model to hyperspectral image and video restoration problems. The experimental results show that our method is prominently better than many other competing ones. 
Our code and Supplementary Material are available at https://github.com/fsliuxl/cs-tctv.", + "primary_area": "machine learning ii", + "author": "Xinling Liu; Jingyao Hou; Jiangjun Peng; Hailin Wang; Deyu Meng; Jianjun Wang", + "authorids": "", + "aff": "School of Mathematics and Statistics, Southwest University, Chongqing 400715, China+Macao Institute of Systems Engineering, Macau University of Science and Technology, Taipa, Macao; School of Mathematics and Information, China West Normal University, Nanchong 637002, China; School of Mathematics and Statistics and Ministry of Education Key Lab of Intelligent Networks and Network Security, Xi\u2019an Jiaotong University, Xi\u2019an 710049, China; School of Mathematics and Statistics and Ministry of Education Key Lab of Intelligent Networks and Network Security, Xi\u2019an Jiaotong University, Xi\u2019an 710049, China; School of Mathematics and Statistics and Ministry of Education Key Lab of Intelligent Networks and Network Security, Xi\u2019an Jiaotong University, Xi\u2019an 710049, China+Macao Institute of Systems Engineering, Macau University of Science and Technology, Taipa, Macao; School of Mathematics and Statistics, Southwest University, Chongqing 400715, China+Macao Institute of Systems Engineering, Macau University of Science and Technology, Taipa, Macao", + "bibtex": "@article{Liu_Hou_Peng_Wang_Meng_Wang_2023, title={Tensor Compressive Sensing Fused Low-Rankness and Local-Smoothness}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26067}, DOI={10.1609/aaai.v37i7.26067}, abstractNote={A plethora of previous studies indicates that making full use of multifarious intrinsic properties of primordial data is a valid pathway to recover original images from their degraded observations. Typically, both low-rankness and local-smoothness broadly exist in real-world tensor data such as hyperspectral images and videos. 
Modeling based on both properties has received a great deal of attention, whereas most studies concentrate on experimental performance, and theoretical investigations are still lacking. In this paper, we study the tensor compressive sensing problem based on the tensor correlated total variation, which is a new regularizer used to simultaneously capture both properties existing in the same dataset. The new regularizer has the outstanding advantage of not using a trade-off parameter to balance the two properties. The obtained theories provide a robust recovery guarantee, where the error bound shows that our model certainly benefits from both properties in ground-truth data adaptively. Moreover, based on the ADMM update procedure, we design an algorithm with a global convergence guarantee to solve this model. At last, we carry out experiments to apply our model to hyperspectral image and video restoration problems. The experimental results show that our method is prominently better than many other competing ones. 
Our code and Supplementary Material are available at https://github.com/fsliuxl/cs-tctv.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Xinling and Hou, Jingyao and Peng, Jiangjun and Wang, Hailin and Meng, Deyu and Wang, Jianjun}, year={2023}, month={Jun.}, pages={8879-8887} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26067/25839", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26067", + "pdf_size": 617799, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17346101563066035979&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "163.com;163.com;gmail.com;163.com;mail.xjtu.edu.cn;163.com", + "email": "163.com;163.com;gmail.com;163.com;mail.xjtu.edu.cn;163.com", + "github": "https://github.com/fsliuxl/cs-tctv", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;3;3;3+1;0+1", + "aff_unique_norm": "Southwest University;Macau University of Science and Technology;China West Normal University;Xi'an Jiaotong University", + "aff_unique_dep": "School of Mathematics and Statistics;Macao Institute of Systems Engineering;School of Mathematics and Information;School of Mathematics and Statistics", + "aff_unique_url": ";https://www.must.edu.mo;;http://www.xjtu.edu.cn", + "aff_unique_abbr": ";MUST;;XJTU", + "aff_campus_unique_index": "0+1;2;3;3;3+1;0+1", + "aff_campus_unique": "Chongqing;Taipa;Nanchong;Xi'an", + "aff_country_unique_index": "0+1;0;0;0;0+1;0+1", + "aff_country_unique": "China;Macao" + }, + { + "id": "article-26340", + "title": "Tensorized Incomplete Multi-View Clustering with Intrinsic Graph Completion", + "track": "main", + "status": "Technical", + "abstract": "Most of the existing incomplete multi-view clustering (IMVC) methods focus on attaining a consensus representation from different views but ignore the important information hidden in the missing views and the latent intrinsic structures in each view. 
To tackle these issues, in this paper, a unified and novel framework, named tensorized incomplete multi-view clustering with intrinsic graph completion (TIMVC_IGC) is proposed. Firstly, owing to the effectiveness of the low-rank representation in revealing the inherent structure of the data, we exploit it to infer the missing instances and construct the complete graph for each view. Afterwards, inspired by the structural consistency, a between-view consistency constraint is imposed to guarantee the similarity of the graphs from different views. More importantly, the TIMVC_IGC simultaneously learns the low-rank structures of the different views and explores the correlations of the different graphs in a latent manifold sub-space using a low-rank tensor constraint, such that the intrinsic graphs of the different views can be obtained. Finally, a consensus representation for each sample is gained with a co-regularization term for final clustering. Experimental results on several real-world databases illustrates that the proposed method can outperform the other state-of-the-art related methods for incomplete multi-view clustering.", + "primary_area": "machine learning iv", + "author": "Shuping Zhao; Jie Wen; Lunke Fei; Bob Zhang", + "authorids": "", + "aff": "PAMI Research Group, Department of Computer and Information Science, University of Macau, Taipa, Macau; Shenzhen Key Laboratory of Visual Object Detection and Recognition, Harbin Institute of Technology, Shenzhen, Shenzhen, 518055, China; School of Computer Science, Guangdong University of Technology, Guangzhou, 510006, China; PAMI Research Group, Department of Computer and Information Science, University of Macau, Taipa, Macau", + "bibtex": "@article{Zhao_Wen_Fei_Zhang_2023, title={Tensorized Incomplete Multi-View Clustering with Intrinsic Graph Completion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26340}, DOI={10.1609/aaai.v37i9.26340}, abstractNote={Most of the existing incomplete 
multi-view clustering (IMVC) methods focus on attaining a consensus representation from different views but ignore the important information hidden in the missing views and the latent intrinsic structures in each view. To tackle these issues, in this paper, a unified and novel framework, named tensorized incomplete multi-view clustering with intrinsic graph completion (TIMVC_IGC) is proposed. Firstly, owing to the effectiveness of the low-rank representation in revealing the inherent structure of the data, we exploit it to infer the missing instances and construct the complete graph for each view. Afterwards, inspired by the structural consistency, a between-view consistency constraint is imposed to guarantee the similarity of the graphs from different views. More importantly, the TIMVC_IGC simultaneously learns the low-rank structures of the different views and explores the correlations of the different graphs in a latent manifold sub-space using a low-rank tensor constraint, such that the intrinsic graphs of the different views can be obtained. Finally, a consensus representation for each sample is gained with a co-regularization term for final clustering. 
Experimental results on several real-world databases illustrates that the proposed method can outperform the other state-of-the-art related methods for incomplete multi-view clustering.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Shuping and Wen, Jie and Fei, Lunke and Zhang, Bob}, year={2023}, month={Jun.}, pages={11327-11335} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26340/26112", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26340", + "pdf_size": 687922, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9089106114204689262&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "um.edu.mo;126.com;126.com;um.edu.mo", + "email": "um.edu.mo;126.com;126.com;um.edu.mo", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Macau;Harbin Institute of Technology;Guangdong University of Technology", + "aff_unique_dep": "Department of Computer and Information Science;Shenzhen Key Laboratory of Visual Object Detection and Recognition;School of Computer Science", + "aff_unique_url": "https://www.um.edu.mo;http://www.hit.edu.cn/;", + "aff_unique_abbr": "UMacau;HIT;", + "aff_campus_unique_index": "0;1;2;0", + "aff_campus_unique": "Taipa;Shenzhen;Guangzhou", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "Macau;China" + }, + { + "id": "article-26735", + "title": "Test Time Augmentation Meets Post-hoc Calibration: Uncertainty Quantification under Real-World Conditions", + "track": "aaai special track", + "status": "Technical", + "abstract": "Communicating the predictive uncertainty of deep neural networks transparently and reliably is important in many safety-critical applications such as medicine. However, modern neural networks tend to be poorly calibrated, resulting in wrong predictions made with a high confidence. 
While existing post-hoc calibration methods like temperature scaling or isotonic regression yield strongly calibrated predictions in artificial experimental settings, their efficiency can significantly reduce in real-world applications, where scarcity of labeled data or domain drifts are commonly present. In this paper, we first investigate the impact of these characteristics on post-hoc calibration and introduce an easy-to-implement extension of common post-hoc calibration methods based on test time augmentation. In extensive experiments, we demonstrate that our approach results in substantially better calibration on various architectures. We demonstrate the robustness of our proposed approach on a real-world application for skin cancer classification and show that it facilitates safe decision-making under real-world uncertainties.", + "primary_area": "safe and robust ai", + "author": "Achim Hekler; Titus J. Brinker; Florian Buettner", + "authorids": "", + "aff": "German Cancer Research Center (DKFZ) Heidelberg, Germany + Goethe University Frankfurt, Germany; German Cancer Research Center (DKFZ) Heidelberg, Germany; German Cancer Research Center (DKFZ) Heidelberg, Germany + German Cancer Consortium (DKTK), Germany + Goethe University Frankfurt, Germany", + "bibtex": "@article{Hekler_Brinker_Buettner_2023, title={Test Time Augmentation Meets Post-hoc Calibration: Uncertainty Quantification under Real-World Conditions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26735}, DOI={10.1609/aaai.v37i12.26735}, abstractNote={Communicating the predictive uncertainty of deep neural networks transparently and reliably is important in many safety-critical applications such as medicine. However, modern neural networks tend to be poorly calibrated, resulting in wrong predictions made with a high confidence. 
While existing post-hoc calibration methods like temperature scaling or isotonic regression yield strongly calibrated predictions in artificial experimental settings, their efficiency can significantly reduce in real-world applications, where scarcity of labeled data or domain drifts are commonly present. In this paper, we first investigate the impact of these characteristics on post-hoc calibration and introduce an easy-to-implement extension of common post-hoc calibration methods based on test time augmentation. In extensive experiments, we demonstrate that our approach results in substantially better calibration on various architectures. We demonstrate the robustness of our proposed approach on a real-world application for skin cancer classification and show that it facilitates safe decision-making under real-world uncertainties.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hekler, Achim and Brinker, Titus J. and Buettner, Florian}, year={2023}, month={Jun.}, pages={14856-14864} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26735/26507", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26735", + "pdf_size": 3342129, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3105296729099262595&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "dkfz.de;dkfz.de;dkfz.de", + "email": "dkfz.de;dkfz.de;dkfz.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0+2+1", + "aff_unique_norm": "German Cancer Research Center;Goethe University Frankfurt;German Cancer Consortium", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.dkfz.de;https://www.uni-frankfurt.de;", + "aff_unique_abbr": "DKFZ;GU Frankfurt;DKTK", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Heidelberg;", + "aff_country_unique_index": "0+0;0;0+0+0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26726", + "title": 
"Testing the Channels of Convolutional Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Neural networks have complex structures, and thus it is hard to understand their inner workings and ensure correctness. To understand and debug convolutional neural networks (CNNs) we propose techniques for testing the channels of CNNs. We design FtGAN, an extension to GAN, that can generate test data with varying the intensity (i.e., sum of the neurons) of a channel of a target CNN. We also proposed a channel selection algorithm to find representative channels for testing. To efficiently inspect the target CNN\u2019s inference computations, we define unexpectedness score, which estimates how similar the inference computation of the test data is to that of the training data. We evaluated FtGAN with five public datasets and showed that our techniques successfully identify defective channels in five different CNN models.", + "primary_area": "safe and robust ai", + "author": "Kang Choi; Donghyun Son; Younghoon Kim; Jiwon Seo", + "authorids": "", + "aff": "Hanyang University; Hanyang University; Hanyang University; Hanyang University", + "bibtex": "@article{Choi_Son_Kim_Seo_2023, title={Testing the Channels of Convolutional Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26726}, DOI={10.1609/aaai.v37i12.26726}, abstractNote={Neural networks have complex structures, and thus it is hard to understand their inner workings and ensure correctness. To understand and debug convolutional neural networks (CNNs) we propose techniques for testing the channels of CNNs. We design FtGAN, an extension to GAN, that can generate test data with varying the intensity (i.e., sum of the neurons) of a channel of a target CNN. We also proposed a channel selection algorithm to find representative channels for testing. 
To efficiently inspect the target CNN\u2019s inference computations, we define unexpectedness score, which estimates how similar the inference computation of the test data is to that of the training data. We evaluated FtGAN with five public datasets and showed that our techniques successfully identify defective channels in five different CNN models.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Choi, Kang and Son, Donghyun and Kim, Younghoon and Seo, Jiwon}, year={2023}, month={Jun.}, pages={14774-14782} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26726/26498", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26726", + "pdf_size": 3962062, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11073484192280403063&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "hanyang.ac.kr;hanyang.ac.kr;hanyang.ac.kr;hanyang.ac.kr", + "email": "hanyang.ac.kr;hanyang.ac.kr;hanyang.ac.kr;hanyang.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Hanyang University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.hanyang.ac.kr", + "aff_unique_abbr": "HYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25347", + "title": "Text to Point Cloud Localization with Relation-Enhanced Transformer", + "track": "main", + "status": "Technical", + "abstract": "Automatically localizing a position based on a few natural language instructions is essential for future robots to communicate and collaborate with humans. To approach this goal, we focus on a text-to-point-cloud cross-modal localization\nproblem. Given a textual query, it aims to identify the described location from city-scale point clouds. The task involves two challenges. 
1) In city-scale point clouds, similar ambient instances may exist in several locations. Searching each location in a huge point cloud with only instances as guidance may lead to less discriminative signals and incorrect results. 2) In textual descriptions, the hints are provided separately. In this case, the relations among those hints are not\nexplicitly described, leaving the difficulties of learning relations to the agent itself. To alleviate the two challenges, we propose a unified Relation-Enhanced Transformer (RET) to improve representation discriminability for both point cloud\nand nature language queries. The core of the proposed RET is a novel Relation-enhanced Self-Attention (RSA) mechanism, which explicitly encodes instance (hint)-wise relations for the two modalities. Moreover, we propose a fine-grained cross-modal matching method to further refine the location predictions in a subsequent instance-hint matching stage. Experimental results on the KITTI360Pose dataset demonstrate that our approach surpasses the previous state-of-the-art method by large margins.", + "primary_area": "computer vision ii", + "author": "Guangzhi Wang; Hehe Fan; Mohan Kankanhalli", + "authorids": "", + "aff": "Institute of Data Science, National University of Singapore; School of Computing, National University of Singapore; School of Computing, National University of Singapore", + "bibtex": "@article{Wang_Fan_Kankanhalli_2023, title={Text to Point Cloud Localization with Relation-Enhanced Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25347}, DOI={10.1609/aaai.v37i2.25347}, abstractNote={Automatically localizing a position based on a few natural language instructions is essential for future robots to communicate and collaborate with humans. To approach this goal, we focus on a text-to-point-cloud cross-modal localization\nproblem. Given a textual query, it aims to identify the described location from city-scale point clouds. 
The task involves two challenges. 1) In city-scale point clouds, similar ambient instances may exist in several locations. Searching each location in a huge point cloud with only instances as guidance may lead to less discriminative signals and incorrect results. 2) In textual descriptions, the hints are provided separately. In this case, the relations among those hints are not\nexplicitly described, leaving the difficulties of learning relations to the agent itself. To alleviate the two challenges, we propose a unified Relation-Enhanced Transformer (RET) to improve representation discriminability for both point cloud\nand nature language queries. The core of the proposed RET is a novel Relation-enhanced Self-Attention (RSA) mechanism, which explicitly encodes instance (hint)-wise relations for the two modalities. Moreover, we propose a fine-grained cross-modal matching method to further refine the location predictions in a subsequent instance-hint matching stage. Experimental results on the KITTI360Pose dataset demonstrate that our approach surpasses the previous state-of-the-art method by large margins.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Guangzhi and Fan, Hehe and Kankanhalli, Mohan}, year={2023}, month={Jun.}, pages={2501-2509} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25347/25119", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25347", + "pdf_size": 3559286, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13197906292885031267&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "u.nus.edu;nus.edu.sg;comp.nus.edu.sg", + "email": "u.nus.edu;nus.edu.sg;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "Institute of Data Science", + "aff_unique_url": "https://www.nus.edu.sg", + 
"aff_unique_abbr": "NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25328", + "title": "Text-DIAE: A Self-Supervised Degradation Invariant Autoencoder for Text Recognition and Document Enhancement", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we propose a Text-Degradation Invariant Auto Encoder (Text-DIAE), a self-supervised model designed to tackle two tasks, text recognition (handwritten or scene-text) and document image enhancement. We start by employing a transformer-based architecture that incorporates three pretext tasks as learning objectives to be optimized during pre-training without the usage of labelled data. Each of the pretext objectives is specifically tailored for the final downstream tasks. We conduct several ablation experiments that confirm the design choice of the selected pretext tasks. Importantly, the proposed model does not exhibit limitations of previous state-of-the-art methods based on contrastive losses, while at the same time requiring substantially fewer data samples to converge. Finally, we demonstrate that our method surpasses the state-of-the-art in existing supervised and self-supervised settings in handwritten and scene text recognition and document image enhancement. 
Our code and trained models will be made publicly available at https://github.com/dali92002/SSL-OCR", + "primary_area": "computer vision ii", + "author": "Mohamed Ali Souibgui; Sanket Biswas; Andres Mafla; Ali Furkan Biten; Alicia Forn\u00e9s; Yousri Kessentini; Josep Llad\u00f3s; Lluis Gomez; Dimosthenis Karatzas", + "authorids": "", + "aff": "Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Digital Research Center of Sfax, SM@RTS Laboratory, Sfax, Tunisia; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain; Computer Vision Center, Universitat Aut\u00f2noma de Barcelona, Barcelona, Spain", + "bibtex": "@article{Souibgui_Biswas_Mafla_Biten_Forn\u00e9s_Kessentini_Llad\u00f3s_Gomez_Karatzas_2023, title={Text-DIAE: A Self-Supervised Degradation Invariant Autoencoder for Text Recognition and Document Enhancement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25328}, DOI={10.1609/aaai.v37i2.25328}, abstractNote={In this paper, we propose a Text-Degradation Invariant Auto Encoder (Text-DIAE), a self-supervised model designed to tackle two tasks, text recognition (handwritten or scene-text) and document image enhancement. We start by employing a transformer-based architecture that incorporates three pretext tasks as learning objectives to be optimized during pre-training without the usage of labelled data. Each of the pretext objectives is specifically tailored for the final downstream tasks. 
We conduct several ablation experiments that confirm the design choice of the selected pretext tasks. Importantly, the proposed model does not exhibit limitations of previous state-of-the-art methods based on contrastive losses, while at the same time requiring substantially fewer data samples to converge. Finally, we demonstrate that our method surpasses the state-of-the-art in existing supervised and self-supervised settings in handwritten and scene text recognition and document image enhancement. Our code and trained models will be made publicly available at https://github.com/dali92002/SSL-OCR}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Souibgui, Mohamed Ali and Biswas, Sanket and Mafla, Andres and Biten, Ali Furkan and Forn\u00e9s, Alicia and Kessentini, Yousri and Llad\u00f3s, Josep and Gomez, Lluis and Karatzas, Dimosthenis}, year={2023}, month={Jun.}, pages={2330-2338} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25328/25100", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25328", + "pdf_size": 743176, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3044324191791072464&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 5, + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "https://github.com/dali92002/SSL-OCR", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;1;0;0;0", + "aff_unique_norm": "Universitat Aut\u00f2noma de Barcelona;Digital Research Center of Sfax", + "aff_unique_dep": "Computer Vision Center;SM@RTS Laboratory", + "aff_unique_url": "https://www.uab.cat;", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "0;0;0;0;0;1;0;0;0", + "aff_campus_unique": "Barcelona;Sfax", + "aff_country_unique_index": "0;0;0;0;0;1;0;0;0", + "aff_country_unique": "Spain;Tunisia" + }, + { + "id": "article-27062", + "title": "TgrApp: Anomaly Detection and Visualization of Large-Scale Call Graphs", + "track": 
"demonstrations", + "status": "Technical", + "abstract": "Given a million-scale dataset of who-calls-whom data containing imperfect labels, how can we detect existing and new fraud patterns? We propose TgrApp, which extracts carefully designed features and provides visualizations to assist analysts in spotting fraudsters and suspicious behavior. Our TgrApp method has the following properties: (a) Scalable, as it is linear on the input size; and (b) Effective, as it allows natural interaction with human analysts, and is applicable in both supervised and unsupervised settings.", + "primary_area": "", + "author": "Mirela T. Cazzolato; Saranya Vijayakumar; Xinyi Zheng; Namyong Park; Meng-Chieh Lee; Duen Horng Chau; Pedro Fidalgo; Bruno Lages; Agma J. M. Traina; Christos Faloutsos", + "authorids": "", + "aff": "Carnegie Mellon University (CMU) + University of S \u02dcao Paulo (ICMC-USP); Carnegie Mellon University (CMU); Carnegie Mellon University (CMU); Carnegie Mellon University (CMU); Carnegie Mellon University (CMU); Georgia Institute of Technology; Mobileum + University Institute of Lisbon (ISCTE-IUL); Mobileum; University of S \u02dcao Paulo (ICMC-USP); Carnegie Mellon University (CMU)", + "bibtex": "@article{Cazzolato_Vijayakumar_Zheng_Park_Lee_Chau_Fidalgo_Lages_Traina_Faloutsos_2024, title={TgrApp: Anomaly Detection and Visualization of Large-Scale Call Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27062}, DOI={10.1609/aaai.v37i13.27062}, abstractNote={Given a million-scale dataset of who-calls-whom data containing imperfect labels, how can we detect existing and new fraud patterns? We propose TgrApp, which extracts carefully designed features and provides visualizations to assist analysts in spotting fraudsters and suspicious behavior. 
Our TgrApp method has the following properties: (a) Scalable, as it is linear on the input size; and (b) Effective, as it allows natural interaction with human analysts, and is applicable in both supervised and unsupervised settings.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cazzolato, Mirela T. and Vijayakumar, Saranya and Zheng, Xinyi and Park, Namyong and Lee, Meng-Chieh and Chau, Duen Horng and Fidalgo, Pedro and Lages, Bruno and Traina, Agma J. M. and Faloutsos, Christos}, year={2024}, month={Jul.}, pages={16410-16412} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27062/26834", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27062", + "pdf_size": 629492, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:3rMWbzoTlKMJ:scholar.google.com/&scioq=TgrApp:+Anomaly+Detection+and+Visualization+of+Large-Scale+Call+Graphs&hl=en&as_sdt=0,33", + "gs_version_total": 6, + "aff_domain": "andrew.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;gatech.edu;iscte-iul.pt;mobileum.com;icmc.usp.br;cs.cmu.edu", + "email": "andrew.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;gatech.edu;iscte-iul.pt;mobileum.com;icmc.usp.br;cs.cmu.edu", + "github": "https://github.com/mtcazzolato/tgrapp", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1;0;0;0;0;2;3+4;3;1;0", + "aff_unique_norm": "Carnegie Mellon University;University of S Paulo;Georgia Institute of Technology;Mobileum;University Institute of Lisbon", + "aff_unique_dep": ";ICMC;;;", + "aff_unique_url": "https://www.cmu.edu;https://www.icmc.usp.br;https://www.gatech.edu;;https://www.iscte-iul.pt", + "aff_unique_abbr": "CMU;ICMC-USP;Georgia Tech;;ISCTE-IUL", + "aff_campus_unique_index": "1;;1", + "aff_campus_unique": ";Sao Paulo", + "aff_country_unique_index": "0+1;0;0;0;0;0;3;1;0", + "aff_country_unique": "United States;Brazil;;Portugal" + }, + { + "id": "article-26829", + "title": "The 
Analysis of Deep Neural Networks by Information Theory: From Explainability to Generalization", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Despite their great success in many artificial intelligence tasks, deep neural networks (DNNs) still suffer from a few limitations, such as poor generalization behavior for out-of-distribution (OOD) data and the \"black-box\" nature. Information theory offers fresh insights to solve these challenges. In this short paper, we briefly review the recent developments in this area, and highlight our contributions.", + "primary_area": "", + "author": "Shujian Yu", + "authorids": "", + "aff": "Department of Computer Science, Vrije Universiteit Amsterdam + Department of Physics and Technology, UiT - The Arctic University of Norway", + "bibtex": "@article{Yu_2024, title={The Analysis of Deep Neural Networks by Information Theory: From Explainability to Generalization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26829}, DOI={10.1609/aaai.v37i13.26829}, abstractNote={Despite their great success in many artificial intelligence tasks, deep neural networks (DNNs) still suffer from a few limitations, such as poor generalization behavior for out-of-distribution (OOD) data and the "black-box" nature. Information theory offers fresh insights to solve these challenges. 
In this short paper, we briefly review the recent developments in this area, and highlight our contributions.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Shujian}, year={2024}, month={Jul.}, pages={15462-15462} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26829/26601", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26829", + "pdf_size": 53174, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3978925755519720825&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com", + "email": "gmail.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+1", + "aff_unique_norm": "Vrije Universiteit Amsterdam;UiT - The Arctic University of Norway", + "aff_unique_dep": "Department of Computer Science;Department of Physics and Technology", + "aff_unique_url": "https://www.vu.nl;https://www.uit.no", + "aff_unique_abbr": "VU Amsterdam;UiT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1", + "aff_country_unique": "Netherlands;Norway" + }, + { + "id": "article-26801", + "title": "The Automatic Computer Scientist", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Algorithms are ubiquitous: they track our sleep, help us find cheap flights, and even help us see black holes. However, designing novel algorithms is extremely difficult, and we do not have efficient algorithms for many fundamental problems. The goal of my research is to accelerate algorithm discovery by building an automatic computer scientist. To work towards this goal, my research focuses on inductive logic programming, a form of machine learning in which my collaborators and I have demonstrated major advances in automated algorithm discovery over the past five years. 
In this talk and paper, I survey these advances.", + "primary_area": "", + "author": "Andrew Cropper", + "authorids": "", + "aff": "University of Oxford", + "bibtex": "@article{Cropper_2024, title={The Automatic Computer Scientist}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26801}, DOI={10.1609/aaai.v37i13.26801}, abstractNote={Algorithms are ubiquitous: they track our sleep, help us find cheap flights, and even help us see black holes. However, designing novel algorithms is extremely difficult, and we do not have efficient algorithms for many fundamental problems. The goal of my research is to accelerate algorithm discovery by building an automatic computer scientist. To work towards this goal, my research focuses on inductive logic programming, a form of machine learning in which my collaborators and I have demonstrated major advances in automated algorithm discovery over the past five years. In this talk and paper, I survey these advances.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cropper, Andrew}, year={2024}, month={Jul.}, pages={15434-15434} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26801/26573", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26801", + "pdf_size": 45986, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3368458158218300506&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "cs.ox.ac.uk", + "email": "cs.ox.ac.uk", + "github": "https://github.com/metagol/metagol", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Oxford", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ox.ac.uk", + "aff_unique_abbr": "Oxford", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25252", + "title": "The Devil Is in the Frequency: Geminated Gestalt Autoencoder for Self-Supervised Visual 
Pre-training", + "track": "main", + "status": "Technical", + "abstract": "The self-supervised Masked Image Modeling (MIM) schema, following \"mask-and-reconstruct\" pipeline of recovering contents from masked image, has recently captured the increasing interest in the community, owing to the excellent ability of learning visual representation from unlabeled data. Aiming at learning representations with high semantics abstracted, a group of works attempts to reconstruct non-semantic pixels with large-ratio masking strategy, which may suffer from \"over-smoothing\" problem, while others directly infuse semantics into targets in off-line way requiring extra data. Different from them, we shift the perspective to the Fourier domain which naturally has global perspective and present a new Masked Image Modeling (MIM), termed Geminated Gestalt Autoencoder (Ge^2-AE) for visual pre-training. Specifically, we equip our model with geminated decoders in charge of reconstructing image contents from both pixel and frequency space, where each other serves as not only the complementation but also the reciprocal constraints. Through this way, more robust representations can be learned in the pre-trained encoders, of which the effectiveness is confirmed by the juxtaposing experimental results on downstream recognition tasks. We also conduct several quantitative and qualitative experiments to investigate the learning behavior of our method. 
To our best knowledge, this is the first MIM work to solve the visual pre-training through the lens of frequency domain.", + "primary_area": "computer vision ii", + "author": "Hao Liu; Xinghua Jiang; Xin Li; Antai Guo; Yiqing Hu; Deqiang Jiang; Bo Ren", + "authorids": "", + "aff": "Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab; Tencent YouTu Lab", + "bibtex": "@article{Liu_Jiang_Li_Guo_Hu_Jiang_Ren_2023, title={The Devil Is in the Frequency: Geminated Gestalt Autoencoder for Self-Supervised Visual Pre-training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25252}, DOI={10.1609/aaai.v37i2.25252}, abstractNote={The self-supervised Masked Image Modeling (MIM) schema, following "mask-and-reconstruct" pipeline of recovering contents from masked image, has recently captured the increasing interest in the community, owing to the excellent ability of learning visual representation from unlabeled data. Aiming at learning representations with high semantics abstracted, a group of works attempts to reconstruct non-semantic pixels with large-ratio masking strategy, which may suffer from "over-smoothing" problem, while others directly infuse semantics into targets in off-line way requiring extra data. Different from them, we shift the perspective to the Fourier domain which naturally has global perspective and present a new Masked Image Modeling (MIM), termed Geminated Gestalt Autoencoder (Ge^2-AE) for visual pre-training. Specifically, we equip our model with geminated decoders in charge of reconstructing image contents from both pixel and frequency space, where each other serves as not only the complementation but also the reciprocal constraints. Through this way, more robust representations can be learned in the pre-trained encoders, of which the effectiveness is confirmed by the juxtaposing experimental results on downstream recognition tasks. 
We also conduct several quantitative and qualitative experiments to investigate the learning behavior of our method. To our best knowledge, this is the first MIM work to solve the visual pre-training through the lens of frequency domain.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Hao and Jiang, Xinghua and Li, Xin and Guo, Antai and Hu, Yiqing and Jiang, Deqiang and Ren, Bo}, year={2023}, month={Jun.}, pages={1649-1656} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25252/25024", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25252", + "pdf_size": 2535505, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13108922105797150510&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "email": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "YouTu Lab", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26012", + "title": "The Effect of Diversity in Meta-Learning", + "track": "main", + "status": "Technical", + "abstract": "Recent studies show that task distribution plays a vital role in the meta-learner's performance. Conventional wisdom is that task diversity should improve the performance of meta-learning. In this work, we find evidence to the contrary; (i) our experiments draw into question the efficacy of our learned models: similar manifolds can be learned with a subset of the data (lower task diversity). 
This finding questions the advantage of providing more data to the model, and (ii) adding diversity to the task distribution (higher task diversity) sometimes hinders the model and does not lead to a significant improvement in performance as previously believed. To strengthen our findings, we provide both empirical and theoretical evidence.", + "primary_area": "machine learning ii", + "author": "Ramnath Kumar; Tristan Deleu; Yoshua Bengio", + "authorids": "", + "aff": "Google Research, India; Mila, Qu\u00e9bec Artificial Intelligence Institute, Universit\u00e9 de Montr\u00e9al; CIFAR, IVADO", + "bibtex": "@article{Kumar_Deleu_Bengio_2023, title={The Effect of Diversity in Meta-Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26012}, DOI={10.1609/aaai.v37i7.26012}, abstractNote={Recent studies show that task distribution plays a vital role in the meta-learner\u2019s performance. Conventional wisdom is that task diversity should improve the performance of meta-learning. In this work, we find evidence to the contrary; (i) our experiments draw into question the efficacy of our learned models: similar manifolds can be learned with a subset of the data (lower task diversity). This finding questions the advantage of providing more data to the model, and (ii) adding diversity to the task distribution (higher task diversity) sometimes hinders the model and does not lead to a significant improvement in performance as previously believed. 
To strengthen our findings, we provide both empirical and theoretical evidence.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kumar, Ramnath and Deleu, Tristan and Bengio, Yoshua}, year={2023}, month={Jun.}, pages={8396-8404} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26012/25784", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26012", + "pdf_size": 313893, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12213071059734001817&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "google.com; ; ", + "email": "google.com; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Google;Universit\u00e9 de Montr\u00e9al;CIFAR", + "aff_unique_dep": "Google Research;Qu\u00e9bec Artificial Intelligence Institute;", + "aff_unique_url": "https://research.google;https://www.mila.quebec;https://www.cifar.ca", + "aff_unique_abbr": "Google Research;Mila;CIFAR", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "India;Montreal;", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "India;Canada" + }, + { + "id": "article-25740", + "title": "The Effect of Modeling Human Rationality Level on Learning Rewards from Multiple Feedback Types", + "track": "main", + "status": "Technical", + "abstract": "When inferring reward functions from human behavior (be it demonstrations, comparisons, physical corrections, or e-stops), it has proven useful to model the human as making noisy-rational choices, with a \"rationality coefficient\" capturing how much noise or entropy we expect to see in the human behavior. Prior work typically sets the rationality level to a constant value, regardless of the type, or quality, of human feedback. However, in many settings, giving one type of feedback (e.g. a demonstration) may be much more difficult than a different type of feedback (e.g. 
answering a comparison query). Thus, we expect to see more or less noise depending on the type of human feedback. In this work, we advocate that grounding the rationality coefficient in real data for each feedback type, rather than assuming a default value, has a significant positive effect on reward learning. We test this in both simulated experiments and in a user study with real human feedback. We find that overestimating human rationality can have dire effects on reward learning accuracy and regret. We also find that fitting the rationality coefficient to human data enables better reward learning, even when the human deviates significantly from the noisy-rational choice model due to systematic biases. Further, we find that the rationality level affects the informativeness of each feedback type: surprisingly, demonstrations are not always the most informative---when the human acts very suboptimally, comparisons actually become more informative, even when the rationality level is the same for both. Ultimately, our results emphasize the importance and advantage of paying attention to the assumed human-rationality-level, especially when agents actively learn from multiple types of human feedback.", + "primary_area": "humans and ai", + "author": "Gaurav R. Ghosal; Matthew Zurek; Daniel S. Brown; Anca D. 
Dragan", + "authorids": "", + "aff": "University of California, Berkeley; University of Wisconsin, Madison; University of Utah; University of California, Berkeley", + "bibtex": "@article{Ghosal_Zurek_Brown_Dragan_2023, title={The Effect of Modeling Human Rationality Level on Learning Rewards from Multiple Feedback Types}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25740}, DOI={10.1609/aaai.v37i5.25740}, abstractNote={When inferring reward functions from human behavior (be it demonstrations, comparisons, physical corrections, or e-stops), it has proven useful to model the human as making noisy-rational choices, with a "rationality coefficient" capturing how much noise or entropy we expect to see in the human behavior. Prior work typically sets the rationality level to a constant value, regardless of the type, or quality, of human feedback. However, in many settings, giving one type of feedback (e.g. a demonstration) may be much more difficult than a different type of feedback (e.g. answering a comparison query). Thus, we expect to see more or less noise depending on the type of human feedback. In this work, we advocate that grounding the rationality coefficient in real data for each feedback type, rather than assuming a default value, has a significant positive effect on reward learning. We test this in both simulated experiments and in a user study with real human feedback. We find that overestimating human rationality can have dire effects on reward learning accuracy and regret. We also find that fitting the rationality coefficient to human data enables better reward learning, even when the human deviates significantly from the noisy-rational choice model due to systematic biases. 
Further, we find that the rationality level affects the informativeness of each feedback type: surprisingly, demonstrations are not always the most informative---when the human acts very suboptimally, comparisons actually become more informative, even when the rationality level is the same for both. Ultimately, our results emphasize the importance and advantage of paying attention to the assumed human-rationality-level, especially when agents actively learn from multiple types of human feedback.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ghosal, Gaurav R. and Zurek, Matthew and Brown, Daniel S. and Dragan, Anca D.}, year={2023}, month={Jun.}, pages={5983-5992} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25740/25512", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25740", + "pdf_size": 876794, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12985965196360117859&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "berkeley.edu;cs.wisc.edu;cs.utah.edu;berkeley.edu", + "email": "berkeley.edu;cs.wisc.edu;cs.utah.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of California, Berkeley;University of Wisconsin;University of Utah", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.berkeley.edu;https://www.wisc.edu;https://www.utah.edu", + "aff_unique_abbr": "UC Berkeley;UW;Utah", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Berkeley;Madison;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25770", + "title": "The Effect of Preferences in Abstract Argumentation under a Claim-Centric View", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we study the effect of preferences in abstract argumentation under a claim-centric perspective. 
Recent work has revealed that semantical and computational properties can change when reasoning is performed on \n claim-level rather than on the argument-level, while under certain \n natural restrictions (arguments with the same claims have the \n same outgoing attacks) these properties are conserved. We now investigate\n these effects when, in addition, preferences have to be taken into account and consider four prominent reductions to handle preferences between arguments.\n As we shall see, these reductions give rise to \n different classes of claim-augmented argumentation frameworks, and behave \n differently in terms of semantic properties and computational complexity.\n This strengthens the view that the actual choice for handling preferences \n has to be taken with care.", + "primary_area": "knowledge representation and reasoning", + "author": "Michael Bernreiter; Wolfgang Dvorak; Anna Rapberger; Stefan Woltran", + "authorids": "", + "aff": "Institute of Logic and Computation, TU Wien, Austria; Institute of Logic and Computation, TU Wien, Austria; Institute of Logic and Computation, TU Wien, Austria; Institute of Logic and Computation, TU Wien, Austria", + "bibtex": "@article{Bernreiter_Dvorak_Rapberger_Woltran_2023, title={The Effect of Preferences in Abstract Argumentation under a Claim-Centric View}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25770}, DOI={10.1609/aaai.v37i5.25770}, abstractNote={In this paper, we study the effect of preferences in abstract argumentation under a claim-centric perspective. Recent work has revealed that semantical and computational properties can change when reasoning is performed on claim-level rather than on the argument-level, while under certain natural restrictions (arguments with the same claims have the same outgoing attacks) these properties are conserved. 
We now investigate these effects when, in addition, preferences have to be taken into account and consider four prominent reductions to handle preferences between arguments. As we shall see, these reductions give rise to different classes of claim-augmented argumentation frameworks, and behave differently in terms of semantic properties and computational complexity. This strengthens the view that the actual choice for handling preferences has to be taken with care.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bernreiter, Michael and Dvorak, Wolfgang and Rapberger, Anna and Woltran, Stefan}, year={2023}, month={Jun.}, pages={6253-6261} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25770/25542", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25770", + "pdf_size": 144060, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10062432139082463959&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "dbai.tuwien.ac.at;dbai.tuwien.ac.at;dbai.tuwien.ac.at;dbai.tuwien.ac.at", + "email": "dbai.tuwien.ac.at;dbai.tuwien.ac.at;dbai.tuwien.ac.at;dbai.tuwien.ac.at", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "TU Wien", + "aff_unique_dep": "Institute of Logic and Computation", + "aff_unique_url": "https://www.tuwien.ac.at", + "aff_unique_abbr": "TU Wien", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Austria" + }, + { + "id": "article-25526", + "title": "The Expressive Power of Ad-Hoc Constraints for Modelling CSPs", + "track": "main", + "status": "Technical", + "abstract": "Ad-hoc constraints (also called generic constraints) are important for modelling Constraint Satisfaction Problems (CSPs). 
Many representations have been proposed to define ad-hoc constraints, such as tables, decision diagrams, binary constraint trees, automata and context-free grammars. However, prior works mainly focus on efficient Generalized Arc Consistency (GAC) propagators of ad-hoc constraints using the representations. In this paper, we ask a more fundamental question which bears on modelling constraints in a CSP as ad-hoc constraints, how the choice of constraints and operations affect tractability. Rather than ad-hoc constraints and their GAC propagators, our focus is on their expressive power in terms of succinctness (polysize) and cost of operations/queries (polytime). We use a large set of constraint families to investigate the expressive power of 14 existing ad-hoc constraints. We show a complete map of the succinctness of the ad-hoc constraints. We also present results on the tractability of applying various operations and queries on the ad-hoc constraints. Finally, we give case studies illustrating how our results can be useful for questions in the modelling of CSPs.", + "primary_area": "constraint satisfaction and optimization", + "author": "Ruiwei Wang; Roland H.C. Yap", + "authorids": "", + "aff": "School of Computing, National University of Singapore, 13 Computing Drive, 117417, Singapore; School of Computer Science, Beijing University of Posts and Telecommunications, Beijing 100876, China", + "bibtex": "@article{Wang_Yap_2023, title={The Expressive Power of Ad-Hoc Constraints for Modelling CSPs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25526}, DOI={10.1609/aaai.v37i4.25526}, abstractNote={Ad-hoc constraints (also called generic constraints) are important for modelling Constraint Satisfaction Problems (CSPs). Many representations have been proposed to define ad-hoc constraints, such as tables, decision diagrams, binary constraint trees, automata and context-free grammars. 
However, prior works mainly focus on efficient Generalized Arc Consistency (GAC) propagators of ad-hoc constraints using the representations. In this paper, we ask a more fundamental question which bears on modelling constraints in a CSP as ad-hoc constraints, how the choice of constraints and operations affect tractability. Rather than ad-hoc constraints and their GAC propagators, our focus is on their expressive power in terms of succinctness (polysize) and cost of operations/queries (polytime). We use a large set of constraint families to investigate the expressive power of 14 existing ad-hoc constraints. We show a complete map of the succinctness of the ad-hoc constraints. We also present results on the tractability of applying various operations and queries on the ad-hoc constraints. Finally, we give case studies illustrating how our results can be useful for questions in the modelling of CSPs.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Ruiwei and Yap, Roland H.C.}, year={2023}, month={Jun.}, pages={4104-4114} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25526/25298", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25526", + "pdf_size": 288413, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15861331001586875071&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "comp.nus.edu.sg;comp.nus.edu.sg", + "email": "comp.nus.edu.sg;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "National University of Singapore;Beijing University of Posts and Telecommunications", + "aff_unique_dep": "School of Computing;School of Computer Science", + "aff_unique_url": "https://www.nus.edu.sg;http://www.bupt.edu.cn/", + "aff_unique_abbr": "NUS;BUPT", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Singapore;Beijing", + "aff_country_unique_index": "0;1", + 
"aff_country_unique": "Singapore;China" + }, + { + "id": "article-26209", + "title": "The Implicit Regularization of Momentum Gradient Descent in Overparametrized Models", + "track": "main", + "status": "Technical", + "abstract": "The study of the implicit regularization induced by gradient-based optimization in deep learning is a long-standing pursuit. In the present paper, we characterize the implicit regularization of momentum gradient descent (MGD) in the continuous-time view, so-called momentum gradient flow (MGF). We show that the components of weight vector are learned for a deep linear neural networks at different evolution rates, and this evolution gap increases with the depth. Firstly, we show that if the depth equals one, the evolution gap between the weight vector components is linear, which is consistent with the performance of ridge. In particular, we establish a tight coupling between MGF and ridge for the least squares regression. In detail, we show that when the regularization parameter of ridge is inversely proportional to the square of the time parameter of MGF, the risk of MGF is no more than 1.54 times that of ridge, and their relative Bayesian risks are almost indistinguishable. Secondly, if the model becomes deeper, i.e. the depth is greater than or equal to 2, the evolution gap becomes more significant, which implies an implicit bias towards sparse solutions. 
The numerical experiments strongly support our theoretical results.", + "primary_area": "machine learning iii", + "author": "Li Wang; Zhiguo Fu; Yingcong Zhou; Zili Yan", + "authorids": "", + "aff": "School of Computer Science and Information Technology & KLAS, Northeast Normal University, China; School of Computer Science and Information Technology & KLAS, Northeast Normal University, China; School of Computer Science and Information Technology & KLAS, Northeast Normal University, China; School of Mathematics and Statistics, Beihua University, China", + "bibtex": "@article{Wang_Fu_Zhou_Yan_2023, title={The Implicit Regularization of Momentum Gradient Descent in Overparametrized Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26209}, DOI={10.1609/aaai.v37i8.26209}, abstractNote={The study of the implicit regularization induced by gradient-based optimization in deep learning is a long-standing pursuit. In the present paper, we characterize the implicit regularization of momentum gradient descent (MGD) in the continuous-time view, so-called momentum gradient flow (MGF). We show that the components of weight vector are learned for a deep linear neural networks at different evolution rates, and this evolution gap increases with the depth. Firstly, we show that if the depth equals one, the evolution gap between the weight vector components is linear, which is consistent with the performance of ridge. In particular, we establish a tight coupling between MGF and ridge for the least squares regression. In detail, we show that when the regularization parameter of ridge is inversely proportional to the square of the time parameter of MGF, the risk of MGF is no more than 1.54 times that of ridge, and their relative Bayesian risks are almost indistinguishable. Secondly, if the model becomes deeper, i.e. the depth is greater than or equal to 2, the evolution gap becomes more significant, which implies an implicit bias towards sparse solutions. 
The numerical experiments strongly support our theoretical results.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Li and Fu, Zhiguo and Zhou, Yingcong and Yan, Zili}, year={2023}, month={Jun.}, pages={10149-10156} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26209/25981", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26209", + "pdf_size": 3478086, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2985501302937640422&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "nenu.edu.cn;nenu.edu.cn;163.com;163.com", + "email": "nenu.edu.cn;nenu.edu.cn;163.com;163.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Northeast Normal University;Beihua University", + "aff_unique_dep": "School of Computer Science and Information Technology;School of Mathematics and Statistics", + "aff_unique_url": "http://www.nenu.edu.cn;", + "aff_unique_abbr": "NENU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26006", + "title": "The Influence of Dimensions on the Complexity of Computing Decision Trees", + "track": "main", + "status": "Technical", + "abstract": "A decision tree recursively splits a feature space \\mathbb{R}^d and then assigns class labels based on the resulting partition. Decision trees have been part of the basic machine-learning toolkit for decades. A large body of work considers heuristic algorithms that compute a decision tree from training data, usually aiming to minimize in particular the size of the resulting tree. In contrast, little is known about the complexity of the underlying computational problem of computing a minimum-size tree for the given training data. 
We study this problem with respect to the number d of dimensions of the feature space \\mathbb{R}^d, which contains n training examples. We show that it can be solved in O(n^(2d + 1)) time, but under reasonable complexity-theoretic assumptions it is not possible to achieve f(d) * n^o(d / log d) running time. The problem is solvable in (dR)^O(dR) * n^(1+o(1)) time, if there are exactly two classes and R is an upper bound on the number of tree leaves labeled with the first class.", + "primary_area": "machine learning ii", + "author": "Stephen G. Kobourov; Maarten L\u00f6ffler; Fabrizio Montecchiani; Marcin Pilipczuk; Ignaz Rutter; Raimund Seidel; Manuel Sorge; Jules Wulms", + "authorids": "", + "aff": "University of Arizona, Department of Computer Science; Utrecht University, Department of Information and Computing Sciences; University of Perugia, Department of Engineering; University of Warsaw, Faculty of Mathematics, Informatics, and Mechanics; University of Passau, Faculty of Computer Science and Mathematics; Saarland University, Department of Computer Science; TU Wien, Institute of Logic and Computation; TU Wien, Institute of Logic and Computation", + "bibtex": "@article{Kobourov_L\u00f6ffler_Montecchiani_Pilipczuk_Rutter_Seidel_Sorge_Wulms_2023, title={The Influence of Dimensions on the Complexity of Computing Decision Trees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26006}, DOI={10.1609/aaai.v37i7.26006}, abstractNote={A decision tree recursively splits a feature space \\mathbb{R}^d and then assigns class labels based on the resulting partition. Decision trees have been part of the basic machine-learning toolkit for decades. A large body of work considers heuristic algorithms that compute a decision tree from training data, usually aiming to minimize in particular the size of the resulting tree. 
In contrast, little is known about the complexity of the underlying computational problem of computing a minimum-size tree for the given training data. We study this problem with respect to the number d of dimensions of the feature space \\mathbb{R}^d, which contains n training examples. We show that it can be solved in O(n^(2d + 1)) time, but under reasonable complexity-theoretic assumptions it is not possible to achieve f(d) * n^o(d / log d) running time. The problem is solvable in (dR)^O(dR) * n^(1+o(1)) time, if there are exactly two classes and R is an upper bound on the number of tree leaves labeled with the first class.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kobourov, Stephen G. and L\u00f6ffler, Maarten and Montecchiani, Fabrizio and Pilipczuk, Marcin and Rutter, Ignaz and Seidel, Raimund and Sorge, Manuel and Wulms, Jules}, year={2023}, month={Jun.}, pages={8343-8350} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26006/25778", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26006", + "pdf_size": 196982, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17026513998978155146&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 12, + "aff_domain": "cs.arizona.edu;uu.nl;unipg.it;mimuw.edu.pl;fim.uni-passau.de;cs.uni-saarland.de;ac.tuwien.ac.at;ac.tuwien.ac.at", + "email": "cs.arizona.edu;uu.nl;unipg.it;mimuw.edu.pl;fim.uni-passau.de;cs.uni-saarland.de;ac.tuwien.ac.at;ac.tuwien.ac.at", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;4;5;6;6", + "aff_unique_norm": "University of Arizona;Utrecht University;University of Perugia;University of Warsaw;University of Passau;Saarland University;TU Wien", + "aff_unique_dep": "Department of Computer Science;Department of Information and Computing Sciences;Department of Engineering;Faculty of Mathematics, Informatics, and Mechanics;Faculty of Computer Science and 
Mathematics;Department of Computer Science;Institute of Logic and Computation", + "aff_unique_url": "https://www.arizona.edu;https://www.uu.nl;https://www.unipg.it;https://www.uw.edu.pl;https://www.uni-passau.de;https://www.uni-saarland.de;https://www.tuwien.ac.at", + "aff_unique_abbr": "UArizona;UU;;UW;UP;UdS;TU Wien", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;3;4;4;5;5", + "aff_country_unique": "United States;Netherlands;Italy;Poland;Germany;Austria" + }, + { + "id": "article-26433", + "title": "The Linear Distance Traveling Tournament Problem Allows an EPTAS", + "track": "main", + "status": "Technical", + "abstract": "The Traveling Tournament Problem (TTP-k) is a well-known benchmark problem in tournament timetabling and has been extensively studied in the field of AI. In this problem, we are going to design a double round-robin schedule such that each pair of teams plays one game in each other's home venue, minimizing the total distance traveled by all n teams (n is even) under the constraint that each team can have at most k-consecutive home games or away games. The Linear Distance Traveling Tournament Problem (LDTTP-k), where all teams are located on a line, was introduced by Hoshino and Kawarabayashi (AAAI 2012). For LDTTP-3, they gave a 4/3-approximation algorithm for n\u22614 (mod 6) teams. 
In this paper, we show that for any 3\u2264k=o(\u221bn), LDTTP-k allows an efficient polynomial-time approximation scheme (EPTAS).", + "primary_area": "planning routing and scheduling", + "author": "Jingyang Zhao; Mingyu Xiao", + "authorids": "", + "aff": "University of Electronic Science and Technology of China; University of Electronic Science and Technology of China", + "bibtex": "@article{Zhao_Xiao_2023, title={The Linear Distance Traveling Tournament Problem Allows an EPTAS}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26433}, DOI={10.1609/aaai.v37i10.26433}, abstractNote={The Traveling Tournament Problem (TTP-k) is a well-known benchmark problem in tournament timetabling and has been extensively studied in the field of AI. In this problem, we are going to design a double round-robin schedule such that each pair of teams plays one game in each other\u2019s home venue, minimizing the total distance traveled by all n teams (n is even) under the constraint that each team can have at most k-consecutive home games or away games. The Linear Distance Traveling Tournament Problem (LDTTP-k), where all teams are located on a line, was introduced by Hoshino and Kawarabayashi (AAAI 2012). For LDTTP-3, they gave a 4/3-approximation algorithm for n\u22614 (mod 6) teams. 
In this paper, we show that for any 3\u2264k=o(\u221bn), LDTTP-k allows an efficient polynomial-time approximation scheme (EPTAS).}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Jingyang and Xiao, Mingyu}, year={2023}, month={Jun.}, pages={12155-12162} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26433/26205", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26433", + "pdf_size": 165990, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17963419931425394165&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Electronic Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uestc.edu.cn", + "aff_unique_abbr": "UESTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26796", + "title": "The Many Faces of Adversarial Machine Learning", + "track": "senior member presentation bridge papers", + "status": "Technical", + "abstract": "Adversarial machine learning (AML) research is concerned with robustness of machine learning models and algorithms to malicious tampering. Originating at the intersection between machine learning and cybersecurity, AML has come to have broader research appeal, stretching traditional notions of security to include applications of computer vision, natural language processing, and network science. In addition, the problems of strategic classification, algorithmic recourse, and counterfactual explanations have essentially the same core mathematical structure as AML, despite distinct motivations. 
I give a simplified overview of the central problems in AML, and then discuss both the security-motivated AML domains, and the problems above unrelated to security. These together span a number of important AI subdisciplines, but can all broadly be viewed as concerned with trustworthy AI. My goal is to clarify both the technical connections among these, as well as the substantive differences, suggesting directions for future research.", + "primary_area": "", + "author": "Yevgeniy Vorobeychik", + "authorids": "", + "aff": "Washington University in Saint Louis", + "bibtex": "@article{Vorobeychik_2024, title={The Many Faces of Adversarial Machine Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26796}, DOI={10.1609/aaai.v37i13.26796}, abstractNote={Adversarial machine learning (AML) research is concerned with robustness of machine learning models and algorithms to malicious tampering. Originating at the intersection between machine learning and cybersecurity, AML has come to have broader research appeal, stretching traditional notions of security to include applications of computer vision, natural language processing, and network science. In addition, the problems of strategic classification, algorithmic recourse, and counterfactual explanations have essentially the same core mathematical structure as AML, despite distinct motivations. I give a simplified overview of the central problems in AML, and then discuss both the security-motivated AML domains, and the problems above unrelated to security. These together span a number of important AI subdisciplines, but can all broadly be viewed as concerned with trustworthy AI. 
My goal is to clarify both the technical connections among these, as well as the substantive differences, suggesting directions for future research.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vorobeychik, Yevgeniy}, year={2024}, month={Jul.}, pages={15402-15409} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26796/26568", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26796", + "pdf_size": 124914, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6653189339482706472&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 3, + "aff_domain": "wustl.edu", + "email": "wustl.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Washington University in St. Louis", + "aff_unique_dep": "", + "aff_unique_url": "https://wustl.edu", + "aff_unique_abbr": "WUSTL", + "aff_campus_unique_index": "0", + "aff_campus_unique": "St. Louis", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26362", + "title": "The Multi-Agent Transportation Problem", + "track": "main", + "status": "Technical", + "abstract": "We introduce the multi-agent transportation (MAT) problem, where agents have to transport containers from their starting positions to their designated goal positions. 
Movement takes place in a common environment where collisions between agents and between containers must be avoided.\nIn contrast to other frameworks such as multi-agent pathfinding (MAPF) or multi-agent pickup and delivery (MAPD), the agents are allowed to separate from the containers at any time, which can reduce the makespan and also allows for plans in scenarios that are unsolvable otherwise.\nWe present a complexity analysis establishing the problem's NP-completeness and show how the problem can be reduced to a sequence of SAT problems when optimizing for makespan.\nA MAT solver is empirically evaluated with regard to varying input characteristics and movement constraints and compared to a MAPD solver that utilizes conflict-based search (CBS).", + "primary_area": "multiagent systems", + "author": "Pascal Bachor; Rolf-David Bergdoll; Bernhard Nebel", + "authorids": "", + "aff": "Albert-Ludwigs-Universit\u00e4t Freiburg; Albert-Ludwigs-Universit\u00e4t Freiburg; Albert-Ludwigs-Universit\u00e4t Freiburg", + "bibtex": "@article{Bachor_Bergdoll_Nebel_2023, title={The Multi-Agent Transportation Problem}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26362}, DOI={10.1609/aaai.v37i10.26362}, abstractNote={We introduce the multi-agent transportation (MAT) problem, where agents have to transport containers from their starting positions to their designated goal positions. 
Movement takes place in a common environment where collisions between agents and between containers must be avoided.\nIn contrast to other frameworks such as multi-agent pathfinding (MAPF) or multi-agent pickup and delivery (MAPD), the agents are allowed to separate from the containers at any time, which can reduce the makespan and also allows for plans in scenarios that are unsolvable otherwise.\nWe present a complexity analysis establishing the problem\u2019s NP-completeness and show how the problem can be reduced to a sequence of SAT problems when optimizing for makespan.\nA MAT solver is empirically evaluated with regard to varying input characteristics and movement constraints and compared to a MAPD solver that utilizes conflict-based search (CBS).}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bachor, Pascal and Bergdoll, Rolf-David and Nebel, Bernhard}, year={2023}, month={Jun.}, pages={11525-11532} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26362/26134", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26362", + "pdf_size": 688733, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7236221872507753933&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "cs.uni-freiburg.de;cs.uni-freiburg.de;cs.uni-freiburg.de", + "email": "cs.uni-freiburg.de;cs.uni-freiburg.de;cs.uni-freiburg.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Albert-Ludwigs-Universit\u00e4t Freiburg", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uni-freiburg.de", + "aff_unique_abbr": "Albert-Ludwigs-Universit\u00e4t", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-27034", + "title": "The Naughtyformer: A Transformer Understands and Moderates Adult Humor (Student Abstract)", + 
"track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Jokes are intentionally written to be funny, but not all jokes are created the same. While recent work has shown impressive results on humor detection in text, we instead investigate the more nuanced task of detecting humor subtypes, especially of the more adult variety. To that end, we introduce a novel jokes dataset filtered from Reddit and solve the subtype\nclassification task using a finetuned Transformer dubbed the Naughtyformer. Moreover, we show that our model is significantly better at detecting offensiveness in jokes compared to state-of-the-art methods.", + "primary_area": "", + "author": "Leonard Tang; Alexander Cai; Jason Wang", + "authorids": "", + "aff": "Harvard University; Harvard University; Harvard University", + "bibtex": "@article{Tang_Cai_Wang_2024, title={The Naughtyformer: A Transformer Understands and Moderates Adult Humor (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27034}, DOI={10.1609/aaai.v37i13.27034}, abstractNote={Jokes are intentionally written to be funny, but not all jokes are created the same. While recent work has shown impressive results on humor detection in text, we instead investigate the more nuanced task of detecting humor subtypes, especially of the more adult variety. To that end, we introduce a novel jokes dataset filtered from Reddit and solve the subtype\nclassification task using a finetuned Transformer dubbed the Naughtyformer. 
Moreover, we show that our model is significantly better at detecting offensiveness in jokes compared to state-of-the-art methods.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tang, Leonard and Cai, Alexander and Wang, Jason}, year={2024}, month={Jul.}, pages={16348-16349} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27034/26806", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27034", + "pdf_size": 59487, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4765861772982058693&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "college.harvard.edu;college.harvard.edu;college.harvard.edu", + "email": "college.harvard.edu;college.harvard.edu;college.harvard.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Harvard University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.harvard.edu", + "aff_unique_abbr": "Harvard", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25771", + "title": "The Parameterized Complexity of Network Microaggregation", + "track": "main", + "status": "Technical", + "abstract": "Microaggregation is a classical statistical disclosure control technique which requires the input data to be partitioned into clusters while adhering to specified size constraints. We provide novel exact algorithms and lower bounds for the task of microaggregating a given network while considering both unrestricted and connected clusterings, and analyze these from the perspective of the parameterized complexity paradigm. 
Altogether, our results assemble a complete complexity-theoretic picture for the network microaggregation problem with respect to the most natural parameterizations of the problem, including input-specified parameters capturing the size and homogeneity of the clusters as well as the treewidth and vertex cover number of the network.", + "primary_area": "knowledge representation and reasoning", + "author": "V\u00e1clav Bla\u017eej; Robert Ganian; Du\u0161an Knop; Jan Pokorn\u00fd; \u0160imon Schierreich; Kirill Simonov", + "authorids": "", + "aff": "Faculty of Information Technology, Czech Technical University in Prague, Prague, Czechia; Algorithms and Complexity Group, Technische Universit\u00e4t Wien, Vienna, Austria; Faculty of Information Technology, Czech Technical University in Prague, Prague, Czechia; Faculty of Information Technology, Czech Technical University in Prague, Prague, Czechia; Faculty of Information Technology, Czech Technical University in Prague, Prague, Czechia; Hasso Plattner Institute, University of Potsdam, Potsdam, Germany", + "bibtex": "@article{Bla\u017eej_Ganian_Knop_Pokorn\u00fd_Schierreich_Simonov_2023, title={The Parameterized Complexity of Network Microaggregation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25771}, DOI={10.1609/aaai.v37i5.25771}, abstractNote={Microaggregation is a classical statistical disclosure control technique which requires the input data to be partitioned into clusters while adhering to specified size constraints. We provide novel exact algorithms and lower bounds for the task of microaggregating a given network while considering both unrestricted and connected clusterings, and analyze these from the perspective of the parameterized complexity paradigm. 
Altogether, our results assemble a complete complexity-theoretic picture for the network microaggregation problem with respect to the most natural parameterizations of the problem, including input-specified parameters capturing the size and homogeneity of the clusters as well as the treewidth and vertex cover number of the network.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bla\u017eej, V\u00e1clav and Ganian, Robert and Knop, Du\u0161an and Pokorn\u00fd, Jan and Schierreich, \u0160imon and Simonov, Kirill}, year={2023}, month={Jun.}, pages={6262-6270} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25771/25543", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25771", + "pdf_size": 173755, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12567126091212960832&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "fit.cvut.cz;gmail.com;fit.cvut.cz;fit.cvut.cz;fit.cvut.cz;hpi.de", + "email": "fit.cvut.cz;gmail.com;fit.cvut.cz;fit.cvut.cz;fit.cvut.cz;hpi.de", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;2", + "aff_unique_norm": "Czech Technical University in Prague;Technische Universit\u00e4t Wien;Hasso Plattner Institute", + "aff_unique_dep": "Faculty of Information Technology;Algorithms and Complexity Group;", + "aff_unique_url": "https://www.fel.cvut.cz;https://www.tuwien.ac.at;https://www.hpi.de", + "aff_unique_abbr": "CTU;TU Wien;HPI", + "aff_campus_unique_index": "0;1;0;0;0;2", + "aff_campus_unique": "Prague;Vienna;Potsdam", + "aff_country_unique_index": "0;1;0;0;0;2", + "aff_country_unique": "Czechia;Austria;Germany" + }, + { + "id": "article-25733", + "title": "The Perils of Trial-and-Error Reward Design: Misdesign through Overfitting and Invalid Task Specifications", + "track": "main", + "status": "Technical", + "abstract": "In reinforcement learning (RL), a reward function that aligns 
exactly with a task's true performance metric is often necessarily sparse. For example, a true task metric might encode a reward of 1 upon success and 0 otherwise. The sparsity of these true task metrics can make them hard to learn from, so in practice they are often replaced with alternative dense reward functions. These dense reward functions are typically designed by experts through an ad hoc process of trial and error. In this process, experts manually search for a reward function that improves performance with respect to the task metric while also enabling an RL algorithm to learn faster. This process raises the question of whether the same reward function is optimal for all algorithms, i.e., whether the reward function can be overfit to a particular algorithm. In this paper, we study the consequences of this wide yet unexamined practice of trial-and-error reward design. We first conduct computational experiments that confirm that reward functions can be overfit to learning algorithms and their hyperparameters. We then conduct a controlled observation study which emulates expert practitioners' typical experiences of reward design, in which we similarly find evidence of reward function overfitting. We also find that experts' typical approach to reward design---of adopting a myopic strategy and weighing the relative goodness of each state-action pair---leads to misdesign through invalid task specifications, since RL algorithms use cumulative reward rather than rewards for individual state-action pairs as an optimization target.\n\nCode, data: github.com/serenabooth/reward-design-perils", + "primary_area": "humans and ai", + "author": "Serena Booth; W. 
Bradley Knox; Julie Shah; Scott Niekum; Peter Stone; Alessandro Allievi", + "authorids": "", + "aff": "Bosch+The University of Texas at Austin+MIT CSAIL; Bosch+The University of Texas at Austin+Google Research; MIT CSAIL; The University of Texas at Austin+The University of Massachusetts Amherst; The University of Texas at Austin+Sony AI; Bosch+The University of Texas at Austin", + "bibtex": "@article{Booth_Knox_Shah_Niekum_Stone_Allievi_2023, title={The Perils of Trial-and-Error Reward Design: Misdesign through Overfitting and Invalid Task Specifications}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25733}, DOI={10.1609/aaai.v37i5.25733}, abstractNote={In reinforcement learning (RL), a reward function that aligns exactly with a task\u2019s true performance metric is often necessarily sparse. For example, a true task metric might encode a reward of 1 upon success and 0 otherwise. The sparsity of these true task metrics can make them hard to learn from, so in practice they are often replaced with alternative dense reward functions. These dense reward functions are typically designed by experts through an ad hoc process of trial and error. In this process, experts manually search for a reward function that improves performance with respect to the task metric while also enabling an RL algorithm to learn faster. This process raises the question of whether the same reward function is optimal for all algorithms, i.e., whether the reward function can be overfit to a particular algorithm. In this paper, we study the consequences of this wide yet unexamined practice of trial-and-error reward design. We first conduct computational experiments that confirm that reward functions can be overfit to learning algorithms and their hyperparameters. We then conduct a controlled observation study which emulates expert practitioners\u2019 typical experiences of reward design, in which we similarly find evidence of reward function overfitting. 
We also find that experts\u2019 typical approach to reward design---of adopting a myopic strategy and weighing the relative goodness of each state-action pair---leads to misdesign through invalid task specifications, since RL algorithms use cumulative reward rather than rewards for individual state-action pairs as an optimization target. Code, data: github.com/serenabooth/reward-design-perils}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Booth, Serena and Knox, W. Bradley and Shah, Julie and Niekum, Scott and Stone, Peter and Allievi, Alessandro}, year={2023}, month={Jun.}, pages={5920-5929} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25733/25505", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25733", + "pdf_size": 215543, + "gs_citation": 85, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4079011939412229463&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "csail.mit.edu;cs.utexas.edu;csail.mit.edu;cs.umass.edu;cs.utexas.edu;us.bosch.com", + "email": "csail.mit.edu;cs.utexas.edu;csail.mit.edu;cs.umass.edu;cs.utexas.edu;us.bosch.com", + "github": "github.com/serenabooth/reward-design-perils", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0+1+3;2;1+4;1+5;0+1", + "aff_unique_norm": "Robert Bosch GmbH;University of Texas at Austin;Massachusetts Institute of Technology;Google;University of Massachusetts Amherst;Sony", + "aff_unique_dep": ";;Computer Science and Artificial Intelligence Laboratory;Google Research;;Sony AI", + "aff_unique_url": "https://www.bosch.com;https://www.utexas.edu;https://www.csail.mit.edu;https://research.google;https://www.umass.edu;https://www.sony.com", + "aff_unique_abbr": "Bosch;UT Austin;MIT CSAIL;Google Research;UMass Amherst;Sony AI", + "aff_campus_unique_index": "1+2;1+3;2;1+4;1;1", + "aff_campus_unique": ";Austin;Cambridge;Mountain View;Amherst", + "aff_country_unique_index": 
"0+1+1;0+1+1;1;1+1;1+2;0+1", + "aff_country_unique": "Germany;United States;Japan" + }, + { + "id": "article-25741", + "title": "The Role of Heuristics and Biases during Complex Choices with an AI Teammate", + "track": "main", + "status": "Technical", + "abstract": "Behavioral scientists have classically documented aversion to algorithmic decision aids, from simple linear models to AI. Sentiment, however, is changing and possibly accelerating AI helper usage. AI assistance is, arguably, most valuable when humans must make complex choices. We argue that classic experimental methods used to study heuristics and biases are insufficient for studying complex choices made with AI helpers. We adapted an experimental paradigm designed for studying complex choices in such contexts. We show that framing and anchoring effects impact how people work with an AI helper and are predictive of choice outcomes. The evidence suggests that some participants, particularly those in a loss frame, put too much faith in the AI helper and experienced worse choice outcomes by doing so. The paradigm also generates computational modeling-friendly data allowing future studies of human-AI decision making.", + "primary_area": "humans and ai", + "author": "Nikolos Gurney; John H. Miller; David V. Pynadath", + "authorids": "", + "aff": "Institute for Creative Technologies, University of Southern California; Carnegie Mellon University + Santa Fe Institute; Institute for Creative Technologies, University of Southern California", + "bibtex": "@article{Gurney_Miller_Pynadath_2023, title={The Role of Heuristics and Biases during Complex Choices with an AI Teammate}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25741}, DOI={10.1609/aaai.v37i5.25741}, abstractNote={Behavioral scientists have classically documented aversion to algorithmic decision aids, from simple linear models to AI. Sentiment, however, is changing and possibly accelerating AI helper usage. 
AI assistance is, arguably, most valuable when humans must make complex choices. We argue that classic experimental methods used to study heuristics and biases are insufficient for studying complex choices made with AI helpers. We adapted an experimental paradigm designed for studying complex choices in such contexts. We show that framing and anchoring effects impact how people work with an AI helper and are predictive of choice outcomes. The evidence suggests that some participants, particularly those in a loss frame, put too much faith in the AI helper and experienced worse choice outcomes by doing so. The paradigm also generates computational modeling-friendly data allowing future studies of human-AI decision making.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gurney, Nikolos and Miller, John H. and Pynadath, David V.}, year={2023}, month={Jun.}, pages={5993-6001} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25741/25513", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25741", + "pdf_size": 1032916, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12056340413318152061&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "ict.usc.edu;andrew.cmu.edu;ict.usc.edu", + "email": "ict.usc.edu;andrew.cmu.edu;ict.usc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "University of Southern California;Carnegie Mellon University;Santa Fe Institute", + "aff_unique_dep": "Institute for Creative Technologies;;", + "aff_unique_url": "https://ict.usc.edu;https://www.cmu.edu;https://www.santafe.edu", + "aff_unique_abbr": "USC;CMU;SFI", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25864", + "title": "The Sufficiency of Off-Policyness and Soft 
Clipping: PPO Is Still Insufficient according to an Off-Policy Measure", + "track": "main", + "status": "Technical", + "abstract": "The popular Proximal Policy Optimization (PPO) algorithm approximates the solution in a clipped policy space. Does there exist better policies outside of this space? By using a novel surrogate objective that employs the sigmoid function (which provides an interesting way of exploration), we found that the answer is \"YES\", and the better policies are in fact located very far from the clipped space. We show that PPO is insufficient in \"off-policyness\", according to an off-policy metric called DEON. Our algorithm explores in a much larger policy space than PPO, and it maximizes the Conservative Policy Iteration (CPI) objective better than PPO during training. To the best of our knowledge, all current PPO methods have the clipping operation and optimize in the clipped policy space. Our method is the first of this kind, which advances the understanding of CPI optimization and policy gradient methods. 
Code is available at https://github.com/raincchio/P3O.", + "primary_area": "machine learning i", + "author": "Xing Chen; Dongcui Diao; Hechang Chen; Hengshuai Yao; Haiyin Piao; Zhixiao Sun; Zhiwei Yang; Randy Goebel; Bei Jiang; Yi Chang", + "authorids": "", + "aff": "School of Artificial Intelligence, Jilin University, Changchun, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China; Department of Mathematical and Statistical Sciences, University of Alberta, Edmonton, Canada; School of Artificial Intelligence, Jilin University, Changchun, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China; Department of Computing Science, University of Alberta, Edmonton, Canada; School of Electronics and Information, Northwestern Polytechnical University, Xian, China; School of Electronics and Information, Northwestern Polytechnical University, Xian, China; School of Artificial Intelligence, Jilin University, Changchun, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China; Department of Computing Science, University of Alberta, Edmonton, Canada+Alberta Machine Intelligence Institute, University of Alberta, Edmonton, Canada; Department of Mathematical and Statistical Sciences, University of Alberta, Edmonton, Canada+Alberta Machine Intelligence Institute, University of Alberta, Edmonton, Canada; School of Artificial Intelligence, Jilin University, Changchun, China+Engineering Research Center of Knowledge-Driven Human-Machine Intelligence, Ministry of Education, China", + "bibtex": "@article{Chen_Diao_Chen_Yao_Piao_Sun_Yang_Goebel_Jiang_Chang_2023, title={The Sufficiency of Off-Policyness and Soft Clipping: PPO Is Still Insufficient according to an Off-Policy Measure}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25864}, DOI={10.1609/aaai.v37i6.25864}, abstractNote={The popular Proximal 
Policy Optimization (PPO) algorithm approximates the solution in a clipped policy space. Does there exist better policies outside of this space? By using a novel surrogate objective that employs the sigmoid function (which provides an interesting way of exploration), we found that the answer is "YES", and the better policies are in fact located very far from the clipped space. We show that PPO is insufficient in "off-policyness", according to an off-policy metric called DEON. Our algorithm explores in a much larger policy space than PPO, and it maximizes the Conservative Policy Iteration (CPI) objective better than PPO during training. To the best of our knowledge, all current PPO methods have the clipping operation and optimize in the clipped policy space. Our method is the first of this kind, which advances the understanding of CPI optimization and policy gradient methods. Code is available at https://github.com/raincchio/P3O.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Xing and Diao, Dongcui and Chen, Hechang and Yao, Hengshuai and Piao, Haiyin and Sun, Zhixiao and Yang, Zhiwei and Goebel, Randy and Jiang, Bei and Chang, Yi}, year={2023}, month={Jun.}, pages={7078-7086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25864/25636", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25864", + "pdf_size": 788692, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10598361949806309763&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.jlu.edu.cn; ;jlu.edu.cn;ualberta.ca; ; ; ; ; ;jlu.edu.cn", + "email": "mails.jlu.edu.cn; ;jlu.edu.cn;ualberta.ca; ; ; ; ; ;jlu.edu.cn", + "github": "https://github.com/raincchio/P3O", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1;2;0+1;2;3;3;0+1;2+2;2+2;0+1", + "aff_unique_norm": "Jilin University;Engineering Research Center of Knowledge-Driven Human-Machine 
Intelligence;University of Alberta;Northwestern Polytechnical University", + "aff_unique_dep": "School of Artificial Intelligence;Ministry of Education;Department of Mathematical and Statistical Sciences;School of Electronics and Information", + "aff_unique_url": "http://www.jlu.edu.cn;;https://www.ualberta.ca;http://www.nwpu.edu.cn", + "aff_unique_abbr": "JLU;;UAlberta;NPU", + "aff_campus_unique_index": "0;2;0;2;3;3;0;2+2;2+2;0", + "aff_campus_unique": "Changchun;;Edmonton;Xian", + "aff_country_unique_index": "0+0;1;0+0;1;0;0;0+0;1+1;1+1;0+0", + "aff_country_unique": "China;Canada" + }, + { + "id": "article-26096", + "title": "The Unreasonable Effectiveness of Deep Evidential Regression", + "track": "main", + "status": "Technical", + "abstract": "There is a significant need for principled uncertainty reasoning in machine learning systems as they are increasingly deployed in safety-critical domains.\nA new approach with uncertainty-aware regression-based neural networks (NNs), based on learning evidential distributions for aleatoric and epistemic uncertainties, shows promise over traditional deterministic methods and typical Bayesian NNs, notably with the capabilities to disentangle aleatoric and epistemic uncertainties.\nDespite some empirical success of Deep Evidential Regression (DER), there are important gaps in the mathematical foundation that raise the question of why the proposed technique seemingly works.\nWe detail the theoretical shortcomings and analyze the performance on synthetic and real-world data sets, showing that Deep Evidential Regression is a heuristic rather than an exact uncertainty quantification.\nWe go on to discuss corrections and redefinitions of how aleatoric and epistemic uncertainties should be extracted from NNs.", + "primary_area": "machine learning iii", + "author": "Nis Meinert; Jakob Gawlikowski; Alexander Lavin", + "authorids": "", + "aff": "Pasteur Labs; German Aerospace Center, Institute of Data Science; Pasteur Labs", + 
"bibtex": "@article{Meinert_Gawlikowski_Lavin_2023, title={The Unreasonable Effectiveness of Deep Evidential Regression}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26096}, DOI={10.1609/aaai.v37i8.26096}, abstractNote={There is a significant need for principled uncertainty reasoning in machine learning systems as they are increasingly deployed in safety-critical domains.\nA new approach with uncertainty-aware regression-based neural networks (NNs), based on learning evidential distributions for aleatoric and epistemic uncertainties, shows promise over traditional deterministic methods and typical Bayesian NNs, notably with the capabilities to disentangle aleatoric and epistemic uncertainties.\nDespite some empirical success of Deep Evidential Regression (DER), there are important gaps in the mathematical foundation that raise the question of why the proposed technique seemingly works.\nWe detail the theoretical shortcomings and analyze the performance on synthetic and real-world data sets, showing that Deep Evidential Regression is a heuristic rather than an exact uncertainty quantification.\nWe go on to discuss corrections and redefinitions of how aleatoric and epistemic uncertainties should be extracted from NNs.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Meinert, Nis and Gawlikowski, Jakob and Lavin, Alexander}, year={2023}, month={Jun.}, pages={9134-9142} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26096/25868", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26096", + "pdf_size": 1379967, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2310007111060267405&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "simulation.science;dlr.de;simulation.science", + "email": "simulation.science;dlr.de;simulation.science", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + 
"aff_unique_norm": "Pasteur Labs;German Aerospace Center", + "aff_unique_dep": "Pasteur Labs;Institute of Data Science", + "aff_unique_url": "https://simulation.science;https://www.dlr.de", + "aff_unique_abbr": "Pasteur;DLR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "article-25734", + "title": "The Value of AI Guidance in Human Examination of Synthetically-Generated Faces", + "track": "main", + "status": "Technical", + "abstract": "Face image synthesis has progressed beyond the point at which humans can effectively distinguish authentic faces from synthetically-generated ones. Recently developed synthetic face image detectors boast ``better-than-human'' discriminative ability, especially those guided by human perceptual intelligence during the model's training process. In this paper, we investigate whether these human-guided synthetic face detectors can assist non-expert human operators in the task of synthetic image detection when compared to models trained without human-guidance. We conducted a large-scale experiment with more than 1,560 subjects classifying whether an image shows an authentic or synthetically-generated face, and annotating regions supporting their decisions. In total, 56,015 annotations across 3,780 unique face images were collected. All subjects first examined samples without any AI support, followed by samples given (a) the AI's decision (``synthetic'' or ``authentic''), (b) class activation maps illustrating where the model deems salient for its decision, \nor (c) both the AI's decision and AI's saliency map. Synthetic faces were generated with six modern Generative Adversarial Networks. 
Interesting observations from this experiment include: (1) models trained with human-guidance, which are also more accurate in our experiments, offer better support to human examination of face images when compared to models trained traditionally using cross-entropy loss, (2) binary decisions presented to humans results in their better performance than when saliency maps are presented, (3) understanding the AI's accuracy helps humans to increase trust in a given model and thus increase their overall accuracy. This work demonstrates that although humans supported by machines achieve better-than-random accuracy of synthetic face detection, the approaches of supplying humans with AI support and of building trust are key factors determining high effectiveness of the human-AI tandem.", + "primary_area": "humans and ai", + "author": "Aidan Boyd; Patrick Tinsley; Kevin Bowyer; Adam Czajka", + "authorids": "", + "aff": "University of Notre Dame, Notre Dame, Indiana 46556, USA; University of Notre Dame, Notre Dame, Indiana 46556, USA; University of Notre Dame, Notre Dame, Indiana 46556, USA; University of Notre Dame, Notre Dame, Indiana 46556, USA", + "bibtex": "@article{Boyd_Tinsley_Bowyer_Czajka_2023, title={The Value of AI Guidance in Human Examination of Synthetically-Generated Faces}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25734}, DOI={10.1609/aaai.v37i5.25734}, abstractNote={Face image synthesis has progressed beyond the point at which humans can effectively distinguish authentic faces from synthetically-generated ones. Recently developed synthetic face image detectors boast ``better-than-human\u2019\u2019 discriminative ability, especially those guided by human perceptual intelligence during the model\u2019s training process. In this paper, we investigate whether these human-guided synthetic face detectors can assist non-expert human operators in the task of synthetic image detection when compared to models trained without human-guidance. 
We conducted a large-scale experiment with more than 1,560 subjects classifying whether an image shows an authentic or synthetically-generated face, and annotating regions supporting their decisions. In total, 56,015 annotations across 3,780 unique face images were collected. All subjects first examined samples without any AI support, followed by samples given (a) the AI\u2019s decision (``synthetic\u2019\u2019 or ``authentic\u2019\u2019), (b) class activation maps illustrating where the model deems salient for its decision, or (c) both the AI\u2019s decision and AI\u2019s saliency map. Synthetic faces were generated with six modern Generative Adversarial Networks. Interesting observations from this experiment include: (1) models trained with human-guidance, which are also more accurate in our experiments, offer better support to human examination of face images when compared to models trained traditionally using cross-entropy loss, (2) binary decisions presented to humans results in their better performance than when saliency maps are presented, (3) understanding the AI\u2019s accuracy helps humans to increase trust in a given model and thus increase their overall accuracy. 
This work demonstrates that although humans supported by machines achieve better-than-random accuracy of synthetic face detection, the approaches of supplying humans with AI support and of building trust are key factors determining high effectiveness of the human-AI tandem.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Boyd, Aidan and Tinsley, Patrick and Bowyer, Kevin and Czajka, Adam}, year={2023}, month={Jun.}, pages={5930-5938} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25734/25506", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25734", + "pdf_size": 4185856, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13870744078870223833&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "nd.edu;nd.edu;nd.edu;nd.edu", + "email": "nd.edu;nd.edu;nd.edu;nd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Notre Dame", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nd.edu", + "aff_unique_abbr": "Notre Dame", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Notre Dame", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26923", + "title": "Theory of Mind: A Familiar Aspect of Humanity to Give Machines", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "My research focuses on machine models of theory of mind, a set of skills that helps humans cooperate with each other. Because these skills present themselves in behavior, inference-based measurements must be carefully designed to rule out alternate hypotheses. Producing models that display these skills requires an extensive understanding of experiences and mechanisms sufficient for learning, and the models must have robust generalization to be effective in varied domains. 
To address these problems, I intend to evaluate computational models of ToM using a variety of tests.", + "primary_area": "", + "author": "Joel Michelson", + "authorids": "", + "aff": "Vanderbilt University Department of Computer Science", + "bibtex": "@article{Michelson_2024, title={Theory of Mind: A Familiar Aspect of Humanity to Give Machines}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26923}, DOI={10.1609/aaai.v37i13.26923}, abstractNote={My research focuses on machine models of theory of mind, a set of skills that helps humans cooperate with each other. Because these skills present themselves in behavior, inference-based measurements must be carefully designed to rule out alternate hypotheses. Producing models that display these skills requires an extensive understanding of experiences and mechanisms sufficient for learning, and the models must have robust generalization to be effective in varied domains. To address these problems, I intend to evaluate computational models of ToM using a variety of tests.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Michelson, Joel}, year={2024}, month={Jul.}, pages={16125-16126} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26923/26695", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26923", + "pdf_size": 106383, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:iKeCmEjA8_UJ:scholar.google.com/&scioq=Theory+of+Mind:+A+Familiar+Aspect+of+Humanity+to+Give+Machines&hl=en&as_sdt=0,44", + "gs_version_total": 2, + "aff_domain": "vanderbilt.edu", + "email": "vanderbilt.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Vanderbilt University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.vanderbilt.edu", + "aff_unique_abbr": "Vanderbilt", + "aff_country_unique_index": "0", + "aff_country_unique": 
"United States" + }, + { + "id": "article-25695", + "title": "Tight Inapproximability for Graphical Games", + "track": "main", + "status": "Technical", + "abstract": "We provide a complete characterization for the computational complexity of finding approximate equilibria in two-action graphical games. We consider the two most well-studied approximation notions: \u03b5-Nash equilibria (\u03b5-NE) and \u03b5-well-supported Nash equilibria (\u03b5-WSNE), where \u03b5 is in [0,1]. We prove that computing an \u03b5-NE is PPAD-complete for any constant \u03b5 smaller than 1/2, while a very simple algorithm (namely, letting all players mix uniformly between their two actions) yields a 1/2-NE. On the other hand, we show that computing an \u03b5-WSNE is PPAD-complete for any constant \u03b5 smaller than 1, while a 1-WSNE is trivial to achieve, because any strategy profile is a 1-WSNE. All of our lower bounds immediately also apply to graphical games with more than two actions per player.", + "primary_area": "game theory and economic paradigms", + "author": "Argyrios Deligkas; John Fearnley; Alexandros Hollender; Themistoklis Melissourgos", + "authorids": "", + "aff": "Royal Holloway, United Kingdom; University of Liverpool, United Kingdom; EPFL, Switzerland; University of Essex, United Kingdom", + "bibtex": "@article{Deligkas_Fearnley_Hollender_Melissourgos_2023, title={Tight Inapproximability for Graphical Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25695}, DOI={10.1609/aaai.v37i5.25695}, abstractNote={We provide a complete characterization for the computational complexity of finding approximate equilibria in two-action graphical games. We consider the two most well-studied approximation notions: \u03b5-Nash equilibria (\u03b5-NE) and \u03b5-well-supported Nash equilibria (\u03b5-WSNE), where \u03b5 is in [0,1]. 
We prove that computing an \u03b5-NE is PPAD-complete for any constant \u03b5 smaller than 1/2, while a very simple algorithm (namely, letting all players mix uniformly between their two actions) yields a 1/2-NE. On the other hand, we show that computing an \u03b5-WSNE is PPAD-complete for any constant \u03b5 smaller than 1, while a 1-WSNE is trivial to achieve, because any strategy profile is a 1-WSNE. All of our lower bounds immediately also apply to graphical games with more than two actions per player.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Deligkas, Argyrios and Fearnley, John and Hollender, Alexandros and Melissourgos, Themistoklis}, year={2023}, month={Jun.}, pages={5600-5607} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25695/25467", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25695", + "pdf_size": 160171, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17947709513325579969&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "rhul.ac.uk;liverpool.ac.uk;epfl.ch;essex.ac.uk", + "email": "rhul.ac.uk;liverpool.ac.uk;epfl.ch;essex.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Royal Holloway, University of London;University of Liverpool;\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne;University of Essex", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.royalholloway.ac.uk;https://www.liverpool.ac.uk;https://www.epfl.ch;https://www.essex.ac.uk", + "aff_unique_abbr": "RHUL;Liv Uni;EPFL;Essex", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United Kingdom;Switzerland" + }, + { + "id": "article-26089", + "title": "Tight Performance Guarantees of Imitator Policies with Continuous Actions", + "track": "main", + "status": "Technical", + "abstract": "Behavioral 
Cloning (BC) aims at learning a policy that mimics the behavior demonstrated by an expert. The current theoretical understanding of BC is limited to the case of finite actions. In this paper, we study BC with the goal of providing theoretical guarantees on the performance of the imitator policy in the case of continuous actions. We start by deriving a novel bound on the performance gap based on Wasserstein distance, applicable for continuous-action experts, holding under the assumption that the value function is Lipschitz continuous. Since this latter condition is hardy fulfilled in practice, even for Lipschitz Markov Decision Processes and policies, we propose a relaxed setting, proving that value function is always H\\\"older continuous. This result is of independent interest and allows obtaining in BC a general bound for the performance of the imitator policy. Finally, we analyze noise injection, a common practice in which the expert's action is executed in the environment after the application of a noise kernel. We show that this practice allows deriving stronger performance guarantees, at the price of a bias due to the noise addition.", + "primary_area": "machine learning iii", + "author": "Davide Maran; Alberto Maria Metelli; Marcello Restelli", + "authorids": "", + "aff": "Politecnico di Milano; Politecnico di Milano; Politecnico di Milano", + "bibtex": "@article{Maran_Metelli_Restelli_2023, title={Tight Performance Guarantees of Imitator Policies with Continuous Actions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26089}, DOI={10.1609/aaai.v37i8.26089}, abstractNote={Behavioral Cloning (BC) aims at learning a policy that mimics the behavior demonstrated by an expert. The current theoretical understanding of BC is limited to the case of finite actions. In this paper, we study BC with the goal of providing theoretical guarantees on the performance of the imitator policy in the case of continuous actions. 
We start by deriving a novel bound on the performance gap based on Wasserstein distance, applicable for continuous-action experts, holding under the assumption that the value function is Lipschitz continuous. Since this latter condition is hardy fulfilled in practice, even for Lipschitz Markov Decision Processes and policies, we propose a relaxed setting, proving that value function is always H\\"older continuous. This result is of independent interest and allows obtaining in BC a general bound for the performance of the imitator policy. Finally, we analyze noise injection, a common practice in which the expert\u2019s action is executed in the environment after the application of a noise kernel. We show that this practice allows deriving stronger performance guarantees, at the price of a bias due to the noise addition.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Maran, Davide and Metelli, Alberto Maria and Restelli, Marcello}, year={2023}, month={Jun.}, pages={9073-9080} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26089/25861", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26089", + "pdf_size": 242024, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17320386408975897353&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "polimi.it;polimi.it;polimi.it", + "email": "polimi.it;polimi.it;polimi.it", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Politecnico di Milano", + "aff_unique_dep": "", + "aff_unique_url": "https://www.polimi.it", + "aff_unique_abbr": "Polimi", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25666", + "title": "Tighter Robust Upper Bounds for Options via No-Regret Learning", + "track": "main", + "status": "Technical", + "abstract": "Classic 
option pricing models, such as the Black-Scholes formula, often depend on some rigid assumptions on the dynamics of the underlying asset prices. These assumptions are inevitably violated in practice and thus induce the model risk. To mitigate this, robust option pricing that only requires the no-arbitrage principle has attracted a great deal of attention among researchers. In this paper, we give new robust upper bounds for option prices based on a novel \u03b7-momentum trading strategy. Our bounds for European options are tighter for most common moneyness, volatility, and expiration date setups than those presented in the existing literature. Our bounds for average strike Asian options are the first closed-form robust upper bounds for those options. Numerical simulations demonstrate that our bounds significantly outperform the benchmarks for both European and Asian options.", + "primary_area": "domain s of application", + "author": "Shan Xue; Ye Du; Liang Xu", + "authorids": "", + "aff": "School of Business Administration, Southwestern University of Finance and Economics, Chengdu, China; Southwestern University of Finance and Economics, Chengdu, China; School of Business Administration, Southwestern University of Finance and Economics, Chengdu, China", + "bibtex": "@article{Xue_Du_Xu_2023, title={Tighter Robust Upper Bounds for Options via No-Regret Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25666}, DOI={10.1609/aaai.v37i4.25666}, abstractNote={Classic option pricing models, such as the Black-Scholes formula, often depend on some rigid assumptions on the dynamics of the underlying asset prices. These assumptions are inevitably violated in practice and thus induce the model risk. To mitigate this, robust option pricing that only requires the no-arbitrage principle has attracted a great deal of attention among researchers. 
In this paper, we give new robust upper bounds for option prices based on a novel \u03b7-momentum trading strategy. Our bounds for European options are tighter for most common moneyness, volatility, and expiration date setups than those presented in the existing literature. Our bounds for average strike Asian options are the first closed-form robust upper bounds for those options. Numerical simulations demonstrate that our bounds significantly outperform the benchmarks for both European and Asian options.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xue, Shan and Du, Ye and Xu, Liang}, year={2023}, month={Jun.}, pages={5348-5356} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25666/25438", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25666", + "pdf_size": 9228382, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:YGCtuzpmEc4J:scholar.google.com/&scioq=Tighter+Robust+Upper+Bounds+for+Options+via+No-Regret+Learning&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Southwestern University of Finance and Economics", + "aff_unique_dep": "School of Business Administration", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chengdu", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25575", + "title": "Time Series Contrastive Learning with Information-Aware Augmentations", + "track": "main", + "status": "Technical", + "abstract": "Various contrastive learning approaches have been proposed in recent years and achieve significant empirical success. While effective and prevalent, contrastive learning has been less explored for time series data. 
A key component of contrastive learning is to select appropriate augmentations imposing some priors to construct feasible positive samples, such that an encoder can be trained to learn robust and discriminative representations. Unlike image and language domains where \"desired'' augmented samples can be generated with the rule of thumb guided by prefabricated human priors, the ad-hoc manual selection of time series augmentations is hindered by their diverse and human-unrecognizable temporal structures. How to find the desired augmentations of time series data that are meaningful for given contrastive learning tasks and datasets remains an open question. In this work, we address the problem by encouraging both high fidelity and variety based on information theory. A theoretical analysis leads to the criteria for selecting feasible data augmentations. On top of that, we propose a new contrastive learning approach with information-aware augmentations, InfoTS, that adaptively selects optimal augmentations for time series representation learning. 
Experiments on various datasets show highly competitive performance with up to a 12.0% reduction in MSE on forecasting tasks and up to 3.7% relative improvement in accuracy on classification tasks over the leading baselines.", + "primary_area": "data mining and knowledge management", + "author": "Dongsheng Luo; Wei Cheng; Yingheng Wang; Dongkuan Xu; Jingchao Ni; Wenchao Yu; Xuchao Zhang; Yanchi Liu; Yuncong Chen; Haifeng Chen; Xiang Zhang", + "authorids": "", + "aff": "Florida International University; NEC Lab America; Cornell University; North Carolina State University; AWS AI Labs; NEC Lab America+Microsoft; Microsoft; NEC Lab America; NEC Lab America; NEC Lab America; The Pennsylvania State University", + "bibtex": "@article{Luo_Cheng_Wang_Xu_Ni_Yu_Zhang_Liu_Chen_Chen_Zhang_2023, title={Time Series Contrastive Learning with Information-Aware Augmentations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25575}, DOI={10.1609/aaai.v37i4.25575}, abstractNote={Various contrastive learning approaches have been proposed in recent years and achieve significant empirical success. While effective and prevalent, contrastive learning has been less explored for time series data. A key component of contrastive learning is to select appropriate augmentations imposing some priors to construct feasible positive samples, such that an encoder can be trained to learn robust and discriminative representations. Unlike image and language domains where "desired\u2019\u2019 augmented samples can be generated with the rule of thumb guided by prefabricated human priors, the ad-hoc manual selection of time series augmentations is hindered by their diverse and human-unrecognizable temporal structures. How to find the desired augmentations of time series data that are meaningful for given contrastive learning tasks and datasets remains an open question. In this work, we address the problem by encouraging both high fidelity and variety based on information theory. 
A theoretical analysis leads to the criteria for selecting feasible data augmentations. On top of that, we propose a new contrastive learning approach with information-aware augmentations, InfoTS, that adaptively selects optimal augmentations for time series representation learning. Experiments on various datasets show highly competitive performance with up to a 12.0% reduction in MSE on forecasting tasks and up to 3.7% relative improvement in accuracy on classification tasks over the leading baselines.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Dongsheng and Cheng, Wei and Wang, Yingheng and Xu, Dongkuan and Ni, Jingchao and Yu, Wenchao and Zhang, Xuchao and Liu, Yanchi and Chen, Yuncong and Chen, Haifeng and Zhang, Xiang}, year={2023}, month={Jun.}, pages={4534-4542} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25575/25347", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25575", + "pdf_size": 559861, + "gs_citation": 70, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6403566358179020536&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 5, + "aff_domain": "fiu.edu;nec-labs.com;cornell.edu;ncsu.edu;amazon.com;nec-labs.com;microsoft.com;nec-labs.com;nec-labs.com;nec-labs.com;psu.edu", + "email": "fiu.edu;nec-labs.com;cornell.edu;ncsu.edu;amazon.com;nec-labs.com;microsoft.com;nec-labs.com;nec-labs.com;nec-labs.com;psu.edu", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;1;2;3;4;1+5;5;1;1;1;6", + "aff_unique_norm": "Florida International University;NEC America;Cornell University;North Carolina State University;Amazon Web Services;Microsoft Corporation;The Pennsylvania State University", + "aff_unique_dep": ";NEC Lab;;;AWS AI Labs;;", + "aff_unique_url": "https://www.fiu.edu;https://www.necam.com;https://www.cornell.edu;https://www.ncsu.edu;https://aws.amazon.com;https://www.microsoft.com;https://www.psu.edu", + "aff_unique_abbr": 
"FIU;NEC;Cornell;NCSU;AWS;Microsoft;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26021", + "title": "Time-Aware Random Walk Diffusion to Improve Dynamic Graph Learning", + "track": "main", + "status": "Technical", + "abstract": "How can we augment a dynamic graph for improving the performance of dynamic graph neural networks? Graph augmentation has been widely utilized to boost the learning performance of GNN-based models. However, most existing approaches only enhance spatial structure within an input static graph by transforming the graph, and do not consider dynamics caused by time such as temporal locality, i.e., recent edges are more influential than earlier ones, which remains challenging for dynamic graph augmentation.\nIn this work, we propose TiaRa (Time-aware Random Walk Diffusion), a novel diffusion-based method for augmenting a dynamic graph represented as a discrete-time sequence of graph snapshots. For this purpose, we first design a time-aware random walk proximity so that a surfer can walk along the time dimension as well as edges, resulting in spatially and temporally localized scores. We then derive our diffusion matrices based on the time-aware random walk, and show they become enhanced adjacency matrices that both spatial and temporal localities are augmented. 
Throughout extensive experiments, we demonstrate that TiaRa effectively augments a given dynamic graph, and leads to significant improvements in dynamic GNN models for various graph datasets and tasks.", + "primary_area": "machine learning ii", + "author": "Jong-whi Lee; Jinhong Jung", + "authorids": "", + "aff": "Department of Computer Science and Artificial Intelligence, Jeonbuk National University, South Korea; Department of Computer Science and Artificial Intelligence, Jeonbuk National University, South Korea", + "bibtex": "@article{Lee_Jung_2023, title={Time-Aware Random Walk Diffusion to Improve Dynamic Graph Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26021}, DOI={10.1609/aaai.v37i7.26021}, abstractNote={How can we augment a dynamic graph for improving the performance of dynamic graph neural networks? Graph augmentation has been widely utilized to boost the learning performance of GNN-based models. However, most existing approaches only enhance spatial structure within an input static graph by transforming the graph, and do not consider dynamics caused by time such as temporal locality, i.e., recent edges are more influential than earlier ones, which remains challenging for dynamic graph augmentation.\nIn this work, we propose TiaRa (Time-aware Random Walk Diffusion), a novel diffusion-based method for augmenting a dynamic graph represented as a discrete-time sequence of graph snapshots. For this purpose, we first design a time-aware random walk proximity so that a surfer can walk along the time dimension as well as edges, resulting in spatially and temporally localized scores. We then derive our diffusion matrices based on the time-aware random walk, and show they become enhanced adjacency matrices that both spatial and temporal localities are augmented. 
Throughout extensive experiments, we demonstrate that TiaRa effectively augments a given dynamic graph, and leads to significant improvements in dynamic GNN models for various graph datasets and tasks.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Jong-whi and Jung, Jinhong}, year={2023}, month={Jun.}, pages={8473-8481} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26021/25793", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26021", + "pdf_size": 523870, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7467310375658360402&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "jbnu.ac.kr;jbnu.ac.kr", + "email": "jbnu.ac.kr;jbnu.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Jeonbuk National University", + "aff_unique_dep": "Department of Computer Science and Artificial Intelligence", + "aff_unique_url": "http://www.jbnu.ac.kr", + "aff_unique_abbr": "JBNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25469", + "title": "TinyNeRF: Towards 100 x Compression of Voxel Radiance Fields", + "track": "main", + "status": "Technical", + "abstract": "Voxel grid representation of 3D scene properties has been widely used to improve the training or rendering speed of the Neural Radiance Fields (NeRF) while at the same time achieving high synthesis quality. However, these methods accelerate the original NeRF at the expense of extra storage demand, which hinders their applications in many scenarios. To solve this limitation, we present TinyNeRF, a three-stage pipeline: frequency domain transformation, pruning and quantization that work together to reduce the storage demand of the voxel grids with little to no effects on their speed and synthesis quality. 
Based on the prior knowledge of visual signals sparsity in the frequency domain, we convert the original voxel grids in the frequency domain via block-wise discrete cosine transformation (DCT). Next, we apply pruning and quantization to enforce the DCT coefficients to be sparse and low-bit. Our method can be optimized from scratch in an end-to-end manner, and can typically compress the original models by 2 orders of magnitude with minimal sacrifice on speed and synthesis quality.", + "primary_area": "computer vision iii", + "author": "Tianli Zhao; Jiayuan Chen; Cong Leng; Jian Cheng", + "authorids": "", + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences; Southeast University; Institute of Automation, Chinese Academy of Sciences + AIRIA + Maicro.ai; Institute of Automation, Chinese Academy of Sciences + AIRIA + Maicro.ai", + "bibtex": "@article{Zhao_Chen_Leng_Cheng_2023, title={TinyNeRF: Towards 100 x Compression of Voxel Radiance Fields}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25469}, DOI={10.1609/aaai.v37i3.25469}, abstractNote={Voxel grid representation of 3D scene properties has been widely used to improve the training or rendering speed of the Neural Radiance Fields (NeRF) while at the same time achieving high synthesis quality. However, these methods accelerate the original NeRF at the expense of extra storage demand, which hinders their applications in many scenarios. To solve this limitation, we present TinyNeRF, a three-stage pipeline: frequency domain transformation, pruning and quantization that work together to reduce the storage demand of the voxel grids with little to no effects on their speed and synthesis quality. Based on the prior knowledge of visual signals sparsity in the frequency domain, we convert the original voxel grids in the frequency domain via block-wise discrete cosine transformation (DCT). 
Next, we apply pruning and quantization to enforce the DCT coefficients to be sparse and low-bit. Our method can be optimized from scratch in an end-to-end manner, and can typically compress the original models by 2 orders of magnitude with minimal sacrifice on speed and synthesis quality.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Tianli and Chen, Jiayuan and Leng, Cong and Cheng, Jian}, year={2023}, month={Jun.}, pages={3588-3596} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25469/25241", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25469", + "pdf_size": 2819896, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18391123789360323187&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "ucas.ac.cn;seu.edu.cn;ucas.ac.cn;ucas.ac.cn", + "email": "ucas.ac.cn;seu.edu.cn;ucas.ac.cn;ucas.ac.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2+3+4;2+3+4", + "aff_unique_norm": "University of Chinese Academy of Sciences;Southeast University;Chinese Academy of Sciences;AIRIA;Maicro.ai", + "aff_unique_dep": "School of Artificial Intelligence;;Institute of Automation;;", + "aff_unique_url": "http://www.ucas.ac.cn;https://www.seu.edu.cn/;http://www.ia.cas.cn;;https://www.maicro.ai", + "aff_unique_abbr": "UCAS;SEU;CAS;;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+2;0+2", + "aff_country_unique": "China;;United States" + }, + { + "id": "article-25267", + "title": "Token Mixing: Parameter-Efficient Transfer Learning from Image-Language to Video-Language", + "track": "main", + "status": "Technical", + "abstract": "Applying large scale pre-trained image-language model to video-language tasks has recently become a trend, which brings two challenges. 
One is how to effectively transfer knowledge from static images to dynamic videos, and the other is how to deal with the prohibitive cost of fully fine-tuning due to growing model size. Existing works that attempt to realize parameter-efficient image-language to video-language transfer learning can be categorized into two types: 1) appending a sequence of temporal transformer blocks after the 2D Vision Transformer (ViT), and 2) inserting a temporal block into the ViT architecture. While these two types of methods only require fine-tuning the newly added components, there are still many parameters to update, and they are only validated on a single video-language task. In this work, based on our analysis of the core ideas of different temporal modeling components in existing approaches, we propose a token mixing strategy to enable cross-frame interactions, which enables transferring from the pre-trained image-language model to video-language tasks through selecting and mixing a key set and a value set from the input video samples. As token mixing does not require the addition of any components or modules, we can directly partially fine-tune the pre-trained image-language model to achieve parameter-efficiency. We carry out extensive experiments to compare our proposed token mixing method with other parameter-efficient transfer learning methods. Our token mixing method outperforms other methods on both understanding tasks and generation tasks. Besides, our method achieves new records on multiple video-language tasks. 
The code is available at https://github.com/yuqi657/video_language_model.", + "primary_area": "computer vision ii", + "author": "Yuqi Liu; Luhui Xu; Pengfei Xiong; Qin Jin", + "authorids": "", + "aff": "School of Information, Renmin University of China+Tencent; Tencent; Tencent; School of Information, Renmin University of China", + "bibtex": "@article{Liu_Xu_Xiong_Jin_2023, title={Token Mixing: Parameter-Efficient Transfer Learning from Image-Language to Video-Language}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25267}, DOI={10.1609/aaai.v37i2.25267}, abstractNote={Applying large scale pre-trained image-language model to video-language tasks has recently become a trend, which brings two challenges. One is how to effectively transfer knowledge from static images to dynamic videos, and the other is how to deal with the prohibitive cost of fully fine-tuning due to growing model size. Existing works that attempt to realize parameter-efficient image-language to video-language transfer learning can be categorized into two types: 1) appending a sequence of temporal transformer blocks after the 2D Vision Transformer (ViT), and 2) inserting a temporal block into the ViT architecture. While these two types of methods only require fine-tuning the newly added components, there are still many parameters to update, and they are only validated on a single video-language task. In this work, based on our analysis of the core ideas of different temporal modeling components in existing approaches, we propose a token mixing strategy to enable cross-frame interactions, which enables transferring from the pre-trained image-language model to video-language tasks through selecting and mixing a key set and a value set from the input video samples. As token mixing does not require the addition of any components or modules, we can directly partially fine-tune the pre-trained image-language model to achieve parameter-efficiency. 
We carry out extensive experiments to compare our proposed token mixing method with other parameter-efficient transfer learning methods. Our token mixing method outperforms other methods on both understanding tasks and generation tasks. Besides, our method achieves new records on multiple video-language tasks. The code is available at https://github.com/yuqi657/video_language_model.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Yuqi and Xu, Luhui and Xiong, Pengfei and Jin, Qin}, year={2023}, month={Jun.}, pages={1781-1789} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25267/25039", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25267", + "pdf_size": 1122564, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5331332163308923149&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "ruc.edu.cn;gmail.com;gmail.com;ruc.edu.cn", + "email": "ruc.edu.cn;gmail.com;gmail.com;ruc.edu.cn", + "github": "https://github.com/yuqi657/video language model", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0", + "aff_unique_norm": "Renmin University of China;Tencent Holdings Limited", + "aff_unique_dep": "School of Information;", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "RUC;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25341", + "title": "TopicFM: Robust and Interpretable Topic-Assisted Feature Matching", + "track": "main", + "status": "Technical", + "abstract": "This study addresses an image-matching problem in challenging cases, such as large scene variations or textureless scenes. To gain robustness to such situations, most previous studies have attempted to encode the global contexts of a scene via graph neural networks or transformers. 
However, these contexts do not explicitly represent high-level contextual information, such as structural shapes or semantic instances; therefore, the encoded features are still not sufficiently discriminative in challenging scenes. We propose a novel image-matching method that applies a topic-modeling strategy to encode high-level contexts in images. The proposed method trains latent semantic instances called topics. It explicitly models an image as a multinomial distribution of topics, and then performs probabilistic feature matching. This approach improves the robustness of matching by focusing on the same semantic areas between the images. In addition, the inferred topics provide interpretability for matching the results, making our method explainable. Extensive experiments on outdoor and indoor datasets show that our method outperforms other state-of-the-art methods, particularly in challenging cases.", + "primary_area": "computer vision ii", + "author": "Khang Truong Giang; Soohwan Song; Sungho Jo", + "authorids": "", + "aff": "School of Computing, KAIST, Daejeon 34141, Republic of Korea; Intelligent Robotics Research Division, ETRI, Daejeon 34129, Republic of Korea; School of Computing, KAIST, Daejeon 34141, Republic of Korea", + "bibtex": "@article{Truong Giang_Song_Jo_2023, title={TopicFM: Robust and Interpretable Topic-Assisted Feature Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25341}, DOI={10.1609/aaai.v37i2.25341}, abstractNote={This study addresses an image-matching problem in challenging cases, such as large scene variations or textureless scenes. To gain robustness to such situations, most previous studies have attempted to encode the global contexts of a scene via graph neural networks or transformers. 
However, these contexts do not explicitly represent high-level contextual information, such as structural shapes or semantic instances; therefore, the encoded features are still not sufficiently discriminative in challenging scenes. We propose a novel image-matching method that applies a topic-modeling strategy to encode high-level contexts in images. The proposed method trains latent semantic instances called topics. It explicitly models an image as a multinomial distribution of topics, and then performs probabilistic feature matching. This approach improves the robustness of matching by focusing on the same semantic areas between the images. In addition, the inferred topics provide interpretability for matching the results, making our method explainable. Extensive experiments on outdoor and indoor datasets show that our method outperforms other state-of-the-art methods, particularly in challenging cases.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Truong Giang, Khang and Song, Soohwan and Jo, Sungho}, year={2023}, month={Jun.}, pages={2447-2455} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25341/25113", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25341", + "pdf_size": 6879403, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=292180067958361700&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "kaist.ac.kr;etri.re.kr;kaist.ac.kr", + "email": "kaist.ac.kr;etri.re.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "KAIST;ETRI", + "aff_unique_dep": "School of Computing;Intelligent Robotics Research Division", + "aff_unique_url": "https://www.kaist.ac.kr;https://www.etri.re.kr", + "aff_unique_abbr": "KAIST;ETRI", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Daejeon", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Republic of 
Korea" + }, + { + "id": "article-26925", + "title": "Topics in Selective Classification", + "track": "aaai doctoral consortium track", + "status": "Technical", + "abstract": "In recent decades, advancements in information technology allowed Artificial Intelligence (AI) systems to predict future outcomes with unprecedented success. This brought the widespread deployment of these methods in many fields, intending to support decision-making. A pressing question is how to make AI systems robust to common challenges in real-life scenarios and trustworthy. \nIn my work, I plan to explore ways to enhance the trustworthiness of AI through the selective classification framework. In this setting, the AI system can refrain from predicting whenever it is not confident enough, allowing it to trade off coverage, i.e. the percentage of instances that receive a prediction, for performance.", + "primary_area": "", + "author": "Andrea Pugnana", + "authorids": "", + "aff": "Scuola Normale Superiore", + "bibtex": "@article{Pugnana_2024, title={Topics in Selective Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26925}, DOI={10.1609/aaai.v37i13.26925}, abstractNote={In recent decades, advancements in information technology allowed Artificial Intelligence (AI) systems to predict future outcomes with unprecedented success. This brought the widespread deployment of these methods in many fields, intending to support decision-making. A pressing question is how to make AI systems robust to common challenges in real-life scenarios and trustworthy. In my work, I plan to explore ways to enhance the trustworthiness of AI through the selective classification framework. In this setting, the AI system can refrain from predicting whenever it is not confident enough, allowing it to trade off coverage, i.e. 
the percentage of instances that receive a prediction, for performance.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pugnana, Andrea}, year={2024}, month={Jul.}, pages={16129-16130} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26925/26697", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26925", + "pdf_size": 56149, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5521068434189075508&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sns.it", + "email": "sns.it", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Scuola Normale Superiore", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sns.it", + "aff_unique_abbr": "SNS", + "aff_country_unique_index": "0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26997", + "title": "Toplogical Data Analysis Detects and Classifies Sunspots (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In our technology-dependent modern world, it is imperative to monitor the Sun for space weather threats to critical infrastructure. Topological data analysis (TDA) is a new set of mathematical techniques used in data analysis and machine learning. We demonstrate that TDA can robustly detect and classify solar surface and coronal activity. 
This technique is a promising step toward future application in predictive space weather modeling.", + "primary_area": "", + "author": "Aidan Lytle; Neil Pritchard; Alicia Aarnio; Thomas Weighill", + "authorids": "", + "aff": "University of North Carolina at Greensboro; University of North Carolina at Greensboro; University of North Carolina at Greensboro; University of North Carolina at Greensboro", + "bibtex": "@article{Lytle_Pritchard_Aarnio_Weighill_2024, title={Toplogical Data Analysis Detects and Classifies Sunspots (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26997}, DOI={10.1609/aaai.v37i13.26997}, abstractNote={In our technology-dependent modern world, it is imperative to monitor the Sun for space weather threats to critical infrastructure. Topological data analysis (TDA) is a new set of mathematical techniques used in data analysis and machine learning. We demonstrate that TDA can robustly detect and classify solar surface and coronal activity. 
This technique is a promising step toward future application in predictive space weather modeling.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lytle, Aidan and Pritchard, Neil and Aarnio, Alicia and Weighill, Thomas}, year={2024}, month={Jul.}, pages={16274-16275} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26997/26769", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26997", + "pdf_size": 401382, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:OXEtkuJSwlcJ:scholar.google.com/&scioq=Toplogical+Data+Analysis+Detects+and+Classifies+Sunspots+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "uncg.edu; ; ; ", + "email": "uncg.edu; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of North Carolina at Greensboro", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uncg.edu", + "aff_unique_abbr": "UNCG", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Greensboro", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25689", + "title": "Topological Distance Games", + "track": "main", + "status": "Technical", + "abstract": "We introduce a class of strategic games in which agents are assigned to nodes of a topology graph and the utility of an agent depends on both the agent's inherent utilities for other agents as well as her distance from these agents on the topology graph. This model of topological distance games (TDGs) offers an appealing combination of important aspects of several prominent settings in coalition formation, including (additively separable) hedonic games, social distance games, and Schelling games. 
We study the existence and complexity of stable outcomes in TDGs\u2014for instance, while a jump stable assignment may not exist in general, we show that the existence is guaranteed in several special cases. We also investigate the dynamics induced by performing beneficial jumps.", + "primary_area": "game theory and economic paradigms", + "author": "Martin Bullinger; Warut Suksompong", + "authorids": "", + "aff": "School of Computation, Information and Technology, Technical University of Munich; School of Computing, National University of Singapore", + "bibtex": "@article{Bullinger_Suksompong_2023, title={Topological Distance Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25689}, DOI={10.1609/aaai.v37i5.25689}, abstractNote={We introduce a class of strategic games in which agents are assigned to nodes of a topology graph and the utility of an agent depends on both the agent\u2019s inherent utilities for other agents as well as her distance from these agents on the topology graph. This model of topological distance games (TDGs) offers an appealing combination of important aspects of several prominent settings in coalition formation, including (additively separable) hedonic games, social distance games, and Schelling games. We study the existence and complexity of stable outcomes in TDGs\u2014for instance, while a jump stable assignment may not exist in general, we show that the existence is guaranteed in several special cases. 
We also investigate the dynamics induced by performing beneficial jumps.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bullinger, Martin and Suksompong, Warut}, year={2023}, month={Jun.}, pages={5549-5556} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25689/25461", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25689", + "pdf_size": 147973, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8256142681031856526&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "; ", + "email": "; ", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Technical University of Munich;National University of Singapore", + "aff_unique_dep": "School of Computation, Information and Technology;School of Computing", + "aff_unique_url": "https://www.tum.de;https://www.nus.edu.sg", + "aff_unique_abbr": "TUM;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Germany;Singapore" + }, + { + "id": "article-25866", + "title": "Topological Pooling on Graphs", + "track": "main", + "status": "Technical", + "abstract": "Graph neural networks (GNNs) have demonstrated a significant success in various graph learning tasks, from graph classification to anomaly detection. There recently has emerged a number of approaches adopting a graph pooling operation within GNNs, with a goal to preserve graph attributive and structural features during the graph representation learning. However, most existing graph pooling operations suffer from the limitations of relying on node-wise neighbor weighting and embedding, which leads to insufficient encoding of rich topological structures and node attributes exhibited by real-world networks. 
By invoking the machinery of persistent homology and the concept of landmarks, we propose a novel topological pooling layer and witness complex-based topological embedding mechanism that allow us to systematically integrate hidden topological information at both local and global levels. Specifically, we design new learnable local and global topological representations Wit-TopoPool which allow us to simultaneously extract rich discriminative topological information from graphs. Experiments on 11 diverse benchmark datasets against 18 baseline models in conjunction with graph classification tasks indicate that Wit-TopoPool significantly outperforms all competitors across all datasets.", + "primary_area": "machine learning i", + "author": "Yuzhou Chen; Yulia R. Gel", + "authorids": "", + "aff": "Department of Computer and Information Sciences, Temple University; Department of Mathematical Sciences, University of Texas at Dallas + National Science Foundation", + "bibtex": "@article{Chen_Gel_2023, title={Topological Pooling on Graphs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25866}, DOI={10.1609/aaai.v37i6.25866}, abstractNote={Graph neural networks (GNNs) have demonstrated a significant success in various graph learning tasks, from graph classification to anomaly detection. There recently has emerged a number of approaches adopting a graph pooling operation within GNNs, with a goal to preserve graph attributive and structural features during the graph representation learning. However, most existing graph pooling operations suffer from the limitations of relying on node-wise neighbor weighting and embedding, which leads to insufficient encoding of rich topological structures and node attributes exhibited by real-world networks. 
By invoking the machinery of persistent homology and the concept of landmarks, we propose a novel topological pooling layer and witness complex-based topological embedding mechanism that allow us to systematically integrate hidden topological information at both local and global levels. Specifically, we design new learnable local and global topological representations Wit-TopoPool which allow us to simultaneously extract rich discriminative topological information from graphs. Experiments on 11 diverse benchmark datasets against 18 baseline models in conjunction with graph classification tasks indicate that Wit-TopoPool significantly outperforms all competitors across all datasets.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Yuzhou and Gel, Yulia R.}, year={2023}, month={Jun.}, pages={7096-7103} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25866/25638", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25866", + "pdf_size": 693298, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13388021709720487207&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "temple.edu;utdallas.edu", + "email": "temple.edu;utdallas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+2", + "aff_unique_norm": "Temple University;University of Texas at Dallas;National Science Foundation", + "aff_unique_dep": "Department of Computer and Information Sciences;Department of Mathematical Sciences;", + "aff_unique_url": "https://www.temple.edu;https://www.utdallas.edu;https://www.nsf.gov", + "aff_unique_abbr": "Temple;UT Dallas;NSF", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Dallas", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "article-25728", + "title": "Tournament Fixing Parameterized by Feedback Vertex Set Number Is FPT", + "track": "main", + "status": 
"Technical", + "abstract": "A knockout (or single-elimination) tournament is a format of a competition that is very popular in practice (particularly in sports, elections and decision making), and which has been extensively and intensively studied from a theoretical point of view for more than a decade. Particular attention has been devoted to the Tournament Fixing problem, where, roughly speaking, the objective is to determine whether we can conduct the knockout tournament in a way that makes our favorite player win. Here, part of the input is a tournament graph D that encodes the winner of each possible match. A sequence of papers has studied the parameterized complexity of Tournament Fixing with respect to the feedback arc set number (fas) of D Given that this parameter yielded tractability, it has been asked explicitly and repeatedly whether Tournament Fixing is FPT also with respect to the feedback vertex set number (fvs) of D. We answer this question positively. In fact, although fvs can be arbitrarily smaller than fas, we attain the same dependency on the parameter in the time complexity. So, additionally, our work subsumes the best known algorithm for Tournament Fixing with respect to as.", + "primary_area": "game theory and economic paradigms", + "author": "Meirav Zehavi", + "authorids": "", + "aff": "Department of Computer Science, Ben-Gurion University of the Negev, Beersheba, Israel", + "bibtex": "@article{Zehavi_2023, title={Tournament Fixing Parameterized by Feedback Vertex Set Number Is FPT}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25728}, DOI={10.1609/aaai.v37i5.25728}, abstractNote={A knockout (or single-elimination) tournament is a format of a competition that is very popular in practice (particularly in sports, elections and decision making), and which has been extensively and intensively studied from a theoretical point of view for more than a decade. 
Particular attention has been devoted to the Tournament Fixing problem, where, roughly speaking, the objective is to determine whether we can conduct the knockout tournament in a way that makes our favorite player win. Here, part of the input is a tournament graph D that encodes the winner of each possible match. A sequence of papers has studied the parameterized complexity of Tournament Fixing with respect to the feedback arc set number (fas) of D Given that this parameter yielded tractability, it has been asked explicitly and repeatedly whether Tournament Fixing is FPT also with respect to the feedback vertex set number (fvs) of D. We answer this question positively. In fact, although fvs can be arbitrarily smaller than fas, we attain the same dependency on the parameter in the time complexity. So, additionally, our work subsumes the best known algorithm for Tournament Fixing with respect to as.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zehavi, Meirav}, year={2023}, month={Jun.}, pages={5876-5883} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25728/25500", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25728", + "pdf_size": 155388, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8477651906605132853&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "bgu.ac.il", + "email": "bgu.ac.il", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Ben-Gurion University of the Negev", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Beersheba", + "aff_country_unique_index": "0", + "aff_country_unique": "Israel" + }, + { + "id": "article-25395", + "title": "Toward Robust Diagnosis: A Contour Attention Preserving Adversarial Defense for COVID-19 Detection", + 
"track": "main", + "status": "Technical", + "abstract": "As the COVID-19 pandemic puts pressure on healthcare systems worldwide, the computed tomography image based AI diagnostic system has become a sustainable solution for early diagnosis. However, the model-wise vulnerability under adversarial perturbation hinders its deployment in practical situation. The existing adversarial training strategies are difficult to generalized into medical imaging field challenged by complex medical texture features. To overcome this challenge, we propose a Contour Attention Preserving (CAP) method based on lung cavity edge extraction. The contour prior features are injected to attention layer via a parameter regularization and we optimize the robust empirical risk with hybrid distance metric. We then introduce a new cross-nation CT scan dataset to evaluate the generalization capability of the adversarial robustness under distribution shift. Experimental results indicate that the proposed method achieves state-of-the-art performance in multiple adversarial defense and generalization tasks. 
The code and dataset are available at https://github.com/Quinn777/CAP.", + "primary_area": "computer vision iii", + "author": "Kun Xiang; Xing Zhang; Jinwen She; Jinpeng Liu; Haohan Wang; Shiqi Deng; Shancheng Jiang", + "authorids": "", + "aff": "Sun Yat-sen University; Shuguang Hospital, Shanghai University of Traditional Chinese Medicine; Sun Yat-sen University; Sun Yat-sen University; University of Illinois Urbana-Champaign; Sun Yat-sen University; Sun Yat-sen University+Guangdong Provincial Key Laboratory of Fire Science and Technology", + "bibtex": "@article{Xiang_Zhang_She_Liu_Wang_Deng_Jiang_2023, title={Toward Robust Diagnosis: A Contour Attention Preserving Adversarial Defense for COVID-19 Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25395}, DOI={10.1609/aaai.v37i3.25395}, abstractNote={As the COVID-19 pandemic puts pressure on healthcare systems worldwide, the computed tomography image based AI diagnostic system has become a sustainable solution for early diagnosis. However, the model-wise vulnerability under adversarial perturbation hinders its deployment in practical situation. The existing adversarial training strategies are difficult to generalized into medical imaging field challenged by complex medical texture features. To overcome this challenge, we propose a Contour Attention Preserving (CAP) method based on lung cavity edge extraction. The contour prior features are injected to attention layer via a parameter regularization and we optimize the robust empirical risk with hybrid distance metric. We then introduce a new cross-nation CT scan dataset to evaluate the generalization capability of the adversarial robustness under distribution shift. Experimental results indicate that the proposed method achieves state-of-the-art performance in multiple adversarial defense and generalization tasks. 
The code and dataset are available at https://github.com/Quinn777/CAP.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiang, Kun and Zhang, Xing and She, Jinwen and Liu, Jinpeng and Wang, Haohan and Deng, Shiqi and Jiang, Shancheng}, year={2023}, month={Jun.}, pages={2928-2937} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25395/25167", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25395", + "pdf_size": 8843376, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=695708560988032093&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail2.sysu.edu.cn;shutcm.edu.cn;mail2.sysu.edu.cn;mails.tsinghua.edu.cn;illinois.edu;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;shutcm.edu.cn;mail2.sysu.edu.cn;mails.tsinghua.edu.cn;illinois.edu;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "github": "https://github.com/Quinn777/CAP", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;2;0;0+3", + "aff_unique_norm": "Sun Yat-sen University;Shanghai University of Traditional Chinese Medicine;University of Illinois at Urbana-Champaign;Guangdong Provincial Key Laboratory of Fire Science and Technology", + "aff_unique_dep": ";Traditional Chinese Medicine;;Fire Science and Technology", + "aff_unique_url": "http://www.sysu.edu.cn/;http://www.shutcm.edu.cn;https://illinois.edu;", + "aff_unique_abbr": "SYSU;SHUTCM;UIUC;", + "aff_campus_unique_index": "1;2;", + "aff_campus_unique": ";Shanghai;Urbana-Champaign", + "aff_country_unique_index": "0;0;0;0;1;0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26768", + "title": "Toward Robust Uncertainty Estimation with Random Activation Functions", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deep neural networks are in the limelight of machine learning with their excellent performance in many data-driven applications. 
However, they can lead to inaccurate predictions when queried in out-of-distribution data points, which can have detrimental effects especially in sensitive domains, such as healthcare and transportation, where erroneous predictions can be very costly and/or dangerous. Subsequently, quantifying the uncertainty of the output of a neural network is often leveraged to evaluate the confidence of its predictions, and ensemble models have proved to be effective in measuring the uncertainty by utilizing the variance of predictions over a pool of models. In this paper, we propose a novel approach for uncertainty quantification via ensembles, called Random Activation Functions (RAFs) Ensemble, that aims at improving the ensemble diversity toward a more robust estimation, by accommodating each neural network with a different (random) activation function. Extensive empirical study demonstrates that RAFs Ensemble outperforms state-of-the-art ensemble uncertainty quantification methods on both synthetic and real-world datasets in a series of regression tasks.", + "primary_area": "safe and robust ai", + "author": "Yana Stoyanova; Soroush Ghandi; Maryam Tavakol", + "authorids": "", + "aff": "Eindhoven University of Technology, Eindhoven, The Netherlands; Eindhoven University of Technology, Eindhoven, The Netherlands; Eindhoven University of Technology, Eindhoven, The Netherlands", + "bibtex": "@article{Stoyanova_Ghandi_Tavakol_2023, title={Toward Robust Uncertainty Estimation with Random Activation Functions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26768}, DOI={10.1609/aaai.v37i12.26768}, abstractNote={Deep neural networks are in the limelight of machine learning with their excellent performance in many data-driven applications. 
However, they can lead to inaccurate predictions when queried in out-of-distribution data points, which can have detrimental effects especially in sensitive domains, such as healthcare and transportation, where erroneous predictions can be very costly and/or dangerous. Subsequently, quantifying the uncertainty of the output of a neural network is often leveraged to evaluate the confidence of its predictions, and ensemble models have proved to be effective in measuring the uncertainty by utilizing the variance of predictions over a pool of models. In this paper, we propose a novel approach for uncertainty quantification via ensembles, called Random Activation Functions (RAFs) Ensemble, that aims at improving the ensemble diversity toward a more robust estimation, by accommodating each neural network with a different (random) activation function. Extensive empirical study demonstrates that RAFs Ensemble outperforms state-of-the-art ensemble uncertainty quantification methods on both synthetic and real-world datasets in a series of regression tasks.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Stoyanova, Yana and Ghandi, Soroush and Tavakol, Maryam}, year={2023}, month={Jun.}, pages={15152-15160} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26768/26540", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26768", + "pdf_size": 12968459, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16870636470621264719&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "student.tue.nl;tue.nl;tue.nl", + "email": "student.tue.nl;tue.nl;tue.nl", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Eindhoven University of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tue.nl", + "aff_unique_abbr": "TU/e", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Eindhoven", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "The Netherlands" + }, + { + "id": "article-25840", + "title": "Toward a Perspectivist Turn in Ground Truthing for Predictive Computing", + "track": "main", + "status": "Technical", + "abstract": "Most current Artificial Intelligence applications are based on supervised Machine Learning (ML), which ultimately grounds on data annotated by small teams of experts or large ensemble of volunteers. The annotation process is often performed in terms of a majority vote, however this has been proved to be often problematic by recent evaluation studies.\nIn this article, we describe and advocate for a different paradigm, which we call perspectivism: this counters the removal of disagreement and, consequently, the assumption of correctness of traditionally aggregated gold-standard datasets, and proposes the adoption of methods that preserve divergence of opinions and integrate multiple perspectives in the ground truthing process of ML development. Drawing on previous works which inspired it, mainly from the crowdsourcing and multi-rater labeling settings, we survey the state-of-the-art and describe the potential of our proposal for not only the more subjective tasks (e.g. those related to human language) but also those tasks commonly understood as objective (e.g. medical decision making). We present the main benefits of adopting a perspectivist stance in ML, as well as possible disadvantages, and various ways in which such a stance can be implemented in practice. 
Finally, we share a set of recommendations and outline a research agenda to advance the perspectivist stance in ML.", + "primary_area": "machine learning i", + "author": "Federico Cabitza; Andrea Campagner; Valerio Basile", + "authorids": "", + "aff": "Department of Informatics, Systems and Communication, University of Milano-Bicocca; IRCCS Istituto Ortopedico Galeazzi; University of Turin", + "bibtex": "@article{Cabitza_Campagner_Basile_2023, title={Toward a Perspectivist Turn in Ground Truthing for Predictive Computing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25840}, DOI={10.1609/aaai.v37i6.25840}, abstractNote={Most current Artificial Intelligence applications are based on supervised Machine Learning (ML), which ultimately grounds on data annotated by small teams of experts or large ensemble of volunteers. The annotation process is often performed in terms of a majority vote, however this has been proved to be often problematic by recent evaluation studies.\nIn this article, we describe and advocate for a different paradigm, which we call perspectivism: this counters the removal of disagreement and, consequently, the assumption of correctness of traditionally aggregated gold-standard datasets, and proposes the adoption of methods that preserve divergence of opinions and integrate multiple perspectives in the ground truthing process of ML development. Drawing on previous works which inspired it, mainly from the crowdsourcing and multi-rater labeling settings, we survey the state-of-the-art and describe the potential of our proposal for not only the more subjective tasks (e.g. those related to human language) but also those tasks commonly understood as objective (e.g. medical decision making). We present the main benefits of adopting a perspectivist stance in ML, as well as possible disadvantages, and various ways in which such a stance can be implemented in practice. 
Finally, we share a set of recommendations and outline a research agenda to advance the perspectivist stance in ML.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cabitza, Federico and Campagner, Andrea and Basile, Valerio}, year={2023}, month={Jun.}, pages={6860-6868} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25840/25612", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25840", + "pdf_size": 227365, + "gs_citation": 172, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12339548697353184632&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "unimib.it;campus.unimib.it;unito.it", + "email": "unimib.it;campus.unimib.it;unito.it", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "University of Milano-Bicocca;IRCCS Istituto Ortopedico Galeazzi;University of Turin", + "aff_unique_dep": "Department of Informatics, Systems and Communication;;", + "aff_unique_url": "https://www.unimib.it;https://www.galeazzi.org;https://www.unito.it", + "aff_unique_abbr": "UniMiB;;UNITO", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-26418", + "title": "Towards Automated Modeling Assistance: An Efficient Approach for Repairing Flawed Planning Domains", + "track": "main", + "status": "Technical", + "abstract": "Designing a planning domain is a difficult task in AI planning. Assisting tools are thus required if we want planning to be used more broadly. In this paper, we are interested in automatically correcting a flawed domain. In particular, we are concerned with the scenario where a domain contradicts a plan that is known to be valid. Our goal is to repair the domain so as to turn the plan into a solution. 
Specifically, we consider both grounded and lifted representations support for negative preconditions and show how to explore the space of repairs to find the optimal one efficiently. As an evidence of the efficiency of our approach, the experiment results show that all flawed domains except one in the benchmark set can be repaired optimally by our approach within one second.", + "primary_area": "planning routing and scheduling", + "author": "Songtuan Lin; Alban Grastien; Pascal Bercher", + "authorids": "", + "aff": "School of Computing, The Australian National University; School of Computing, The Australian National University; School of Computing, The Australian National University", + "bibtex": "@article{Lin_Grastien_Bercher_2023, title={Towards Automated Modeling Assistance: An Efficient Approach for Repairing Flawed Planning Domains}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26418}, DOI={10.1609/aaai.v37i10.26418}, abstractNote={Designing a planning domain is a difficult task in AI planning. Assisting tools are thus required if we want planning to be used more broadly. In this paper, we are interested in automatically correcting a flawed domain. In particular, we are concerned with the scenario where a domain contradicts a plan that is known to be valid. Our goal is to repair the domain so as to turn the plan into a solution. Specifically, we consider both grounded and lifted representations support for negative preconditions and show how to explore the space of repairs to find the optimal one efficiently. 
As an evidence of the efficiency of our approach, the experiment results show that all flawed domains except one in the benchmark set can be repaired optimally by our approach within one second.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Songtuan and Grastien, Alban and Bercher, Pascal}, year={2023}, month={Jun.}, pages={12022-12031} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26418/26190", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26418", + "pdf_size": 902492, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4595008814132939258&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "anu.edu.au;anu.edu.au;anu.edu.au", + "email": "anu.edu.au;anu.edu.au;anu.edu.au", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The Australian National University", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.anu.edu.au", + "aff_unique_abbr": "ANU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-25954", + "title": "Towards Better Visualizing the Decision Basis of Networks via Unfold and Conquer Attribution Guidance", + "track": "main", + "status": "Technical", + "abstract": "Revealing the transparency of Deep Neural Networks (DNNs) has been widely studied to describe the decision mechanisms of network inner structures. In this paper, we propose a novel post-hoc framework, Unfold and Conquer Attribution Guidance (UCAG), which enhances the explainability of the network decision by spatially scrutinizing the input features with respect to the model confidence. 
Addressing the phenomenon of missing detailed descriptions, UCAG sequentially complies with the confidence of slices of the image, leading to providing an abundant and clear interpretation. Therefore, it is possible to enhance the representation ability of explanation by preserving the detailed descriptions of assistant input features, which are commonly overwhelmed by the main meaningful regions. We conduct numerous evaluations to validate the performance in several metrics: i) deletion and insertion, ii) (energy-based) pointing games, and iii) positive and negative density maps. Experimental results, including qualitative comparisons, demonstrate that our method outperforms the existing methods with the nature of clear and detailed explanations and applicability.", + "primary_area": "machine learning ii", + "author": "Jung-Ho Hong; Woo-Jeoung Nam; Kyu-Sung Jeon; Seong-Whan Lee", + "authorids": "", + "aff": "Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; School of Computer Science and Engineering, Kyungpook National University, Daegu, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea", + "bibtex": "@article{Hong_Nam_Jeon_Lee_2023, title={Towards Better Visualizing the Decision Basis of Networks via Unfold and Conquer Attribution Guidance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25954}, DOI={10.1609/aaai.v37i7.25954}, abstractNote={Revealing the transparency of Deep Neural Networks (DNNs) has been widely studied to describe the decision mechanisms of network inner structures. In this paper, we propose a novel post-hoc framework, Unfold and Conquer Attribution Guidance (UCAG), which enhances the explainability of the network decision by spatially scrutinizing the input features with respect to the model confidence. 
Addressing the phenomenon of missing detailed descriptions, UCAG sequentially complies with the confidence of slices of the image, leading to providing an abundant and clear interpretation. Therefore, it is possible to enhance the representation ability of explanation by preserving the detailed descriptions of assistant input features, which are commonly overwhelmed by the main meaningful regions. We conduct numerous evaluations to validate the performance in several metrics: i) deletion and insertion, ii) (energy-based) pointing games, and iii) positive and negative density maps. Experimental results, including qualitative comparisons, demonstrate that our method outperforms the existing methods with the nature of clear and detailed explanations and applicability.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hong, Jung-Ho and Nam, Woo-Jeoung and Jeon, Kyu-Sung and Lee, Seong-Whan}, year={2023}, month={Jun.}, pages={7884-7892} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25954/25726", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25954", + "pdf_size": 1843720, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15924932639415463289&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 9, + "aff_domain": "korea.ac.kr;knu.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;knu.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Korea University;Kyungpook National University", + "aff_unique_dep": "Department of Artificial Intelligence;School of Computer Science and Engineering", + "aff_unique_url": "http://www.korea.ac.kr;http://www.knu.ac.kr", + "aff_unique_abbr": "KU;KNU", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Seoul;Daegu", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": 
"article-26581", + "title": "Towards Complex Scenarios: Building End-to-End Task-Oriented Dialogue System across Multiple Knowledge Bases", + "track": "main", + "status": "Technical", + "abstract": "With the success of the sequence-to-sequence model, end-to-end task-oriented dialogue systems (EToDs) have obtained remarkable progress. However, most existing EToDs are limited to single KB settings where dialogues can be supported by a single KB, which is still far from satisfying the requirements of some complex applications (multi-KBs setting). In this work, we first empirically show that the existing single-KB EToDs fail to work on multi-KB settings that require models to reason across various KBs. To solve this issue, we take the first step to consider the multi-KBs scenario in EToDs and introduce a KB-over-KB Heterogeneous Graph Attention Network (KoK-HAN) to facilitate model to reason over multiple KBs. The core module is a triple-connection graph interaction layer that can model different granularity levels of interaction information across different KBs (i.e., intra-KB connection, inter-KB connection and dialogue-KB connection). 
Experimental results confirm the superiority of our model for multiple KBs reasoning.", + "primary_area": "speech natural language processing", + "author": "Libo Qin; Zhouyang Li; Qiying Yu; Lehan Wang; Wanxiang Che", + "authorids": "", + "aff": "School of Computer Science and Engineering, Central South University, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China", + "bibtex": "@article{Qin_Li_Yu_Wang_Che_2023, title={Towards Complex Scenarios: Building End-to-End Task-Oriented Dialogue System across Multiple Knowledge Bases}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26581}, DOI={10.1609/aaai.v37i11.26581}, abstractNote={With the success of the sequence-to-sequence model, end-to-end task-oriented dialogue systems (EToDs) have obtained remarkable progress. However, most existing EToDs are limited to single KB settings where dialogues can be supported by a single KB, which is still far from satisfying the requirements of some complex applications (multi-KBs setting). In this work, we first empirically show that the existing single-KB EToDs fail to work on multi-KB settings that require models to reason across various KBs. To solve this issue, we take the first step to consider the multi-KBs scenario in EToDs and introduce a KB-over-KB Heterogeneous Graph Attention Network (KoK-HAN) to facilitate model to reason over multiple KBs. The core module is a triple-connection graph interaction layer that can model different granularity levels of interaction information across different KBs (i.e., intra-KB connection, inter-KB connection and dialogue-KB connection). 
Experimental results confirm the superiority of our model for multiple KBs reasoning.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Libo and Li, Zhouyang and Yu, Qiying and Wang, Lehan and Che, Wanxiang}, year={2023}, month={Jun.}, pages={13483-13491} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26581/26353", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26581", + "pdf_size": 729229, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13183870810722973180&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "csu.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;stu.hit.edu.cn", + "email": "csu.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;stu.hit.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Central South University;Harbin Institute of Technology", + "aff_unique_dep": "School of Computer Science and Engineering;Research Center for Social Computing and Information Retrieval", + "aff_unique_url": "http://www.csu.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "CSU;HIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26557", + "title": "Towards Credible Human Evaluation of Open-Domain Dialog Systems Using Interactive Setup", + "track": "main", + "status": "Technical", + "abstract": "Evaluating open-domain conversation models has been an open challenge due to the open-ended nature of conversations. In addition to static evaluations, recent work has started to explore a variety of per-turn and per-dialog interactive evaluation mechanisms and provide advice on the best setup. In this work, we adopt the interactive evaluation framework and further apply to multiple models with a focus on per-turn evaluation techniques. 
Apart from the widely used setting where participants select the best response among different candidates at each turn, one more novel per-turn evaluation setting is adopted, where participants can select all appropriate responses with different fallback strategies to continue the conversation when no response is selected. We evaluate these settings based on sensitivity and consistency using four GPT2-based models that differ in model sizes or fine-tuning data. To better generalize to any model groups with no prior assumptions on their rankings and control evaluation costs for all setups, we also propose a methodology to estimate the required sample size given a minimum performance gap of interest before running most experiments. Our comprehensive human evaluation results shed light on how to conduct credible human evaluations of open domain dialog systems using the interactive setup, and suggest additional future directions.", + "primary_area": "speech natural language processing", + "author": "Sijia Liu; Patrick Lange; Behnam Hedayatnia; Alexandros Papangelis; Di Jin; Andrew Wirth; Yang Liu; Dilek Hakkani-Tur", + "authorids": "", + "aff": "Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "bibtex": "@article{Liu_Lange_Hedayatnia_Papangelis_Jin_Wirth_Liu_Hakkani-Tur_2023, title={Towards Credible Human Evaluation of Open-Domain Dialog Systems Using Interactive Setup}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26557}, DOI={10.1609/aaai.v37i11.26557}, abstractNote={Evaluating open-domain conversation models has been an open challenge due to the open-ended nature of conversations. In addition to static evaluations, recent work has started to explore a variety of per-turn and per-dialog interactive evaluation mechanisms and provide advice on the best setup. 
In this work, we adopt the interactive evaluation framework and further apply to multiple models with a focus on per-turn evaluation techniques. Apart from the widely used setting where participants select the best response among different candidates at each turn, one more novel per-turn evaluation setting is adopted, where participants can select all appropriate responses with different fallback strategies to continue the conversation when no response is selected. We evaluate these settings based on sensitivity and consistency using four GPT2-based models that differ in model sizes or fine-tuning data. To better generalize to any model groups with no prior assumptions on their rankings and control evaluation costs for all setups, we also propose a methodology to estimate the required sample size given a minimum performance gap of interest before running most experiments. Our comprehensive human evaluation results shed light on how to conduct credible human evaluations of open domain dialog systems using the interactive setup, and suggest additional future directions.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Sijia and Lange, Patrick and Hedayatnia, Behnam and Papangelis, Alexandros and Jin, Di and Wirth, Andrew and Liu, Yang and Hakkani-Tur, Dilek}, year={2023}, month={Jun.}, pages={13264-13272} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26557/26329", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26557", + "pdf_size": 188004, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=201401977917484031&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": 
"https://www.amazon.science/alexa-prize/socialbot-grand-challenge", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "Alexa AI", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25926", + "title": "Towards Decision-Friendly AUC: Learning Multi-Classifier with AUC\u00b5", + "track": "main", + "status": "Technical", + "abstract": "Area Under the ROC Curve (AUC) is a widely used ranking metric in imbalanced learning due to its insensitivity to label distributions. As a well-known multiclass extension of AUC, Multiclass AUC (MAUC, a.k.a. M-metric) measures the average AUC of multiple binary classifiers. In this paper, we argue that simply optimizing MAUC is far from enough for imbalanced multi-classification. More precisely, MAUC only focuses on learning scoring functions via ranking optimization, while leaving the decision process unconsidered. Therefore, scoring functions being able to make good decisions might suffer from low performance in terms of MAUC. To overcome this issue, we turn to explore AUC\u00b5, another multiclass variant of AUC, which further takes the decision process into consideration. Motivated by this fact, we propose a surrogate risk optimization framework to improve model performance from the perspective of AUC\u00b5. Practically, we propose a two-stage training framework for multi-classification, where at the first stage a scoring function is learned maximizing AUC\u00b5, and at the second stage we seek for a decision function to improve the F1-metric via our proposed soft F1. Theoretically, we first provide sufficient conditions that optimizing the surrogate losses could lead to the Bayes optimal scoring function. 
Afterward, we show that the proposed surrogate risk enjoys a generalization bound in order of O(1/\u221aN). Experimental results on four benchmark datasets demonstrate the effectiveness of our proposed method in both AUC\u00b5 and F1-metric.", + "primary_area": "machine learning i", + "author": "Peifeng Gao; Qianqian Xu; Peisong Wen; Huiyang Shao; Yuan He; Qingming Huang", + "authorids": "", + "aff": "School of Computer Science and Tech., University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences; Beijing Advanced Innovation Center for Big Data and Brain Computing, Beihang University, Beijing 100191, China; Alibaba Group; BDKM, University of Chinese Academy of Sciences; Peng Cheng Laboratory", + "bibtex": "@article{Gao_Xu_Wen_Shao_He_Huang_2023, title={Towards Decision-Friendly AUC: Learning Multi-Classifier with AUC\u00b5}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25926}, DOI={10.1609/aaai.v37i6.25926}, abstractNote={Area Under the ROC Curve (AUC) is a widely used ranking metric in imbalanced learning due to its insensitivity to label distributions. As a well-known multiclass extension of AUC, Multiclass AUC (MAUC, a.k.a. M-metric) measures the average AUC of multiple binary classifiers. In this paper, we argue that simply optimizing MAUC is far from enough for imbalanced multi-classification. More precisely, MAUC only focuses on learning scoring functions via ranking optimization, while leaving the decision process unconsidered. Therefore, scoring functions being able to make good decisions might suffer from low performance in terms of MAUC. To overcome this issue, we turn to explore AUC\u00b5, another multiclass variant of AUC, which further takes the decision process into consideration. Motivated by this fact, we propose a surrogate risk optimization framework to improve model performance from the perspective of AUC\u00b5. 
Practically, we propose a two-stage training framework for multi-classification, where at the first stage a scoring function is learned maximizing AUC\u00b5, and at the second stage we seek for a decision function to improve the F1-metric via our proposed soft F1. Theoretically, we first provide sufficient conditions that optimizing the surrogate losses could lead to the Bayes optimal scoring function. Afterward, we show that the proposed surrogate risk enjoys a generalization bound in order of O(1/\u221aN). Experimental results on four benchmark datasets demonstrate the effectiveness of our proposed method in both AUC\u00b5 and F1-metric.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Peifeng and Xu, Qianqian and Wen, Peisong and Shao, Huiyang and He, Yuan and Huang, Qingming}, year={2023}, month={Jun.}, pages={7633-7641} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25926/25698", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25926", + "pdf_size": 1359634, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:IjLfOrPVCf4J:scholar.google.com/&scioq=Towards+Decision-Friendly+AUC:+Learning+Multi-Classifier+with+AUC%C2%B5&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "mails.ucas.ac.cn;ict.ac.cn;ict.ac.cn;mails.ucas.ac.cn;alibaba-inc.com;ucas.ac.cn", + "email": "mails.ucas.ac.cn;ict.ac.cn;ict.ac.cn;mails.ucas.ac.cn;alibaba-inc.com;ucas.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;0;4", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;Beihang University;Alibaba Group;Peng Cheng Laboratory", + "aff_unique_dep": "School of Computer Science and Tech.;Institute of Computing Technology;Beijing Advanced Innovation Center for Big Data and Brain Computing;;", + "aff_unique_url": 
"http://www.ucas.ac.cn;http://www.cas.cn;http://www.buaa.edu.cn;https://www.alibaba.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "UCAS;CAS;Beihang;Alibaba;PCL", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26951", + "title": "Towards Deployment-Efficient and Collision-Free Multi-Agent Path Finding (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Multi-agent pathfinding (MAPF) is essential to large-scale robotic coordination tasks. Planning-based algorithms show their advantages in collision avoidance while avoiding exponential growth in the number of agents. Reinforcement-learning (RL)-based algorithms can be deployed efficiently but cannot prevent collisions entirely due to the lack of hard constraints. This paper combines the merits of planning-based and RL-based MAPF methods to propose a deployment-efficient and collision-free MAPF algorithm. 
The experiments show the effectiveness of our approach.", + "primary_area": "", + "author": "Feng Chen; Chenghe Wang; Fuxiang Zhang; Hao Ding; Qiaoyong Zhong; Shiliang Pu; Zongzhang Zhang", + "authorids": "", + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University; Hikvision Research Institute; Hikvision Research Institute; National Key Laboratory for Novel Software Technology, Nanjing University", + "bibtex": "@article{Chen_Wang_Zhang_Ding_Zhong_Pu_Zhang_2024, title={Towards Deployment-Efficient and Collision-Free Multi-Agent Path Finding (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26951}, DOI={10.1609/aaai.v37i13.26951}, abstractNote={Multi-agent pathfinding (MAPF) is essential to large-scale robotic coordination tasks. Planning-based algorithms show their advantages in collision avoidance while avoiding exponential growth in the number of agents. Reinforcement-learning (RL)-based algorithms can be deployed efficiently but cannot prevent collisions entirely due to the lack of hard constraints. This paper combines the merits of planning-based and RL-based MAPF methods to propose a deployment-efficient and collision-free MAPF algorithm. 
The experiments show the effectiveness of our approach.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Feng and Wang, Chenghe and Zhang, Fuxiang and Ding, Hao and Zhong, Qiaoyong and Pu, Shiliang and Zhang, Zongzhang}, year={2024}, month={Jul.}, pages={16182-16183} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26951/26723", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26951", + "pdf_size": 230267, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:lwOA7H3XtUIJ:scholar.google.com/&scioq=Towards+Deployment-Efficient+and+Collision-Free+Multi-Agent+Path+Finding+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 2, + "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;hikvision.com;hikvision.com;nju.edu.cn", + "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn;hikvision.com;hikvision.com;nju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;1;0", + "aff_unique_norm": "Nanjing University;Hikvision Research Institute", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.hikvision.com/cn/", + "aff_unique_abbr": "Nanjing University;Hikvision", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26594", + "title": "Towards Diverse, Relevant and Coherent Open-Domain Dialogue Generation via Hybrid Latent Variables", + "track": "main", + "status": "Technical", + "abstract": "Conditional variational models, using either continuous or discrete latent variables, are powerful for open-domain dialogue response generation. However, previous works show that continuous latent variables tend to reduce the coherence of generated responses. 
In this paper, we also found that discrete latent variables have difficulty capturing more diverse expressions. To tackle these problems, we combine the merits of both continuous and discrete latent variables and propose a Hybrid Latent Variable (HLV) method. Specifically, HLV constrains the global semantics of responses through discrete latent variables and enriches responses with continuous latent variables. Thus, we diversify the generated responses while maintaining relevance and coherence. In addition, we propose Conditional Hybrid Variational Transformer (CHVT) to construct and to utilize HLV with transformers for dialogue generation. Through fine-grained symbolic-level semantic information and additive Gaussian mixing, we construct the distribution of continuous variables, prompting the generation of diverse expressions. Meanwhile, to maintain the relevance and coherence, the discrete latent variable is optimized by self-separation training. Experimental results on two dialogue generation datasets (DailyDialog and Opensubtitles) show that CHVT is superior to traditional transformer-based variational mechanism w.r.t. diversity, relevance and coherence metrics. 
Moreover, we also demonstrate the benefit of applying HLV to fine-tuning two pre-trained dialogue models (PLATO and BART-base).", + "primary_area": "speech natural language processing", + "author": "Bin Sun; Yitong Li; Fei Mi; Weichao Wang; Yiwei Li; Kan Li", + "authorids": "", + "aff": "School of Computer Science & Technology, Beijing Institute of Technology; Huawei Noah\u2019s Ark Lab + Huawei Technologies Ltd.; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; School of Computer Science & Technology, Beijing Institute of Technology; School of Computer Science & Technology, Beijing Institute of Technology", + "bibtex": "@article{Sun_Li_Mi_Wang_Li_Li_2023, title={Towards Diverse, Relevant and Coherent Open-Domain Dialogue Generation via Hybrid Latent Variables}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26594}, DOI={10.1609/aaai.v37i11.26594}, abstractNote={Conditional variational models, using either continuous or discrete latent variables, are powerful for open-domain dialogue response generation. However, previous works show that continuous latent variables tend to reduce the coherence of generated responses. In this paper, we also found that discrete latent variables have difficulty capturing more diverse expressions. To tackle these problems, we combine the merits of both continuous and discrete latent variables and propose a Hybrid Latent Variable (HLV) method. Specifically, HLV constrains the global semantics of responses through discrete latent variables and enriches responses with continuous latent variables. Thus, we diversify the generated responses while maintaining relevance and coherence. In addition, we propose Conditional Hybrid Variational Transformer (CHVT) to construct and to utilize HLV with transformers for dialogue generation. 
Through fine-grained symbolic-level semantic information and additive Gaussian mixing, we construct the distribution of continuous variables, prompting the generation of diverse expressions. Meanwhile, to maintain the relevance and coherence, the discrete latent variable is optimized by self-separation training. Experimental results on two dialogue generation datasets (DailyDialog and Opensubtitles) show that CHVT is superior to traditional transformer-based variational mechanism w.r.t. diversity, relevance and coherence metrics. Moreover, we also demonstrate the benefit of applying HLV to fine-tuning two pre-trained dialogue models (PLATO and BART-base).}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sun, Bin and Li, Yitong and Mi, Fei and Wang, Weichao and Li, Yiwei and Li, Kan}, year={2023}, month={Jun.}, pages={13600-13608} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26594/26366", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26594", + "pdf_size": 331149, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7563544841314487939&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "bit.edu.cn;huawei.com;huawei.com;huawei.com;bit.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;huawei.com;huawei.com;huawei.com;bit.edu.cn;bit.edu.cn", + "github": "", + "project": "https://arxiv.org/abs/2212.01145", + "author_num": 6, + "aff_unique_index": "0;1+2;1;1;0;0", + "aff_unique_norm": "Beijing Institute of Technology;Huawei;Huawei Technologies", + "aff_unique_dep": "School of Computer Science & Technology;Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.huawei.com;https://www.huawei.com", + "aff_unique_abbr": "BIT;Huawei;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25828", + "title": "Towards 
Efficient and Domain-Agnostic Evasion Attack with High-Dimensional Categorical Inputs", + "track": "main", + "status": "Technical", + "abstract": "Our work targets at searching feasible adversarial perturbation to attack a classifier with high-dimensional categorical inputs in a domain-agnostic setting.\nThis is intrinsically a NP-hard knapsack problem where the exploration space becomes explosively larger as the feature dimension increases. Without the help of domain knowledge, solving this problem via heuristic method, such as Branch-and-Bound, suffers from exponential complexity, yet can bring arbitrarily bad attack results. We address the challenge via the lens of multi-armed bandit based combinatorial search. Our proposed method, namely FEAT, treats modifying each categorical feature as pulling an arm in multi-armed bandit programming. Our objective is to achieve highly efficient and effective attack using an Orthogonal Matching Pursuit (OMP)-enhanced Upper Confidence Bound (UCB) exploration strategy. Our theoretical analysis bounding the regret gap of FEAT guarantees its practical attack performance. In empirical analysis, we compare FEAT with other state-of-the-art domain-agnostic attack methods over various real-world categorical data sets of different applications. Substantial experimental observations confirm the expected efficiency and attack effectiveness of FEAT applied in different application scenarios. 
Our work further hints the applicability of FEAT for assessing the adversarial vulnerability of classification systems with high-dimensional categorical inputs.", + "primary_area": "machine learning i", + "author": "Hongyan Bao; Yufei Han; Yujun Zhou; Xin Gao; Xiangliang Zhang", + "authorids": "", + "aff": "King Abdullah University of Science and Technology; INRIA; King Abdullah University of Science and Technology; King Abdullah University of Science and Technology; University of Notre Dame + King Abdullah University of Science and Technology", + "bibtex": "@article{Bao_Han_Zhou_Gao_Zhang_2023, title={Towards Efficient and Domain-Agnostic Evasion Attack with High-Dimensional Categorical Inputs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25828}, DOI={10.1609/aaai.v37i6.25828}, abstractNote={Our work targets at searching feasible adversarial perturbation to attack a classifier with high-dimensional categorical inputs in a domain-agnostic setting.\nThis is intrinsically a NP-hard knapsack problem where the exploration space becomes explosively larger as the feature dimension increases. Without the help of domain knowledge, solving this problem via heuristic method, such as Branch-and-Bound, suffers from exponential complexity, yet can bring arbitrarily bad attack results. We address the challenge via the lens of multi-armed bandit based combinatorial search. Our proposed method, namely FEAT, treats modifying each categorical feature as pulling an arm in multi-armed bandit programming. Our objective is to achieve highly efficient and effective attack using an Orthogonal Matching Pursuit (OMP)-enhanced Upper Confidence Bound (UCB) exploration strategy. Our theoretical analysis bounding the regret gap of FEAT guarantees its practical attack performance. In empirical analysis, we compare FEAT with other state-of-the-art domain-agnostic attack methods over various real-world categorical data sets of different applications. 
Substantial experimental observations confirm the expected efficiency and attack effectiveness of FEAT applied in different application scenarios. Our work further hints the applicability of FEAT for assessing the adversarial vulnerability of classification systems with high-dimensional categorical inputs.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bao, Hongyan and Han, Yufei and Zhou, Yujun and Gao, Xin and Zhang, Xiangliang}, year={2023}, month={Jun.}, pages={6753-6761} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25828/25600", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25828", + "pdf_size": 179522, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4032323066894525033&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "kaust.edu.sa;gmail.com;kaust.edu.sa;kaust.edu.sa;nd.edu", + "email": "kaust.edu.sa;gmail.com;kaust.edu.sa;kaust.edu.sa;nd.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;2+0", + "aff_unique_norm": "King Abdullah University of Science and Technology;INRIA;University of Notre Dame", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.kast.kau.edu.sa;https://www.inria.fr;https://www.nd.edu", + "aff_unique_abbr": "KAUST;INRIA;Notre Dame", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;2+0", + "aff_country_unique": "Saudi Arabia;France;United States" + }, + { + "id": "article-26967", + "title": "Towards Fair and Selectively Privacy-Preserving Models Using Negative Multi-Task Learning (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Deep learning models have shown great performances in natural language processing tasks. 
While much attention has been paid to improvements in utility, privacy leakage and social bias are two major concerns arising in trained models. In order to tackle these problems, we protect individuals' sensitive information and mitigate gender bias simultaneously. First, we propose a selective privacy-preserving method that only obscures individuals' sensitive information. Then we propose a negative multi-task learning framework to mitigate the gender bias which contains a main task and a gender prediction task. We analyze two existing word embeddings and evaluate them on sentiment analysis and a medical text classification task. Our experimental results show that our negative multi-task learning framework can mitigate the gender bias while keeping models\u2019 utility.", + "primary_area": "", + "author": "Liyuan Gao; Huixin Zhan; Austin Chen; Victor S. Sheng", + "authorids": "", + "aff": "Texas Tech University; Texas Tech University; Lubbock High School; Texas Tech University", + "bibtex": "@article{Gao_Zhan_Chen_Sheng_2024, title={Towards Fair and Selectively Privacy-Preserving Models Using Negative Multi-Task Learning (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26967}, DOI={10.1609/aaai.v37i13.26967}, abstractNote={Deep learning models have shown great performances in natural language processing tasks. While much attention has been paid to improvements in utility, privacy leakage and social bias are two major concerns arising in trained models. In order to tackle these problems, we protect individuals\u2019 sensitive information and mitigate gender bias simultaneously. First, we propose a selective privacy-preserving method that only obscures individuals\u2019 sensitive information. Then we propose a negative multi-task learning framework to mitigate the gender bias which contains a main task and a gender prediction task. 
We analyze two existing word embeddings and evaluate them on sentiment analysis and a medical text classification task. Our experimental results show that our negative multi-task learning framework can mitigate the gender bias while keeping models\u2019 utility.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gao, Liyuan and Zhan, Huixin and Chen, Austin and Sheng, Victor S.}, year={2024}, month={Jul.}, pages={16214-16215} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26967/26739", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26967", + "pdf_size": 72045, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1237501883462105791&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "ttu.edu; ; ; ", + "email": "ttu.edu; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Texas Tech University;Lubbock High School", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ttu.edu;", + "aff_unique_abbr": "TTU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26040", + "title": "Towards Fine-Grained Explainability for Heterogeneous Graph Neural Network", + "track": "main", + "status": "Technical", + "abstract": "Heterogeneous graph neural networks (HGNs) are prominent approaches to node classification tasks on heterogeneous graphs. Despite the superior performance, insights about the predictions made from HGNs are obscure to humans. Existing explainability techniques are mainly proposed for GNNs on homogeneous graphs. They focus on highlighting salient graph objects to the predictions whereas the problem of how these objects affect the predictions remains unsolved. 
Given heterogeneous graphs with complex structures and rich semantics, it is imperative that salient objects can be accompanied with their influence paths to the predictions, unveiling the reasoning process of HGNs. In this paper, we develop xPath, a new framework that provides fine-grained explanations for black-box HGNs specifying a cause node with its influence path to the target node. In xPath, we differentiate the influence of a node on the prediction w.r.t. every individual influence path, and measure the influence by perturbing graph structure via a novel graph rewiring algorithm. Furthermore, we introduce a greedy search algorithm to find the most influential fine-grained explanations efficiently. Empirical results on various HGNs and heterogeneous graphs show that xPath yields faithful explanations efficiently, outperforming the adaptations of advanced GNN explanation approaches.", + "primary_area": "machine learning ii", + "author": "Tong Li; Jiale Deng; Yanyan Shen; Luyu Qiu; Huang Yongxiang; Caleb Chen Cao", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Huawei Research Hong Kong; Huawei Research Hong Kong; Huawei Research Hong Kong", + "bibtex": "@article{Li_Deng_Shen_Qiu_Yongxiang_Cao_2023, title={Towards Fine-Grained Explainability for Heterogeneous Graph Neural Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26040}, DOI={10.1609/aaai.v37i7.26040}, abstractNote={Heterogeneous graph neural networks (HGNs) are prominent approaches to node classification tasks on heterogeneous graphs. Despite the superior performance, insights about the predictions made from HGNs are obscure to humans. Existing explainability techniques are mainly proposed for GNNs on homogeneous graphs. They focus on highlighting salient graph objects to the predictions whereas the problem of how these objects affect the predictions remains unsolved. 
Given heterogeneous graphs with complex structures and rich semantics, it is imperative that salient objects can be accompanied with their influence paths to the predictions, unveiling the reasoning process of HGNs. In this paper, we develop xPath, a new framework that provides fine-grained explanations for black-box HGNs specifying a cause node with its influence path to the target node. In xPath, we differentiate the influence of a node on the prediction w.r.t. every individual influence path, and measure the influence by perturbing graph structure via a novel graph rewiring algorithm. Furthermore, we introduce a greedy search algorithm to find the most influential fine-grained explanations efficiently. Empirical results on various HGNs and heterogeneous graphs show that xPath yields faithful explanations efficiently, outperforming the adaptations of advanced GNN explanation approaches.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Tong and Deng, Jiale and Shen, Yanyan and Qiu, Luyu and Yongxiang, Huang and Cao, Caleb Chen}, year={2023}, month={Jun.}, pages={8640-8647} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26040/25812", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26040", + "pdf_size": 359100, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14231443444711606594&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;huawei.com;huawei.com;huawei.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;huawei.com;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;1", + "aff_unique_norm": "Shanghai Jiao Tong University;Huawei Research", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.huawei.com/research", + "aff_unique_abbr": "SJTU;Huawei", + "aff_campus_unique_index": "1;1;1", + 
"aff_campus_unique": ";Hong Kong", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25426", + "title": "Towards Global Video Scene Segmentation with Context-Aware Transformer", + "track": "main", + "status": "Technical", + "abstract": "Videos such as movies or TV episodes usually need to divide the long storyline into cohesive units, i.e., scenes, to facilitate the understanding of video semantics. The key challenge lies in finding the boundaries of scenes by comprehensively considering the complex temporal structure and semantic information. To this end, we introduce a novel Context-Aware Transformer (CAT) with a self-supervised learning framework to learn high-quality shot representations, for generating well-bounded scenes. More specifically, we design the CAT with local-global self-attentions, which can effectively consider both the long-term and short-term context to improve the shot encoding. For training the CAT, we adopt the self-supervised learning schema. Firstly, we leverage shot-to-scene level pretext tasks to facilitate the pre-training with pseudo boundary, which guides CAT to learn the discriminative shot representations that maximize intra-scene similarity and inter-scene discrimination in an unsupervised manner. Then, we transfer contextual representations for fine-tuning the CAT with supervised data, which encourages CAT to accurately detect the boundary for scene segmentation. As a result, CAT is able to learn the context-aware shot representations and provides global guidance for scene segmentation. Our empirical analyses show that CAT can achieve state-of-the-art performance when conducting the scene segmentation task on the MovieNet dataset, e.g., offering 2.15 improvements on AP.", + "primary_area": "computer vision iii", + "author": "Yang Yang; Yurui Huang; Weili Guo; Baohua Xu; Dingyin Xia", + "authorids": "", + "aff": "Nanjing University of Science and Technology + MIIT Key Lab. 
of Pattern Analysis and Machine Intelligence, NUAA + State Key Lab. for Novel Software Technology, NJU; Nanjing University of Science and Technology + HUAWEI CBG Edu AI Lab; Nanjing University of Science and Technology; HUAWEI CBG Edu AI Lab; HUAWEI CBG Edu AI Lab", + "bibtex": "@article{Yang_Huang_Guo_Xu_Xia_2023, title={Towards Global Video Scene Segmentation with Context-Aware Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25426}, DOI={10.1609/aaai.v37i3.25426}, abstractNote={Videos such as movies or TV episodes usually need to divide the long storyline into cohesive units, i.e., scenes, to facilitate the understanding of video semantics. The key challenge lies in finding the boundaries of scenes by comprehensively considering the complex temporal structure and semantic information. To this end, we introduce a novel Context-Aware Transformer (CAT) with a self-supervised learning framework to learn high-quality shot representations, for generating well-bounded scenes. More specifically, we design the CAT with local-global self-attentions, which can effectively consider both the long-term and short-term context to improve the shot encoding. For training the CAT, we adopt the self-supervised learning schema. Firstly, we leverage shot-to-scene level pretext tasks to facilitate the pre-training with pseudo boundary, which guides CAT to learn the discriminative shot representations that maximize intra-scene similarity and inter-scene discrimination in an unsupervised manner. Then, we transfer contextual representations for fine-tuning the CAT with supervised data, which encourages CAT to accurately detect the boundary for scene segmentation. As a result, CAT is able to learn the context-aware shot representations and provides global guidance for scene segmentation. 
Our empirical analyses show that CAT can achieve state-of-the-art performance when conducting the scene segmentation task on the MovieNet dataset, e.g., offering 2.15 improvements on AP.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Yang and Huang, Yurui and Guo, Weili and Xu, Baohua and Xia, Dingyin}, year={2023}, month={Jun.}, pages={3206-3213} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25426/25198", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25426", + "pdf_size": 453274, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10637743320808220559&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "njust.edu.cn;njust.edu.cn;njust.edu.cn;huawei.com;huawei.com", + "email": "njust.edu.cn;njust.edu.cn;njust.edu.cn;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+2;0+3;0;3;3", + "aff_unique_norm": "Nanjing University of Science and Technology;Nanjing University of Aeronautics and Astronautics;Nanjing University;HUAWEI", + "aff_unique_dep": ";Key Lab. of Pattern Analysis and Machine Intelligence;State Key Lab. for Novel Software Technology;Edu AI Lab", + "aff_unique_url": "http://www.nust.edu.cn/;http://www.nuaa.edu.cn;http://www.nju.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "NUST;NUAA;Nanjing University;HUAWEI", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25378", + "title": "Towards Good Practices for Missing Modality Robust Action Recognition", + "track": "main", + "status": "Technical", + "abstract": "Standard multi-modal models assume the use of the same modalities in training and inference stages. However, in practice, the environment in which multi-modal models operate may not satisfy such assumption. 
As such, their performances degrade drastically if any modality is missing in the inference stage. We ask: how can we train a model that is robust to missing modalities? This paper seeks a set of good practices for multi-modal action recognition, with a particular interest in circumstances where some modalities are not available at an inference time. First, we show how to effectively regularize the model during training (e.g., data augmentation). Second, we investigate on fusion methods for robustness to missing modalities: we find that transformer-based fusion shows better robustness for missing modality than summation or concatenation. Third, we propose a simple modular network, ActionMAE, which learns missing modality predictive coding by randomly dropping modality features and tries to reconstruct them with the remaining modality features. Coupling these good practices, we build a model that is not only effective in multi-modal action recognition but also robust to modality missing. Our model achieves the state-of-the-arts on multiple benchmarks and maintains competitive performances even in missing modality scenarios.", + "primary_area": "computer vision iii", + "author": "Sangmin Woo; Sumin Lee; Yeonju Park; Muhammad Adi Nugroho; Changick Kim", + "authorids": "", + "aff": "Korea Advanced Institue of Science and Technology (KAIST); Korea Advanced Institue of Science and Technology (KAIST); Korea Advanced Institue of Science and Technology (KAIST); Korea Advanced Institue of Science and Technology (KAIST); Korea Advanced Institue of Science and Technology (KAIST)", + "bibtex": "@article{Woo_Lee_Park_Nugroho_Kim_2023, title={Towards Good Practices for Missing Modality Robust Action Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25378}, DOI={10.1609/aaai.v37i3.25378}, abstractNote={Standard multi-modal models assume the use of the same modalities in training and inference stages. 
However, in practice, the environment in which multi-modal models operate may not satisfy such assumption. As such, their performances degrade drastically if any modality is missing in the inference stage. We ask: how can we train a model that is robust to missing modalities? This paper seeks a set of good practices for multi-modal action recognition, with a particular interest in circumstances where some modalities are not available at an inference time. First, we show how to effectively regularize the model during training (e.g., data augmentation). Second, we investigate on fusion methods for robustness to missing modalities: we find that transformer-based fusion shows better robustness for missing modality than summation or concatenation. Third, we propose a simple modular network, ActionMAE, which learns missing modality predictive coding by randomly dropping modality features and tries to reconstruct them with the remaining modality features. Coupling these good practices, we build a model that is not only effective in multi-modal action recognition but also robust to modality missing. 
Our model achieves the state-of-the-arts on multiple benchmarks and maintains competitive performances even in missing modality scenarios.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Woo, Sangmin and Lee, Sumin and Park, Yeonju and Nugroho, Muhammad Adi and Kim, Changick}, year={2023}, month={Jun.}, pages={2776-2784} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25378/25150", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25378", + "pdf_size": 881443, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12201580213000599393&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26856", + "title": "Towards Hybrid Automation by Bootstrapping Conversational Interfaces for IT Operation Tasks", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Process automation has evolved from end-to-end automation of repetitive process branches to hybrid automation where bots perform some activities and humans serve other activities. In the context of knowledge-intensive processes such as IT operations, implementing hybrid automation is a natural choice where robots can perform certain mundane functions, with humans taking over the decision of when and which IT systems need to act. 
Recently, ChatOps, which refers to conversation-driven collaboration for IT operations, has rapidly accelerated efficiency by providing a cross-organization and cross-domain platform to resolve and manage issues as soon as possible. Hence, providing a natural language interface to bots is a logical progression to enable collaboration between humans and bots. This work presents a no-code approach to provide a conversational interface that enables human workers to collaborate with bots executing automation scripts. The bots identify the intent of users' requests and automatically orchestrate one or more relevant automation tasks to serve the request. We further detail our process of mining the conversations between humans and bots to monitor performance and identify the scope for improvement in service quality.", + "primary_area": "emerging applications of ai", + "author": "Jayachandu Bandlamudi; Kushal Mukherjee; Prerna Agarwal; Sampath Dechu; Siyu Huo; Vatche Isahagian; Vinod Muthusamy; Naveen Purushothaman; Renuka Sindhgatta", + "authorids": "", + "aff": "IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Consulting; IBM Research", + "bibtex": "@article{Bandlamudi_Mukherjee_Agarwal_Dechu_Huo_Isahagian_Muthusamy_Purushothaman_Sindhgatta_2024, title={Towards Hybrid Automation by Bootstrapping Conversational Interfaces for IT Operation Tasks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26856}, DOI={10.1609/aaai.v37i13.26856}, abstractNote={Process automation has evolved from end-to-end automation of repetitive process branches to hybrid automation where bots perform some activities and humans serve other activities. In the context of knowledge-intensive processes such as IT operations, implementing hybrid automation is a natural choice where robots can perform certain mundane functions, with humans taking over the decision of when and which IT systems need to act. 
Recently, ChatOps, which refers to conversation-driven collaboration for IT operations, has rapidly accelerated efficiency by providing a cross-organization and cross-domain platform to resolve and manage issues as soon as possible. Hence, providing a natural language interface to bots is a logical progression to enable collaboration between humans and bots. This work presents a no-code approach to provide a conversational interface that enables human workers to collaborate with bots executing automation scripts. The bots identify the intent of users\u2019 requests and automatically orchestrate one or more relevant automation tasks to serve the request. We further detail our process of mining the conversations between humans and bots to monitor performance and identify the scope for improvement in service quality.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bandlamudi, Jayachandu and Mukherjee, Kushal and Agarwal, Prerna and Dechu, Sampath and Huo, Siyu and Isahagian, Vatche and Muthusamy, Vinod and Purushothaman, Naveen and Sindhgatta, Renuka}, year={2024}, month={Jul.}, pages={15654-15660} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26856/26628", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26856", + "pdf_size": 874890, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5403007021773744961&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26230", + "title": "Towards In-Distribution 
Compatible Out-of-Distribution Detection", + "track": "main", + "status": "Technical", + "abstract": "Deep neural network, despite its remarkable capability of discriminating targeted in-distribution samples, shows poor performance on detecting anomalous out-of-distribution data. To address this defect, state-of-the-art solutions choose to train deep networks on an auxiliary dataset of outliers. Various training criteria for these auxiliary outliers are proposed based on heuristic intuitions. However, we find that these intuitively designed outlier training criteria can hurt in-distribution learning and eventually lead to inferior performance. To this end, we identify three causes of the in-distribution incompatibility: contradictory gradient, false likelihood, and distribution shift. Based on our new understandings, we propose a new out-of-distribution detection method by adapting both the top-design of deep models and the loss function. Our method achieves in-distribution compatibility by pursuing less interference with the probabilistic characteristic of in-distribution features. 
On several benchmarks, our method not only achieves the state-of-the-art out-of-distribution detection performance but also improves the in-distribution accuracy.", + "primary_area": "machine learning iv", + "author": "Boxi Wu; Jie Jiang; Haidong Ren; Zifan Du; Wenxiao Wang; Zhifeng Li; Deng Cai; Xiaofei He; Binbin Lin; Wei Liu", + "authorids": "", + "aff": "State Key Lab of CAD&CG, Zhejiang University; Tencent Data Platform; Ningbo Zhoushan Port Group Co.,Ltd., Ningbo, China; School of Software Technology, Zhejiang University; School of Software Technology, Zhejiang University; Tencent Data Platform; State Key Lab of CAD&CG, Zhejiang University; State Key Lab of CAD&CG, Zhejiang University; School of Software Technology, Zhejiang University; Tencent Data Platform", + "bibtex": "@article{Wu_Jiang_Ren_Du_Wang_Li_Cai_He_Lin_Liu_2023, title={Towards In-Distribution Compatible Out-of-Distribution Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26230}, DOI={10.1609/aaai.v37i9.26230}, abstractNote={Deep neural network, despite its remarkable capability of discriminating targeted in-distribution samples, shows poor performance on detecting anomalous out-of-distribution data. To address this defect, state-of-the-art solutions choose to train deep networks on an auxiliary dataset of outliers. Various training criteria for these auxiliary outliers are proposed based on heuristic intuitions. However, we find that these intuitively designed outlier training criteria can hurt in-distribution learning and eventually lead to inferior performance. To this end, we identify three causes of the in-distribution incompatibility: contradictory gradient, false likelihood, and distribution shift. Based on our new understandings, we propose a new out-of-distribution detection method by adapting both the top-design of deep models and the loss function. 
Our method achieves in-distribution compatibility by pursuing less interference with the probabilistic characteristic of in-distribution features. On several benchmarks, our method not only achieves the state-of-the-art out-of-distribution detection performance but also improves the in-distribution accuracy.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Boxi and Jiang, Jie and Ren, Haidong and Du, Zifan and Wang, Wenxiao and Li, Zhifeng and Cai, Deng and He, Xiaofei and Lin, Binbin and Liu, Wei}, year={2023}, month={Jun.}, pages={10333-10341} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26230/26002", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26230", + "pdf_size": 879127, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9859624448581957511&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "; ; ; ; ; ; ; ;zju.edu.cn; ", + "email": "; ; ; ; ; ; ; ;zju.edu.cn; ", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;1;2;0;0;1;0;0;0;1", + "aff_unique_norm": "Zhejiang University;Tencent;Ningbo Zhoushan Port Group Co., Ltd.", + "aff_unique_dep": "State Key Lab of CAD&CG;Data Platform;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.tencent.com;", + "aff_unique_abbr": "ZJU;Tencent;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26048", + "title": "Towards Inference Efficient Deep Ensemble Learning", + "track": "main", + "status": "Technical", + "abstract": "Ensemble methods can deliver surprising performance gains but also bring significantly higher computational costs, e.g., can be up to 2048X in large-scale ensemble tasks. However, we found that the majority of computations in ensemble methods are redundant. 
For instance, over 77% of samples in CIFAR-100 dataset can be correctly classified with only a single ResNet-18 model, which indicates that only around 23% of the samples need an ensemble of extra models. To this end, we propose an inference efficient ensemble learning method, to simultaneously optimize for effectiveness and efficiency in ensemble learning. More specifically, we regard ensemble of models as a sequential inference process and learn the optimal halting event for inference on a specific sample. At each timestep of the inference process, a common selector judges if the current ensemble has reached ensemble effectiveness and halt further inference, otherwise filters this challenging sample for the subsequent models to conduct more powerful ensemble. Both the base models and common selector are jointly optimized to dynamically adjust ensemble inference for different samples with various hardness, through the novel optimization goals including sequential ensemble boosting and computation saving. The experiments with different backbones on real-world datasets illustrate our method can bring up to 56% inference cost reduction while maintaining comparable performance to full ensemble, achieving significantly better ensemble utility than other baselines. Code and supplemental materials are available at https://seqml.github.io/irene.", + "primary_area": "machine learning ii", + "author": "Ziyue Li; Kan Ren; Yifan Yang; Xinyang Jiang; Yuqing Yang; Dongsheng Li", + "authorids": "", + "aff": "Microsoft Research*; Microsoft Research; ; ; ; Microsoft Research", + "bibtex": "@article{Li_Ren_Yang_Jiang_Yang_Li_2023, title={Towards Inference Efficient Deep Ensemble Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26048}, DOI={10.1609/aaai.v37i7.26048}, abstractNote={Ensemble methods can deliver surprising performance gains but also bring significantly higher computational costs, e.g., can be up to 2048X in large-scale ensemble tasks. 
However, we found that the majority of computations in ensemble methods are redundant. For instance, over 77% of samples in CIFAR-100 dataset can be correctly classified with only a single ResNet-18 model, which indicates that only around 23% of the samples need an ensemble of extra models. To this end, we propose an inference efficient ensemble learning method, to simultaneously optimize for effectiveness and efficiency in ensemble learning. More specifically, we regard ensemble of models as a sequential inference process and learn the optimal halting event for inference on a specific sample. At each timestep of the inference process, a common selector judges if the current ensemble has reached ensemble effectiveness and halt further inference, otherwise filters this challenging sample for the subsequent models to conduct more powerful ensemble. Both the base models and common selector are jointly optimized to dynamically adjust ensemble inference for different samples with various hardness, through the novel optimization goals including sequential ensemble boosting and computation saving. The experiments with different backbones on real-world datasets illustrate our method can bring up to 56% inference cost reduction while maintaining comparable performance to full ensemble, achieving significantly better ensemble utility than other baselines. 
Code and supplemental materials are available at https://seqml.github.io/irene.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Ziyue and Ren, Kan and Yang, Yifan and Jiang, Xinyang and Yang, Yuqing and Li, Dongsheng}, year={2023}, month={Jun.}, pages={8711-8719} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26048/25820", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26048", + "pdf_size": 613231, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15968454660833148663&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;microsoft.com; ; ; ; ", + "email": "gmail.com;microsoft.com; ; ; ; ", + "github": "", + "project": "https://seqml.github.io/irene", + "author_num": 6, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Microsoft Corporation", + "aff_unique_dep": "Microsoft Research", + "aff_unique_url": "https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26095", + "title": "Towards Interpreting and Utilizing Symmetry Property in Adversarial Examples", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we identify symmetry property in adversarial scenario by viewing adversarial attack in a fine-grained manner. A newly designed metric called attack proportion, is thus proposed to count the proportion of the adversarial examples misclassified between classes. We observe that the distribution of attack proportion is unbalanced as each class shows vulnerability to particular classes. Further, some class pairs correlate strongly and have the same degree of attack proportion for each other. We call this intriguing phenomenon symmetry property. 
We empirically prove this phenomenon is widespread and then analyze the reason behind the existence of symmetry property. This explanation, to some extent, could be utilized to understand robust models, which also inspires us to strengthen adversarial defenses.", + "primary_area": "machine learning iii", + "author": "Shibin Mei; Chenglong Zhao; Bingbing Ni; Shengchao Yuan", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Mei_Zhao_Ni_Yuan_2023, title={Towards Interpreting and Utilizing Symmetry Property in Adversarial Examples}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26095}, DOI={10.1609/aaai.v37i8.26095}, abstractNote={In this paper, we identify symmetry property in adversarial scenario by viewing adversarial attack in a fine-grained manner. A newly designed metric called attack proportion, is thus proposed to count the proportion of the adversarial examples misclassified between classes. We observe that the distribution of attack proportion is unbalanced as each class shows vulnerability to particular classes. Further, some class pairs correlate strongly and have the same degree of attack proportion for each other. We call this intriguing phenomenon symmetry property. We empirically prove this phenomenon is widespread and then analyze the reason behind the existence of symmetry property. 
This explanation, to some extent, could be utilized to understand robust models, which also inspires us to strengthen adversarial defenses.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mei, Shibin and Zhao, Chenglong and Ni, Bingbing and Yuan, Shengchao}, year={2023}, month={Jun.}, pages={9126-9133} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26095/25867", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26095", + "pdf_size": 5813219, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12787673474200265714&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26656", + "title": "Towards Learning to Discover Money Laundering Sub-network in Massive Transaction Network", + "track": "aaai special track", + "status": "Technical", + "abstract": "Anti-money laundering (AML) systems play a critical role in safeguarding global economy. As money laundering is considered as one of the top group crimes, there is a crucial need to discover money laundering sub-network behind a particular money laundering transaction for a robust AML system. However, existing rule-based methods for money laundering sub-network discovery is heavily based on domain knowledge and may lag behind the modus operandi of launderers. 
Therefore, in this work, we first address the money laundering sub-network discovery problem with a neural network based approach, and propose an AML framework AMAP equipped with an adaptive sub-network proposer. In particular, we design an adaptive sub-network proposer guided by a supervised contrastive loss to discriminate money laundering transactions from massive benign transactions. We conduct extensive experiments on real-world datasets in AliPay of Ant Group. The result demonstrates the effectiveness of our AMAP in both money laundering transaction detection and money laundering sub-network discovering. The learned framework which yields money laundering sub-network from massive transaction network leads to a more comprehensive risk coverage and a deeper insight to money laundering strategies.", + "primary_area": "ai for social impact", + "author": "Ziwei Chai; Yang Yang; Jiawang Dan; Sheng Tian; Changhua Meng; Weiqiang Wang; Yifei Sun", + "authorids": "", + "aff": "Zhejiang University; Zhejiang University; Ant Group; Ant Group; Ant Group; Ant Group; Zhejiang University", + "bibtex": "@article{Chai_Yang_Dan_Tian_Meng_Wang_Sun_2023, title={Towards Learning to Discover Money Laundering Sub-network in Massive Transaction Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26656}, DOI={10.1609/aaai.v37i12.26656}, abstractNote={Anti-money laundering (AML) systems play a critical role in safeguarding global economy. As money laundering is considered as one of the top group crimes, there is a crucial need to discover money laundering sub-network behind a particular money laundering transaction for a robust AML system. However, existing rule-based methods for money laundering sub-network discovery is heavily based on domain knowledge and may lag behind the modus operandi of launderers. 
Therefore, in this work, we first address the money laundering sub-network discovery problem with a neural network based approach, and propose an AML framework AMAP equipped with an adaptive sub-network proposer. In particular, we design an adaptive sub-network proposer guided by a supervised contrastive loss to discriminate money laundering transactions from massive benign transactions. We conduct extensive experiments on real-world datasets in AliPay of Ant Group. The result demonstrates the effectiveness of our AMAP in both money laundering transaction detection and money laundering sub-network discovering. The learned framework which yields money laundering sub-network from massive transaction network leads to a more comprehensive risk coverage and a deeper insight to money laundering strategies.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chai, Ziwei and Yang, Yang and Dan, Jiawang and Tian, Sheng and Meng, Changhua and Wang, Weiqiang and Sun, Yifei}, year={2023}, month={Jun.}, pages={14153-14160} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26656/26428", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26656", + "pdf_size": 520376, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13268936316006553838&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;antgroup.com;antgroup.com;antgroup.com;antgroup.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;antgroup.com;antgroup.com;antgroup.com;antgroup.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;1;1;1;0", + "aff_unique_norm": "Zhejiang University;Ant Group", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.zju.edu.cn;https://www.antgroup.com", + "aff_unique_abbr": "ZJU;Ant Group", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "article-25986", + "title": "Towards More Robust Interpretation via Local Gradient Alignment", + "track": "main", + "status": "Technical", + "abstract": "Neural network interpretation methods, particularly feature attribution methods, are known to be fragile with respect to adversarial input perturbations. \nTo address this, several methods for enhancing the local smoothness of the gradient while training have been proposed for attaining robust feature attributions.\nHowever, the lack of considering the normalization of the attributions, which is essential in their visualizations, has been an obstacle to understanding and improving the robustness of feature attribution methods. \nIn this paper, we provide new insights by taking such normalization into account. First, we show that for every non-negative homogeneous neural network, a naive l2-robust criterion for gradients is not normalization invariant, which means that two functions with the same normalized gradient can have different values. \nSecond, we formulate a normalization invariant cosine distance-based criterion and derive its upper bound, which gives insight for why simply minimizing the Hessian norm at the input, as has been done in previous work, is not sufficient for attaining robust feature attribution. Finally, we propose to combine both l2 and cosine distance-based criteria as regularization terms to leverage the advantages of both in aligning the local gradient. As a result, we experimentally show that models trained with our method produce much more robust interpretations on CIFAR-10 and ImageNet-100 without significantly hurting the accuracy, compared to the recent baselines. 
To the best of our knowledge, this is the first work to verify the robustness of interpretation on a larger-scale dataset beyond CIFAR-10, thanks to the computational efficiency of our method.", + "primary_area": "machine learning ii", + "author": "Sunghwan Joo; SeokHyeon Jeong; Juyeon Heo; Adrian Weller; Taesup Moon", + "authorids": "", + "aff": "Department of ECE, Sungkyunkwan University; Department of ECE, Seoul National University; University of Cambridge; University of Cambridge; ASRI/INMC/IPAI/AIIS, Seoul National University", + "bibtex": "@article{Joo_Jeong_Heo_Weller_Moon_2023, title={Towards More Robust Interpretation via Local Gradient Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25986}, DOI={10.1609/aaai.v37i7.25986}, abstractNote={Neural network interpretation methods, particularly feature attribution methods, are known to be fragile with respect to adversarial input perturbations. To address this, several methods for enhancing the local smoothness of the gradient while training have been proposed for attaining robust feature attributions.\nHowever, the lack of considering the normalization of the attributions, which is essential in their visualizations, has been an obstacle to understanding and improving the robustness of feature attribution methods. In this paper, we provide new insights by taking such normalization into account. First, we show that for every non-negative homogeneous neural network, a naive l2-robust criterion for gradients is not normalization invariant, which means that two functions with the same normalized gradient can have different values. Second, we formulate a normalization invariant cosine distance-based criterion and derive its upper bound, which gives insight for why simply minimizing the Hessian norm at the input, as has been done in previous work, is not sufficient for attaining robust feature attribution. 
Finally, we propose to combine both l2 and cosine distance-based criteria as regularization terms to leverage the advantages of both in aligning the local gradient. As a result, we experimentally show that models trained with our method produce much more robust interpretations on CIFAR-10 and ImageNet-100 without significantly hurting the accuracy, compared to the recent baselines. To the best of our knowledge, this is the first work to verify the robustness of interpretation on a larger-scale dataset beyond CIFAR-10, thanks to the computational efficiency of our method.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Joo, Sunghwan and Jeong, SeokHyeon and Heo, Juyeon and Weller, Adrian and Moon, Taesup}, year={2023}, month={Jun.}, pages={8168-8176} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25986/25758", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25986", + "pdf_size": 6090733, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5408768252564732795&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "gmail.com;snu.ac.kr;cam.ac.uk;cam.ac.uk;snu.ac.kr", + "email": "gmail.com;snu.ac.kr;cam.ac.uk;cam.ac.uk;snu.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;1", + "aff_unique_norm": "Sungkyunkwan University;Seoul National University;University of Cambridge", + "aff_unique_dep": "Department of ECE;Department of Electrical and Computer Engineering;", + "aff_unique_url": "http://www.sungkyunkwan.ac.kr;https://www.snu.ac.kr;https://www.cam.ac.uk", + "aff_unique_abbr": "SKKU;SNU;Cambridge", + "aff_campus_unique_index": "1;2;2;1", + "aff_campus_unique": ";Seoul;Cambridge", + "aff_country_unique_index": "0;0;1;1;0", + "aff_country_unique": "South Korea;United Kingdom" + }, + { + "id": "article-26247", + "title": "Towards Optimal Randomized Strategies in Adversarial Example Game", + "track": "main", + 
"status": "Technical", + "abstract": "The vulnerability of deep neural network models to adversarial example attacks is a practical challenge in many artificial intelligence applications. A recent line of work shows that the use of randomization in adversarial training is the key to find optimal strategies against adversarial example attacks. However, in a fully randomized setting where both the defender and the attacker can use randomized strategies, there is no efficient algorithm for finding such an optimal strategy. To fill the gap, we propose the first algorithm of its kind, called FRAT, which models the problem with a new infinite-dimensional continuous-time flow on probability distribution spaces. FRAT maintains a lightweight mixture of models for the defender, with flexibility to efficiently update mixing weights and model parameters at each iteration. Furthermore, FRAT utilizes lightweight sampling subroutines to construct a random strategy for the attacker. We prove that the continuous-time limit of FRAT converges to a mixed Nash equilibrium in a zero-sum game formed by a defender and an attacker. 
Experimental results also demonstrate the efficiency of FRAT on CIFAR-10 and CIFAR-100 datasets.", + "primary_area": "machine learning iv", + "author": "Jiahao Xie; Chao Zhang; Weijie Liu; Wensong Bai; Hui Qian", + "authorids": "", + "aff": "College of Computer Science and Technology, Zhejiang University; Advanced Technology Institute, Zhejiang University; Qiushi Academy for Advanced Studies, Zhejiang University+College of Computer Science and Technology, Zhejiang University; Advanced Technology Institute, Zhejiang University; State Key Lab of CAD&CG, Zhejiang University+College of Computer Science and Technology, Zhejiang University", + "bibtex": "@article{Xie_Zhang_Liu_Bai_Qian_2023, title={Towards Optimal Randomized Strategies in Adversarial Example Game}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26247}, DOI={10.1609/aaai.v37i9.26247}, abstractNote={The vulnerability of deep neural network models to adversarial example attacks is a practical challenge in many artificial intelligence applications. A recent line of work shows that the use of randomization in adversarial training is the key to find optimal strategies against adversarial example attacks. However, in a fully randomized setting where both the defender and the attacker can use randomized strategies, there is no efficient algorithm for finding such an optimal strategy. To fill the gap, we propose the first algorithm of its kind, called FRAT, which models the problem with a new infinite-dimensional continuous-time flow on probability distribution spaces. FRAT maintains a lightweight mixture of models for the defender, with flexibility to efficiently update mixing weights and model parameters at each iteration. Furthermore, FRAT utilizes lightweight sampling subroutines to construct a random strategy for the attacker. We prove that the continuous-time limit of FRAT converges to a mixed Nash equilibrium in a zero-sum game formed by a defender and an attacker. 
Experimental results also demonstrate the efficiency of FRAT on CIFAR-10 and CIFAR-100 datasets.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xie, Jiahao and Zhang, Chao and Liu, Weijie and Bai, Wensong and Qian, Hui}, year={2023}, month={Jun.}, pages={10490-10498} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26247/26019", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26247", + "pdf_size": 435091, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4397522748696564452&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+0;0;0+0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "College of Computer Science and Technology", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25350", + "title": "Towards Real-Time Panoptic Narrative Grounding by an End-to-End Grounding Network", + "track": "main", + "status": "Technical", + "abstract": "Panoptic Narrative Grounding (PNG) is an emerging cross-modal grounding task, which locates the target regions of an image corresponding to the text description.\nExisting approaches for PNG are mainly based on a two-stage paradigm, which is computationally expensive. In this paper, we propose a one-stage network for real-time PNG, termed End-to-End Panoptic Narrative Grounding network (EPNG), which directly generates masks for referents. 
Specifically, we propose two innovative designs, i.e., Locality-Perceptive Attention (LPA) and a bidirectional Semantic Alignment Loss (SAL), to properly handle the many-to-many relationship between textual expressions and visual objects. LPA embeds the local spatial priors into attention modeling, i.e., a pixel may belong to multiple masks at different scales, thereby improving segmentation. To help understand the complex semantic relationships, SAL proposes a bidirectional contrastive objective to regularize the semantic consistency inter modalities. Extensive experiments on the PNG benchmark dataset demonstrate the effectiveness and efficiency of our method. Compared to the single-stage baseline, our method achieves a significant improvement of up to 9.4% accuracy. More importantly, our EPNG is 10 times faster than the two-stage model. Meanwhile, the generalization ability of EPNG is also validated by zero-shot experiments on other grounding tasks. The source codes and trained models for all our experiments are publicly available at https://github.com/Mr-Neko/EPNG.git.", + "primary_area": "computer vision ii", + "author": "Haowei Wang; Jiayi Ji; Yiyi Zhou; Yongjian Wu; Xiaoshuai Sun", + "authorids": "", + "aff": "Media Analytics and Computing Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, 361005, China; Media Analytics and Computing Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, 361005, China; Media Analytics and Computing Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, 361005, China + Institute of Artificial Intelligence, Xiamen University, China + Fujian Engineering Research Center of Trusted Artificial Intelligence Analysis and Application, Xiamen University, China; Tencent Youtu Lab, Shanghai, China; Media Analytics and Computing Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University, 361005, China + Institute of 
Artificial Intelligence, Xiamen University, China + Fujian Engineering Research Center of Trusted Artificial Intelligence Analysis and Application, Xiamen University, China", + "bibtex": "@article{Wang_Ji_Zhou_Wu_Sun_2023, title={Towards Real-Time Panoptic Narrative Grounding by an End-to-End Grounding Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25350}, DOI={10.1609/aaai.v37i2.25350}, abstractNote={Panoptic Narrative Grounding (PNG) is an emerging cross-modal grounding task, which locates the target regions of an image corresponding to the text description.\nExisting approaches for PNG are mainly based on a two-stage paradigm, which is computationally expensive. In this paper, we propose a one-stage network for real-time PNG, termed End-to-End Panoptic Narrative Grounding network (EPNG), which directly generates masks for referents. Specifically, we propose two innovative designs, i.e., Locality-Perceptive Attention (LPA) and a bidirectional Semantic Alignment Loss (SAL), to properly handle the many-to-many relationship between textual expressions and visual objects. LPA embeds the local spatial priors into attention modeling, i.e., a pixel may belong to multiple masks at different scales, thereby improving segmentation. To help understand the complex semantic relationships, SAL proposes a bidirectional contrastive objective to regularize the semantic consistency inter modalities. Extensive experiments on the PNG benchmark dataset demonstrate the effectiveness and efficiency of our method. Compared to the single-stage baseline, our method achieves a significant improvement of up to 9.4% accuracy. More importantly, our EPNG is 10 times faster than the two-stage model. Meanwhile, the generalization ability of EPNG is also validated by zero-shot experiments on other grounding tasks. 
The source codes and trained models for all our experiments are publicly available at https://github.com/Mr-Neko/EPNG.git.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Haowei and Ji, Jiayi and Zhou, Yiyi and Wu, Yongjian and Sun, Xiaoshuai}, year={2023}, month={Jun.}, pages={2528-2536} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25350/25122", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25350", + "pdf_size": 1090502, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1719113034154376106&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xmu.edu.cn;gmail.com;xmu.edu.cn;tencent.com;xmu.edu.cn", + "email": "stu.xmu.edu.cn;gmail.com;xmu.edu.cn;tencent.com;xmu.edu.cn", + "github": "https://github.com/Mr-Neko/EPNG.git", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+0+0;1;0+0+0", + "aff_unique_norm": "Xiamen University;Tencent", + "aff_unique_dep": "Department of Artificial Intelligence;Youtu Lab", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "XMU;Tencent", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0;0+0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25232", + "title": "Towards Real-Time Segmentation on the Edge", + "track": "main", + "status": "Technical", + "abstract": "The research in real-time segmentation mainly focuses on desktop GPUs. \nHowever, autonomous driving and many other applications rely on real-time segmentation on the edge, and current arts are far from the goal. \nIn addition, recent advances in vision transformers also inspire us to re-design the network architecture for dense prediction task. 
\nIn this work, we propose to combine the self attention block with lightweight convolutions to form new building blocks, and employ latency constraints to search an efficient sub-network. \nWe train an MLP latency model based on generated architecture configurations and their latency measured on mobile devices, so that we can predict the latency of subnets during search phase. \nTo the best of our knowledge, we are the first to achieve over 74% mIoU on Cityscapes with semi-real-time inference (over 15 FPS) on mobile GPU from an off-the-shelf phone.", + "primary_area": "computer vision ii", + "author": "Yanyu Li; Changdi Yang; Pu Zhao; Geng Yuan; Wei Niu; Jiexiong Guan; Hao Tang; Minghai Qin; Qing Jin; Bin Ren; Xue Lin; Yanzhi Wang", + "authorids": "", + "aff": "Northeastern University; Northeastern University; Northeastern University; Northeastern University; College of William & Mary; College of William & Mary; CVL, ETH Zurich; Northeastern University; Northeastern University; College of William & Mary; Northeastern University; Northeastern University", + "bibtex": "@article{Li_Yang_Zhao_Yuan_Niu_Guan_Tang_Qin_Jin_Ren_Lin_Wang_2023, title={Towards Real-Time Segmentation on the Edge}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25232}, DOI={10.1609/aaai.v37i2.25232}, abstractNote={The research in real-time segmentation mainly focuses on desktop GPUs. However, autonomous driving and many other applications rely on real-time segmentation on the edge, and current arts are far from the goal. In addition, recent advances in vision transformers also inspire us to re-design the network architecture for dense prediction task. In this work, we propose to combine the self attention block with lightweight convolutions to form new building blocks, and employ latency constraints to search an efficient sub-network. 
We train an MLP latency model based on generated architecture configurations and their latency measured on mobile devices, so that we can predict the latency of subnets during search phase. To the best of our knowledge, we are the first to achieve over 74% mIoU on Cityscapes with semi-real-time inference (over 15 FPS) on mobile GPU from an off-the-shelf phone.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Yanyu and Yang, Changdi and Zhao, Pu and Yuan, Geng and Niu, Wei and Guan, Jiexiong and Tang, Hao and Qin, Minghai and Jin, Qing and Ren, Bin and Lin, Xue and Wang, Yanzhi}, year={2023}, month={Jun.}, pages={1468-1476} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25232/25004", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25232", + "pdf_size": 4688029, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11870451025800988212&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;email.wm.edu;email.wm.edu;vision.ee.ethz.ch;gmail.com;northeastern.edu;cs.wm.edu;northeastern.edu;northeastern.edu", + "email": "northeastern.edu;northeastern.edu;northeastern.edu;northeastern.edu;email.wm.edu;email.wm.edu;vision.ee.ethz.ch;gmail.com;northeastern.edu;cs.wm.edu;northeastern.edu;northeastern.edu", + "github": "", + "project": "", + "author_num": 12, + "aff_unique_index": "0;0;0;0;1;1;2;0;0;1;0;0", + "aff_unique_norm": "Northeastern University;College of William & Mary;ETH Zurich", + "aff_unique_dep": ";;Computer Vision Laboratory", + "aff_unique_url": "https://www.northeastern.edu;https://www.wm.edu;https://www.ethz.ch", + "aff_unique_abbr": "NEU;WM;ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;1;0;0;0;0;0", + "aff_country_unique": "United States;Switzerland" + }, + { + "id": "article-25561", + "title": "Towards 
Reliable Item Sampling for Recommendation Evaluation", + "track": "main", + "status": "Technical", + "abstract": "Since Rendle and Krichene argued that commonly used sampling-based evaluation metrics are ``inconsistent'' with respect to the global metrics (even in expectation), there have been a few studies on the sampling-based recommender system evaluation. Existing methods try either mapping the sampling-based metrics to their global counterparts or more generally, learning the empirical rank distribution to estimate the top-K metrics. \nHowever, despite existing efforts, there is still a lack of rigorous theoretical understanding of the proposed metric estimators, and the basic item sampling also suffers from the ``blind spot'' issue, i.e., estimation accuracy to recover the top-K metrics when K is small can still be rather substantial. \nIn this paper, we provide an in-depth investigation into these problems and make two innovative contributions. First, we propose a new item-sampling estimator that explicitly optimizes the error with respect to the ground truth, and theoretically highlights its subtle difference against prior work. Second, we propose a new adaptive sampling method that aims to deal with the ``blind spot'' problem and also demonstrate the\nexpectation-maximization (EM) algorithm can be generalized for such a setting. \nOur experimental results confirm our statistical analysis and the superiority of the proposed works. 
\nThis study helps lay the theoretical foundation for adopting item sampling metrics for recommendation evaluation and provides strong evidence for making item sampling a powerful and reliable tool for recommendation evaluation.", + "primary_area": "data mining and knowledge management", + "author": "Dong Li; Ruoming Jin; Zhenming Liu; Bin Ren; Jing Gao; Zhi Liu", + "authorids": "", + "aff": "Kent State University; Kent State University; College of William & Mary; College of William & Mary; iLambda; iLambda", + "bibtex": "@article{Li_Jin_Liu_Ren_Gao_Liu_2023, title={Towards Reliable Item Sampling for Recommendation Evaluation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25561}, DOI={10.1609/aaai.v37i4.25561}, abstractNote={Since Rendle and Krichene argued that commonly used sampling-based evaluation metrics are ``inconsistent\u2019\u2019 with respect to the global metrics (even in expectation), there have been a few studies on the sampling-based recommender system evaluation. Existing methods try either mapping the sampling-based metrics to their global counterparts or more generally, learning the empirical rank distribution to estimate the top-K metrics. However, despite existing efforts, there is still a lack of rigorous theoretical understanding of the proposed metric estimators, and the basic item sampling also suffers from the ``blind spot\u2019\u2019 issue, i.e., estimation accuracy to recover the top-K metrics when K is small can still be rather substantial. In this paper, we provide an in-depth investigation into these problems and make two innovative contributions. First, we propose a new item-sampling estimator that explicitly optimizes the error with respect to the ground truth, and theoretically highlights its subtle difference against prior work. 
Second, we propose a new adaptive sampling method that aims to deal with the ``blind spot\u2019\u2019 problem and also demonstrate the\nexpectation-maximization (EM) algorithm can be generalized for such a setting. Our experimental results confirm our statistical analysis and the superiority of the proposed works. This study helps lay the theoretical foundation for adopting item sampling metrics for recommendation evaluation and provides strong evidence for making item sampling a powerful and reliable tool for recommendation evaluation.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Dong and Jin, Ruoming and Liu, Zhenming and Ren, Bin and Gao, Jing and Liu, Zhi}, year={2023}, month={Jun.}, pages={4409-4416} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25561/25333", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25561", + "pdf_size": 218520, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12897075533184367804&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "kent.edu;kent.edu;cs.wm.edu;cs.wm.edu;ilambda.com;ilambda.com", + "email": "kent.edu;kent.edu;cs.wm.edu;cs.wm.edu;ilambda.com;ilambda.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;2;2", + "aff_unique_norm": "Kent State University;College of William & Mary;iLambda", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.kent.edu;https://www.wm.edu;", + "aff_unique_abbr": "KSU;WM;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States;" + }, + { + "id": "article-26606", + "title": "Towards Reliable Neural Machine Translation with Consistency-Aware Meta-Learning", + "track": "main", + "status": "Technical", + "abstract": "Neural machine translation (NMT) has achieved remarkable success in producing high-quality translations. 
However, current NMT systems suffer from a lack of reliability, as their outputs that are often affected by lexical or syntactic changes in inputs, resulting in large variations in quality. This limitation hinders the practicality and trustworthiness of NMT. A contributing factor to this problem is that NMT models trained with the one-to-one paradigm struggle to handle the source diversity phenomenon, where inputs with the same meaning can be expressed differently. In this work, we treat this problem as a bilevel optimization problem and present a consistency-aware meta-learning (CAML) framework derived from the model-agnostic meta-learning (MAML) algorithm to address it. Specifically, the NMT model with CAML (named CoNMT) first learns a consistent meta representation of semantically equivalent sentences in the outer loop. Subsequently, a mapping from the meta representation to the output sentence is learned in the inner loop, allowing the NMT model to translate semantically equivalent sentences to the same target sentence. We conduct experiments on the NIST Chinese to English task, three WMT translation tasks, and the TED M2O task. 
The results demonstrate that CoNMT effectively improves overall translation quality and reliably handles diverse inputs.", + "primary_area": "speech natural language processing", + "author": "Rongxiang Weng; Qiang Wang; Wensen Cheng; Changfeng Zhu; Min Zhang", + "authorids": "", + "aff": "Soochow University, Suzhou, China+miHoYo AI, Shanghai, China; Zhejiang University, Hangzhou, China+RoyalFlush AI Research Institute, Hangzhou, China; miHoYo AI, Shanghai, China; miHoYo AI, Shanghai, China; Soochow University, Suzhou, China", + "bibtex": "@article{Weng_Wang_Cheng_Zhu_Zhang_2023, title={Towards Reliable Neural Machine Translation with Consistency-Aware Meta-Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26606}, DOI={10.1609/aaai.v37i11.26606}, abstractNote={Neural machine translation (NMT) has achieved remarkable success in producing high-quality translations. However, current NMT systems suffer from a lack of reliability, as their outputs that are often affected by lexical or syntactic changes in inputs, resulting in large variations in quality. This limitation hinders the practicality and trustworthiness of NMT. A contributing factor to this problem is that NMT models trained with the one-to-one paradigm struggle to handle the source diversity phenomenon, where inputs with the same meaning can be expressed differently. In this work, we treat this problem as a bilevel optimization problem and present a consistency-aware meta-learning (CAML) framework derived from the model-agnostic meta-learning (MAML) algorithm to address it. Specifically, the NMT model with CAML (named CoNMT) first learns a consistent meta representation of semantically equivalent sentences in the outer loop. Subsequently, a mapping from the meta representation to the output sentence is learned in the inner loop, allowing the NMT model to translate semantically equivalent sentences to the same target sentence. 
We conduct experiments on the NIST Chinese to English task, three WMT translation tasks, and the TED M2O task. The results demonstrate that CoNMT effectively improves overall translation quality and reliably handles diverse inputs.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Weng, Rongxiang and Wang, Qiang and Cheng, Wensen and Zhu, Changfeng and Zhang, Min}, year={2023}, month={Jun.}, pages={13709-13717} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26606/26378", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26606", + "pdf_size": 802078, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18214903987029163496&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com;gmail.com;126.com;suda.edu.cn", + "email": "gmail.com;gmail.com;gmail.com;126.com;suda.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2+3;1;1;0", + "aff_unique_norm": "Soochow University;miHoYo AI;Zhejiang University;RoyalFlush AI Research Institute", + "aff_unique_dep": ";AI;;", + "aff_unique_url": "https://www.soochow.edu.cn;;http://www.zju.edu.cn;", + "aff_unique_abbr": ";miHoYo AI;ZJU;", + "aff_campus_unique_index": "0+1;2+2;1;1;0", + "aff_campus_unique": "Suzhou;Shanghai;Hangzhou", + "aff_country_unique_index": "0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26392", + "title": "Towards Robust Metrics for Concept Representation Evaluation", + "track": "main", + "status": "Technical", + "abstract": "Recent work on interpretability has focused on concept-based explanations, where deep learning models are explained in terms of high-level units of information, referred to as concepts. Concept learning models, however, have been shown to be prone to encoding impurities in their representations, failing to fully capture meaningful features of their inputs. 
While concept learning lacks metrics to measure such phenomena, the field of disentanglement learning has explored the related notion of underlying factors of variation in the data, with plenty of metrics to measure the purity of such factors. In this paper, we show that such metrics are not appropriate for concept learning and propose novel metrics for evaluating the purity of concept representations in both approaches. We show the advantage of these metrics over existing ones and demonstrate their utility in evaluating the robustness of concept representations and interventions performed on them. In addition, we show their utility for benchmarking state-of-the-art methods from both families and find that, contrary to common assumptions, supervision alone may not be sufficient for pure concept representations.", + "primary_area": "philosophy and ethics of ai", + "author": "Mateo Espinosa Zarlenga; Pietro Barbiero; Zohreh Shams; Dmitry Kazhdan; Umang Bhatt; Adrian Weller; Mateja Jamnik", + "authorids": "", + "aff": "University of Cambridge; University of Cambridge; University of Cambridge + Babylon Health; University of Cambridge; University of Cambridge + The Alan Turing Institute; University of Cambridge + The Alan Turing Institute; University of Cambridge", + "bibtex": "@article{Espinosa Zarlenga_Barbiero_Shams_Kazhdan_Bhatt_Weller_Jamnik_2023, title={Towards Robust Metrics for Concept Representation Evaluation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26392}, DOI={10.1609/aaai.v37i10.26392}, abstractNote={Recent work on interpretability has focused on concept-based explanations, where deep learning models are explained in terms of high-level units of information, referred to as concepts. Concept learning models, however, have been shown to be prone to encoding impurities in their representations, failing to fully capture meaningful features of their inputs. 
While concept learning lacks metrics to measure such phenomena, the field of disentanglement learning has explored the related notion of underlying factors of variation in the data, with plenty of metrics to measure the purity of such factors. In this paper, we show that such metrics are not appropriate for concept learning and propose novel metrics for evaluating the purity of concept representations in both approaches. We show the advantage of these metrics over existing ones and demonstrate their utility in evaluating the robustness of concept representations and interventions performed on them. In addition, we show their utility for benchmarking state-of-the-art methods from both families and find that, contrary to common assumptions, supervision alone may not be sufficient for pure concept representations.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Espinosa Zarlenga, Mateo and Barbiero, Pietro and Shams, Zohreh and Kazhdan, Dmitry and Bhatt, Umang and Weller, Adrian and Jamnik, Mateja}, year={2023}, month={Jun.}, pages={11791-11799} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26392/26164", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26392", + "pdf_size": 316332, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4924639908747222180&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cl.cam.ac.uk", + "email": "cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk;cl.cam.ac.uk", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0+1;0;0+2;0+2;0", + "aff_unique_norm": "University of Cambridge;Babylon Health;The Alan Turing Institute", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cam.ac.uk;https://www.babylonhealth.com;https://www.turing.ac.uk", + "aff_unique_abbr": "Cambridge;;ATI", + "aff_campus_unique_index": 
"0;0;0;0;0;0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;0+0;0;0+0;0+0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26789", + "title": "Towards Safe AI: Sandboxing DNNs-Based Controllers in Stochastic Games", + "track": "aaai special track", + "status": "Technical", + "abstract": "Nowadays, AI-based techniques, such as deep neural networks (DNNs), are widely deployed in autonomous systems for complex mission requirements (e.g., motion planning in robotics). However, DNNs-based controllers are typically very complex, and it is very hard to formally verify their correctness, potentially causing severe risks for safety-critical autonomous systems. In this paper, we propose a construction scheme for a so-called Safe-visor architecture to sandbox DNNs-based controllers. Particularly, we consider the construction under a stochastic game framework to provide a system-level safety guarantee which is robust to noises and disturbances. A supervisor is built to check the control inputs provided by a DNNs-based controller and decide whether to accept them. Meanwhile, a safety advisor is running in parallel to provide fallback control inputs in case the DNN-based controller is rejected. 
We demonstrate the proposed approaches on a quadrotor employing an unverified DNNs-based controller.", + "primary_area": "safe and robust ai", + "author": "Bingzhuo Zhong; Hongpeng Cao; Majid Zamani; Marco Caccamo", + "authorids": "", + "aff": "Technical University of Munich, Germany; Technical University of Munich, Germany; University of Colorado Boulder, USA; Technical University of Munich, Germany", + "bibtex": "@article{Zhong_Cao_Zamani_Caccamo_2023, title={Towards Safe AI: Sandboxing DNNs-Based Controllers in Stochastic Games}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26789}, DOI={10.1609/aaai.v37i12.26789}, abstractNote={Nowadays, AI-based techniques, such as deep neural networks (DNNs), are widely deployed in autonomous systems for complex mission requirements (e.g., motion planning in robotics). However, DNNs-based controllers are typically very complex, and it is very hard to formally verify their correctness, potentially causing severe risks for safety-critical autonomous systems. In this paper, we propose a construction scheme for a so-called Safe-visor architecture to sandbox DNNs-based controllers. Particularly, we consider the construction under a stochastic game framework to provide a system-level safety guarantee which is robust to noises and disturbances. A supervisor is built to check the control inputs provided by a DNNs-based controller and decide whether to accept them. Meanwhile, a safety advisor is running in parallel to provide fallback control inputs in case the DNN-based controller is rejected. 
We demonstrate the proposed approaches on a quadrotor employing an unverified DNNs-based controller.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhong, Bingzhuo and Cao, Hongpeng and Zamani, Majid and Caccamo, Marco}, year={2023}, month={Jun.}, pages={15340-15349} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26789/26561", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26789", + "pdf_size": 1668380, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9279282768505376864&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "tum.de;tum.de;colorado.edu;tum.de", + "email": "tum.de;tum.de;colorado.edu;tum.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Technical University of Munich;University of Colorado Boulder", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tum.de;https://www.colorado.edu", + "aff_unique_abbr": "TUM;CU Boulder", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Boulder", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "Germany;United States" + }, + { + "id": "article-26862", + "title": "Towards Safe Mechanical Ventilation Treatment Using Deep Offline Reinforcement Learning", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Mechanical ventilation is a key form of life support for patients with pulmonary impairment. Healthcare workers are required to continuously adjust ventilator settings for each patient, a challenging and time consuming task. Hence, it would be beneficial to develop an automated decision support tool to optimize ventilation treatment. We present DeepVent, a Conservative Q-Learning (CQL) based offline Deep Reinforcement Learning (DRL) agent that learns to predict the optimal ventilator parameters for a patient to promote 90 day survival. 
We design a clinically relevant intermediate reward that encourages continuous improvement of the patient vitals as well as addresses the challenge of sparse reward in RL. We find that DeepVent recommends ventilation parameters within safe ranges, as outlined in recent clinical trials. The CQL algorithm offers additional safety by mitigating the overestimation of the value estimates of out-of-distribution states/actions. We evaluate our agent using Fitted Q Evaluation (FQE) and demonstrate that it outperforms physicians from the MIMIC-III dataset.", + "primary_area": "emerging applications of ai", + "author": "Flemming Kondrup; Thomas Jiralerspong; Elaine Lau; Nathan de Lara; Jacob Shkrob; My Duc Tran; Doina Precup; Sumana Basu", + "authorids": "", + "aff": "McGill University; McGill University; McGill University; McGill University; McGill University; McGill University; McGill University + Mila; McGill University + Mila", + "bibtex": "@article{Kondrup_Jiralerspong_Lau_de Lara_Shkrob_Tran_Precup_Basu_2024, title={Towards Safe Mechanical Ventilation Treatment Using Deep Offline Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26862}, DOI={10.1609/aaai.v37i13.26862}, abstractNote={Mechanical ventilation is a key form of life support for patients with pulmonary impairment. Healthcare workers are required to continuously adjust ventilator settings for each patient, a challenging and time consuming task. Hence, it would be beneficial to develop an automated decision support tool to optimize ventilation treatment. We present DeepVent, a Conservative Q-Learning (CQL) based offline Deep Reinforcement Learning (DRL) agent that learns to predict the optimal ventilator parameters for a patient to promote 90 day survival. We design a clinically relevant intermediate reward that encourages continuous improvement of the patient vitals as well as addresses the challenge of sparse reward in RL. 
We find that DeepVent recommends ventilation parameters within safe ranges, as outlined in recent clinical trials. The CQL algorithm offers additional safety by mitigating the overestimation of the value estimates of out-of-distribution states/actions. We evaluate our agent using Fitted Q Evaluation (FQE) and demonstrate that it outperforms physicians from the MIMIC-III dataset.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kondrup, Flemming and Jiralerspong, Thomas and Lau, Elaine and de Lara, Nathan and Shkrob, Jacob and Tran, My Duc and Precup, Doina and Basu, Sumana}, year={2024}, month={Jul.}, pages={15696-15702} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26862/26634", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26862", + "pdf_size": 9863182, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16512155186345747499&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;cs.mcgill.ca;mail.mcgill.ca", + "email": "mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;cs.mcgill.ca;mail.mcgill.ca", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0+1;0+1", + "aff_unique_norm": "McGill University;Mila", + "aff_unique_dep": ";Quebec Artificial Intelligence Institute", + "aff_unique_url": "https://www.mcgill.ca;https://mila.quebec", + "aff_unique_abbr": "McGill;Mila", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0+0;0+0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26968", + "title": "Towards Safe Reinforcement Learning via OOD Dynamics Detection in Autonomous Driving System (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Deep 
reinforcement learning (DRL) has proven effective in training agents to achieve goals in complex environments. However, a trained RL agent may exhibit, during deployment, unexpected behavior when faced with a situation where its state transitions differ even slightly from the training environment. Such a situation can arise for a variety of reasons. Rapid and accurate detection of anomalous behavior appears to be a prerequisite for using DRL in safety-critical systems, such as autonomous driving. We propose a novel OOD detection algorithm based on modeling the transition function of the training environment. Our method captures the bias of model behavior when encountering subtle changes of dynamics while maintaining a low false positive rate. Preliminary evaluations on the realistic simulator CARLA corroborate the relevance of our proposed method.", + "primary_area": "", + "author": "Arnaud Gardille; Ola Ahmad", + "authorids": "", + "aff": "Paris-Saclay University, France; Thales Digital Solutions, Montreal, Canada", + "bibtex": "@article{Gardille_Ahmad_2024, title={Towards Safe Reinforcement Learning via OOD Dynamics Detection in Autonomous Driving System (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26968}, DOI={10.1609/aaai.v37i13.26968}, abstractNote={Deep reinforcement learning (DRL) has proven effective in training agents to achieve goals in complex environments. However, a trained RL agent may exhibit, during deployment, unexpected behavior when faced with a situation where its state transitions differ even slightly from the training environment. Such a situation can arise for a variety of reasons. Rapid and accurate detection of anomalous behavior appears to be a prerequisite for using DRL in safety-critical systems, such as autonomous driving. We propose a novel OOD detection algorithm based on modeling the transition function of the training environment. 
Our method captures the bias of model behavior when encountering subtle changes of dynamics while maintaining a low false positive rate. Preliminary evaluations on the realistic simulator CARLA corroborate the relevance of our proposed method.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Gardille, Arnaud and Ahmad, Ola}, year={2024}, month={Jul.}, pages={16216-16217} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26968/26740", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26968", + "pdf_size": 415796, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6318478333252598325&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "universite-paris-saclay.fr;thalesdigital.io", + "email": "universite-paris-saclay.fr;thalesdigital.io", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Paris-Saclay University;Thales Digital Solutions", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.universite-paris-saclay.fr;", + "aff_unique_abbr": "Paris-Saclay;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Montreal", + "aff_country_unique_index": "0;1", + "aff_country_unique": "France;Canada" + }, + { + "id": "article-26816", + "title": "Towards Safe and Resilient Autonomy in Multi-Robot Systems", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "In the near future, autonomous systems such as multi-robot\nsystems are envisioned to increasingly co-exist with hu-\nmans in our daily lives, from household service to large-\nscale warehouse logistics, agriculture environment sampling,\nand smart city. In these applications, robots and humans as\nnetworked heterogeneous components will frequently inter-\nact with each other in a variety of scenarios under uncer-\ntain, rapidly-changing, and possibly hostile environment. 
On\none hand, harmonious interactions among robots, as well as\nbetween robots and humans, would require safe integration\n(e.g. collision-free close-proximity interactions) of heteroge-\nneous robots, human, and human-robot autonomy. On the\nother hand, reliable interactions among autonomous multi-\nrobot systems often call for resilient system integrity (e.g.\ncommunication capability with potential robot failures) to re-\ntain its capability of accomplishing complex tasks through\ncoordinated behaviors. In the proposed talk, I will discuss our\nrecent works towards safe autonomy and resilient autonomy\nthat aim to facilitate correct-by-design robotic behaviors in a\nvariety of applications.", + "primary_area": "", + "author": "Wenhao Luo", + "authorids": "", + "aff": "University of North Carolina at Charlotte", + "bibtex": "@article{Luo_2024, title={Towards Safe and Resilient Autonomy in Multi-Robot Systems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26816}, DOI={10.1609/aaai.v37i13.26816}, abstractNote={In the near future, autonomous systems such as multi-robot\nsystems are envisioned to increasingly co-exist with hu-\nmans in our daily lives, from household service to large-\nscale warehouse logistics, agriculture environment sampling,\nand smart city. In these applications, robots and humans as\nnetworked heterogeneous components will frequently inter-\nact with each other in a variety of scenarios under uncer-\ntain, rapidly-changing, and possibly hostile environment. On\none hand, harmonious interactions among robots, as well as\nbetween robots and humans, would require safe integration\n(e.g. collision-free close-proximity interactions) of heteroge-\nneous robots, human, and human-robot autonomy. 
On the\nother hand, reliable interactions among autonomous multi-\nrobot systems often call for resilient system integrity (e.g.\ncommunication capability with potential robot failures) to re-\ntain its capability of accomplishing complex tasks through\ncoordinated behaviors. In the proposed talk, I will discuss our\nrecent works towards safe autonomy and resilient autonomy\nthat aim to facilitate correct-by-design robotic behaviors in a\nvariety of applications.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Wenhao}, year={2024}, month={Jul.}, pages={15449-15449} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26816/26588", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26816", + "pdf_size": 39593, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3649762741002802580&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 3, + "aff_domain": "uncc.edu", + "email": "uncc.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of North Carolina at Charlotte", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uncc.edu", + "aff_unique_abbr": "UNCC", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Charlotte", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26830", + "title": "Towards Societal Impact of AI", + "track": "new faculty highlights", + "status": "Technical", + "abstract": "Artificial intelligence (AI) and Machine Learning (ML) have shown great success in many areas such as computer vision, natural language processing, and knowledge discovery. However, AI research to deliver social benefits and impacts is less explored while imminent needed. 
Guided by the United Nations\u2019 Sustainable Development Goals, my research involves the development of advanced AI techniques, in particular Deep Graph Learning (DGL), to address the grand societal challenges and further apply them to various social good applications for improving our society and people\u2019s daily life, namely DGL for Social Good (DGL4SG). Achieving the goal is not easy since challenges come from the increasing complexity of many factors including problems, data, and techniques, which require long-term and concentrated effort. DGL presents a good opportunity to build better solutions and tools due to its strong capability in learning and inferring graph data which is ideal for modeling many real-world social good systems. Fortunately, I have been working on DGL with continued contributions and impacts since my graduate study. The special research experience lifts me up to a unique position for conducting research that intersects AI, DGL, and social good, and pushing the field of DGL4SG forward.", + "primary_area": "", + "author": "Chuxu Zhang", + "authorids": "", + "aff": "Department of Computer Science, Brandeis University, USA", + "bibtex": "@article{Zhang_2024, title={Towards Societal Impact of AI}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26830}, DOI={10.1609/aaai.v37i13.26830}, abstractNote={Artificial intelligence (AI) and Machine Learning (ML) have shown great success in many areas such as computer vision, natural language processing, and knowledge discovery. However, AI research to deliver social benefits and impacts is less explored while imminent needed. 
Guided by the United Nations\u2019 Sustainable Development Goals, my research involves the development of advanced AI techniques, in particular Deep Graph Learning (DGL), to address the grand societal challenges and further apply them to various social good applications for improving our society and people\u2019s daily life, namely DGL for Social Good (DGL4SG). Achieving the goal is not easy since challenges come from the increasing complexity of many factors including problems, data, and techniques, which require long-term and concentrated effort. DGL presents a good opportunity to build better solutions and tools due to its strong capability in learning and inferring graph data which is ideal for modeling many real-world social good systems. Fortunately, I have been working on DGL with continued contributions and impacts since my graduate study. The special research experience lifts me up to a unique position for conducting research that intersects AI, DGL, and social good, and pushing the field of DGL4SG forward.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Chuxu}, year={2024}, month={Jul.}, pages={15463-15463} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26830/26602", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26830", + "pdf_size": 384585, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff_domain": "uncc.edu", + "email": "uncc.edu", + "github": "", + "project": "https://sdgs.un.org/goals", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Brandeis University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.brandeis.edu", + "aff_unique_abbr": "Brandeis", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "article-26823", + "title": "Towards Unified, Explainable, and Robust Multisensory Perception", + "track": "new faculty highlights", + "status": 
"Technical", + "abstract": "Humans perceive surrounding scenes through multiple senses with multisensory integration. For example, hearing helps capture the spatial location of a racing car behind us; seeing peoples' talking faces can strengthen our perception of their speech. However, today's state-of-the-art scene understanding systems are usually designed to rely on a single audio or visual modality. Ignoring multisensory cooperation has become one of the key bottlenecks in creating intelligent systems with human-level perception capability, which impedes the real-world applications of existing scene understanding models. To address this limitation, my research has pioneered marrying computer vision with computer audition to create multimodal systems that can learn to understand audio and visual data. In particular, my current research focuses on asking and solving fundamental problems in a fresh research area: audio-visual scene understanding and strives to develop unified, explainable, and robust multisensory perception machines. The three themes are distinct yet interconnected, and all of them are essential for designing powerful and trustworthy perception systems. In my talk, I will give a brief overview about this new research area and then introduce my works in the three research thrusts.", + "primary_area": "", + "author": "Yapeng Tian", + "authorids": "", + "aff": "Department of Computer Science, University of Texas at Dallas, USA", + "bibtex": "@article{Tian_2024, title={Towards Unified, Explainable, and Robust Multisensory Perception}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26823}, DOI={10.1609/aaai.v37i13.26823}, abstractNote={Humans perceive surrounding scenes through multiple senses with multisensory integration. For example, hearing helps capture the spatial location of a racing car behind us; seeing peoples\u2019 talking faces can strengthen our perception of their speech. 
However, today\u2019s state-of-the-art scene understanding systems are usually designed to rely on a single audio or visual modality. Ignoring multisensory cooperation has become one of the key bottlenecks in creating intelligent systems with human-level perception capability, which impedes the real-world applications of existing scene understanding models. To address this limitation, my research has pioneered marrying computer vision with computer audition to create multimodal systems that can learn to understand audio and visual data. In particular, my current research focuses on asking and solving fundamental problems in a fresh research area: audio-visual scene understanding and strives to develop unified, explainable, and robust multisensory perception machines. The three themes are distinct yet interconnected, and all of them are essential for designing powerful and trustworthy perception systems. In my talk, I will give a brief overview about this new research area and then introduce my works in the three research thrusts.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tian, Yapeng}, year={2024}, month={Jul.}, pages={15456-15456} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26823/26595", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26823", + "pdf_size": 45505, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=749073661056247949&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "utdallas.edu", + "email": "utdallas.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Texas at Dallas", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utdallas.edu", + "aff_unique_abbr": "UT Dallas", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Dallas", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" 
+ }, + { + "id": "article-26773", + "title": "Towards Verifying the Geometric Robustness of Large-Scale Neural Networks", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deep neural networks (DNNs) are known to be vulnerable to adversarial geometric transformation. This paper aims to verify the robustness of large-scale DNNs against the combination of multiple geometric transformations with a provable guarantee. Given a set of transformations (e.g., rotation, scaling, etc.), we develop GeoRobust, a black-box robustness analyser built upon a novel global optimisation strategy, for locating the worst-case combination of transformations that affect and even alter a network's output. GeoRobust can provide provable guarantees on finding the worst-case combination based on recent advances in Lipschitzian theory. Due to its black-box nature, GeoRobust can be deployed on large-scale DNNs regardless of their architectures, activation functions, and the number of neurons. In practice, GeoRobust can locate the worst-case geometric transformation with high precision for the ResNet50 model on ImageNet in a few seconds on average. We examined 18 ImageNet classifiers, including the ResNet family and vision transformers, and found a positive correlation between the geometric robustness of the networks and the parameter numbers. We also observe that increasing the depth of DNN is more beneficial than increasing its width in terms of improving its geometric robustness. 
Our tool GeoRobust is available at https://github.com/TrustAI/GeoRobust.", + "primary_area": "safe and robust ai", + "author": "Fu Wang; Peipei Xu; Wenjie Ruan; Xiaowei Huang", + "authorids": "", + "aff": "Department of Computer Science, University of Exeter, Exeter, EX4 4QF, UK; Department of Computer Science, University of Liverpool, Liverpool, L69 3BX, UK; Department of Computer Science, University of Exeter, Exeter, EX4 4QF, UK; Department of Computer Science, University of Liverpool, Liverpool, L69 3BX, UK", + "bibtex": "@article{Wang_Xu_Ruan_Huang_2023, title={Towards Verifying the Geometric Robustness of Large-Scale Neural Networks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26773}, DOI={10.1609/aaai.v37i12.26773}, abstractNote={Deep neural networks (DNNs) are known to be vulnerable to adversarial geometric transformation. This paper aims to verify the robustness of large-scale DNNs against the combination of multiple geometric transformations with a provable guarantee. Given a set of transformations (e.g., rotation, scaling, etc.), we develop GeoRobust, a black-box robustness analyser built upon a novel global optimisation strategy, for locating the worst-case combination of transformations that affect and even alter a network\u2019s output. GeoRobust can provide provable guarantees on finding the worst-case combination based on recent advances in Lipschitzian theory. Due to its black-box nature, GeoRobust can be deployed on large-scale DNNs regardless of their architectures, activation functions, and the number of neurons. In practice, GeoRobust can locate the worst-case geometric transformation with high precision for the ResNet50 model on ImageNet in a few seconds on average. We examined 18 ImageNet classifiers, including the ResNet family and vision transformers, and found a positive correlation between the geometric robustness of the networks and the parameter numbers. 
We also observe that increasing the depth of DNN is more beneficial than increasing its width in terms of improving its geometric robustness. Our tool GeoRobust is available at https://github.com/TrustAI/GeoRobust.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Fu and Xu, Peipei and Ruan, Wenjie and Huang, Xiaowei}, year={2023}, month={Jun.}, pages={15197-15205} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26773/26545", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26773", + "pdf_size": 696168, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16949613170969479176&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "exeter.ac.uk;liverpool.ac.uk;exeter.ac.uk;liverpool.ac.uk", + "email": "exeter.ac.uk;liverpool.ac.uk;exeter.ac.uk;liverpool.ac.uk", + "github": "https://github.com/TrustAI/GeoRobust", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "University of Exeter;University of Liverpool", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.exeter.ac.uk;https://www.liverpool.ac.uk", + "aff_unique_abbr": "Exeter;Liv Uni", + "aff_campus_unique_index": "0;1;0;1", + "aff_campus_unique": "Exeter;Liverpool", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-25745", + "title": "Towards Voice Reconstruction from EEG during Imagined Speech", + "track": "main", + "status": "Technical", + "abstract": "Translating imagined speech from human brain activity into voice is a challenging and absorbing research issue that can provide new means of human communication via brain signals. Efforts to reconstruct speech from brain activity have shown their potential using invasive measures of spoken speech data, but have faced challenges in reconstructing imagined speech. 
In this paper, we propose NeuroTalk, which converts non-invasive brain signals of imagined speech into the user's own voice. Our model was trained with spoken speech EEG which was generalized to adapt to the domain of imagined speech, thus allowing natural correspondence between the imagined speech and the voice as a ground truth. In our framework, an automatic speech recognition decoder contributed to decomposing the phonemes of the generated speech, demonstrating the potential of voice reconstruction from unseen words. Our results imply the potential of speech synthesis from human EEG signals, not only from spoken speech but also from the brain signals of imagined speech.", + "primary_area": "humans and ai", + "author": "Young-Eun Lee; Seo-Hyun Lee; Sang-Ho Kim; Seong-Whan Lee", + "authorids": "", + "aff": "Department of Brain and Cognitive Engineering, Korea University, Seoul, Republic of Korea; Department of Brain and Cognitive Engineering, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea", + "bibtex": "@article{Lee_Lee_Kim_Lee_2023, title={Towards Voice Reconstruction from EEG during Imagined Speech}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25745}, DOI={10.1609/aaai.v37i5.25745}, abstractNote={Translating imagined speech from human brain activity into voice is a challenging and absorbing research issue that can provide new means of human communication via brain signals. Efforts to reconstruct speech from brain activity have shown their potential using invasive measures of spoken speech data, but have faced challenges in reconstructing imagined speech. In this paper, we propose NeuroTalk, which converts non-invasive brain signals of imagined speech into the user\u2019s own voice. 
Our model was trained with spoken speech EEG which was generalized to adapt to the domain of imagined speech, thus allowing natural correspondence between the imagined speech and the voice as a ground truth. In our framework, an automatic speech recognition decoder contributed to decomposing the phonemes of the generated speech, demonstrating the potential of voice reconstruction from unseen words. Our results imply the potential of speech synthesis from human EEG signals, not only from spoken speech but also from the brain signals of imagined speech.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lee, Young-Eun and Lee, Seo-Hyun and Kim, Sang-Ho and Lee, Seong-Whan}, year={2023}, month={Jun.}, pages={6030-6038} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25745/25517", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25745", + "pdf_size": 1333042, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13955995630586587812&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Korea University", + "aff_unique_dep": "Department of Brain and Cognitive Engineering", + "aff_unique_url": "http://www.korea.ac.kr", + "aff_unique_abbr": "KU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-26573", + "title": "Towards a Holistic Understanding of Mathematical Questions with Contrastive Pre-training", + "track": "main", + "status": "Technical", + "abstract": "Understanding mathematical questions effectively is a crucial task, which can benefit many applications, such as difficulty estimation. 
Researchers have drawn much attention to designing pre-training models for question representations due to the scarcity of human annotations (e.g., labeling difficulty). However, unlike general free-format texts (e.g., user comments), mathematical questions are generally designed with explicit purposes and mathematical logic, and usually consist of more complex content, such as formulas, and related mathematical knowledge (e.g., Function). Therefore, the problem of holistically representing mathematical questions remains underexplored. To this end, in this paper, we propose a novel contrastive pre-training approach for mathematical question representations, namely QuesCo, which attempts to bring questions with more similar purposes closer. Specifically, we first design two-level question augmentations, including content-level and structure-level, which generate literally diverse question pairs with similar purposes. Then, to fully exploit hierarchical information of knowledge concepts, we propose a knowledge hierarchy-aware rank strategy (KHAR), which ranks the similarities between questions in a fine-grained manner. Next, we adopt a ranking contrastive learning task to optimize our model based on the augmented and ranked questions. We conduct extensive experiments on two real-world mathematical datasets. 
The experimental results demonstrate the effectiveness of our model.", + "primary_area": "speech natural language processing", + "author": "Yuting Ning; Zhenya Huang; Xin Lin; Enhong Chen; Shiwei Tong; Zheng Gong; Shijin Wang", + "authorids": "", + "aff": "Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; State Key Laboratory of Cognitive Intelligence+iFLYTEK AI Research (Central China), iFLYTEK Co., Ltd.", + "bibtex": "@article{Ning_Huang_Lin_Chen_Tong_Gong_Wang_2023, title={Towards a Holistic Understanding of Mathematical Questions with Contrastive Pre-training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26573}, DOI={10.1609/aaai.v37i11.26573}, abstractNote={Understanding mathematical questions effectively is a crucial task, which can benefit many applications, such as difficulty 
estimation. Researchers have drawn much attention to designing pre-training models for question representations due to the scarcity of human annotations (e.g., labeling difficulty). However, unlike general free-format texts (e.g., user comments), mathematical questions are generally designed with explicit purposes and mathematical logic, and usually consist of more complex content, such as formulas, and related mathematical knowledge (e.g., Function). Therefore, the problem of holistically representing mathematical questions remains underexplored. To this end, in this paper, we propose a novel contrastive pre-training approach for mathematical question representations, namely QuesCo, which attempts to bring questions with more similar purposes closer. Specifically, we first design two-level question augmentations, including content-level and structure-level, which generate literally diverse question pairs with similar purposes. Then, to fully exploit hierarchical information of knowledge concepts, we propose a knowledge hierarchy-aware rank strategy (KHAR), which ranks the similarities between questions in a fine-grained manner. Next, we adopt a ranking contrastive learning task to optimize our model based on the augmented and ranked questions. We conduct extensive experiments on two real-world mathematical datasets. 
The experimental results demonstrate the effectiveness of our model.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ning, Yuting and Huang, Zhenya and Lin, Xin and Chen, Enhong and Tong, Shiwei and Gong, Zheng and Wang, Shijin}, year={2023}, month={Jun.}, pages={13409-13418} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26573/26345", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26573", + "pdf_size": 733082, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16146670119187799240&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;iflytek.com", + "email": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;iflytek.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1;1+2", + "aff_unique_norm": "University of Science and Technology of China;State Key Laboratory of Cognitive Intelligence;iFLYTEK Co., Ltd.", + "aff_unique_dep": "School of Computer Science and Technology;;iFLYTEK AI Research", + "aff_unique_url": "http://www.ustc.edu.cn;;https://www.iflytek.com", + "aff_unique_abbr": "USTC;;iFLYTEK", + "aff_campus_unique_index": ";;;;;;1", + "aff_campus_unique": ";Central China", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25463", + "title": "TrEP: Transformer-Based Evidential Prediction for Pedestrian Intention with Uncertainty", + "track": "main", + "status": "Technical", + "abstract": "With rapid development in hardware (sensors and processors) and AI algorithms, automated driving techniques have entered the public\u2019s daily life and achieved great success in supporting human driving performance. 
However, due to the high contextual variations and temporal dynamics in pedestrian behaviors, the interaction between autonomous-driving cars and pedestrians remains challenging, impeding the development of fully autonomous driving systems. This paper focuses on predicting pedestrian intention with a novel transformer-based evidential prediction (TrEP) algorithm. We develop a transformer module towards the temporal correlations among the input features within pedestrian video sequences and a deep evidential learning model to capture the AI uncertainty under scene complexities. Experimental results on three popular pedestrian intent benchmarks have verified the effectiveness of our proposed model over the state-of-the-art. The algorithm performance can be further boosted by controlling the uncertainty level. We systematically compare human disagreements with AI uncertainty to further evaluate AI performance in confusing scenes. The code is released at https://github.com/zzmonlyyou/TrEP.git.", + "primary_area": "computer vision iii", + "author": "Zhengming Zhang; Renran Tian; Zhengming Ding", + "authorids": "", + "aff": "School of Industrial Engineering, Purdue University, West Lafayette, Indiana, USA; Department of Computer Information Technology, Indiana University Purdue University Indianapolis, Indiana, USA; Department of Computer Science, Tulane University, New Orleans, Louisiana, USA", + "bibtex": "@article{Zhang_Tian_Ding_2023, title={TrEP: Transformer-Based Evidential Prediction for Pedestrian Intention with Uncertainty}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25463}, DOI={10.1609/aaai.v37i3.25463}, abstractNote={With rapid development in hardware (sensors and processors) and AI algorithms, automated driving techniques have entered the public\u2019s daily life and achieved great success in supporting human driving performance. 
However, due to the high contextual variations and temporal dynamics in pedestrian behaviors, the interaction between autonomous-driving cars and pedestrians remains challenging, impeding the development of fully autonomous driving systems. This paper focuses on predicting pedestrian intention with a novel transformer-based evidential prediction (TrEP) algorithm. We develop a transformer module towards the temporal correlations among the input features within pedestrian video sequences and a deep evidential learning model to capture the AI uncertainty under scene complexities. Experimental results on three popular pedestrian intent benchmarks have verified the effectiveness of our proposed model over the state-of-the-art. The algorithm performance can be further boosted by controlling the uncertainty level. We systematically compare human disagreements with AI uncertainty to further evaluate AI performance in confusing scenes. The code is released at https://github.com/zzmonlyyou/TrEP.git.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Zhengming and Tian, Renran and Ding, Zhengming}, year={2023}, month={Jun.}, pages={3534-3542} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25463/25235", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25463", + "pdf_size": 7072662, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=420834740100151130&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "purdue.edu;iupui.edu;tulane.edu", + "email": "purdue.edu;iupui.edu;tulane.edu", + "github": "https://github.com/zzmonlyyou/TrEP.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Purdue University;Indiana University Purdue University Indianapolis;Tulane University", + "aff_unique_dep": "School of Industrial Engineering;Department of Computer Information Technology;Department of Computer Science", + 
"aff_unique_url": "https://www.purdue.edu;https://www.iupui.edu;https://www.tulane.edu", + "aff_unique_abbr": "Purdue;IUPUI;Tulane", + "aff_campus_unique_index": "0;1;2", + "aff_campus_unique": "West Lafayette;Indianapolis;New Orleans", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26538", + "title": "TrOCR: Transformer-Based Optical Character Recognition with Pre-trained Models", + "track": "main", + "status": "Technical", + "abstract": "Text recognition is a long-standing research problem for document digitalization. Existing approaches are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments show that the TrOCR model outperforms the current state-of-the-art models on the printed, handwritten and scene text recognition tasks. 
The TrOCR models and code are publicly available at https://aka.ms/trocr.", + "primary_area": "speech natural language processing", + "author": "Minghao Li; Tengchao Lv; Jingye Chen; Lei Cui; Yijuan Lu; Dinei Florencio; Cha Zhang; Zhoujun Li; Furu Wei", + "authorids": "", + "aff": "Beihang University+Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Beihang University+Microsoft Corporation; Microsoft Corporation", + "bibtex": "@article{Li_Lv_Chen_Cui_Lu_Florencio_Zhang_Li_Wei_2023, title={TrOCR: Transformer-Based Optical Character Recognition with Pre-trained Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26538}, DOI={10.1609/aaai.v37i11.26538}, abstractNote={Text recognition is a long-standing research problem for document digitalization. Existing approaches are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments show that the TrOCR model outperforms the current state-of-the-art models on the printed, handwritten and scene text recognition tasks. 
The TrOCR models and code are publicly available at https://aka.ms/trocr.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Minghao and Lv, Tengchao and Chen, Jingye and Cui, Lei and Lu, Yijuan and Florencio, Dinei and Zhang, Cha and Li, Zhoujun and Wei, Furu}, year={2023}, month={Jun.}, pages={13094-13102} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26538/26310", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26538", + "pdf_size": 230790, + "gs_citation": 515, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8094494954357753001&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff_domain": "buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;buaa.edu.cn;microsoft.com", + "email": "buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;buaa.edu.cn;microsoft.com", + "github": "", + "project": "https://aka.ms/trocr", + "author_num": 9, + "aff_unique_index": "0+1;1;1;1;1;1;1;0+1;1", + "aff_unique_norm": "Beihang University;Microsoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.buaa.edu.cn/;https://www.microsoft.com", + "aff_unique_abbr": "BUAA;Microsoft", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;1;1;1;1;1;0+1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25103", + "title": "Tracking and Reconstructing Hand Object Interactions from Point Cloud Sequences in the Wild", + "track": "main", + "status": "Technical", + "abstract": "In this work, we tackle the challenging task of jointly tracking hand object poses and reconstructing their shapes from depth point cloud sequences in the wild, given the initial poses at frame 0. We for the first time propose a point cloud-based hand joint tracking network, HandTrackNet, to estimate the inter-frame hand joint motion. 
Our HandTrackNet proposes a novel hand pose canonicalization module to ease the tracking task, yielding accurate and robust hand joint tracking. Our pipeline then reconstructs the full hand via converting the predicted hand joints into a MANO hand. For object tracking, we devise a simple yet effective module that estimates the object SDF from the first frame and performs optimization-based tracking. Finally, a joint optimization step is adopted to perform joint hand and object reasoning, which alleviates the occlusion-induced ambiguity and further refines the hand pose. During training, the whole pipeline only sees purely synthetic data, which are synthesized with sufficient variations and by depth simulation for the ease of generalization. The whole pipeline is pertinent to the generalization gaps and thus directly transferable to real in-the-wild data. We evaluate our method on two real hand object interaction datasets, e.g. HO3D and DexYCB, without any fine-tuning. Our experiments demonstrate that the proposed method significantly outperforms the previous state-of-the-art depth-based hand and object pose estimation and tracking methods, running at a frame rate of 9 FPS. 
We have released our code on https://github.com/PKU-EPIC/HOTrack.", + "primary_area": "computer vision i", + "author": "Jiayi Chen; Mi Yan; Jiazhao Zhang; Yinzhen Xu; Xiaolong Li; Yijia Weng; Li Yi; Shuran Song; He Wang", + "authorids": "", + "aff": "Peking University+Beijing Institute for General AI; Peking University+Beijing Institute for General AI; Peking University; Peking University+Beijing Institute for General AI; Virginia Tech; Stanford University; Tsinghua University; Columbia University; Peking University", + "bibtex": "@article{Chen_Yan_Zhang_Xu_Li_Weng_Yi_Song_Wang_2023, title={Tracking and Reconstructing Hand Object Interactions from Point Cloud Sequences in the Wild}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25103}, DOI={10.1609/aaai.v37i1.25103}, abstractNote={In this work, we tackle the challenging task of jointly tracking hand object poses and reconstructing their shapes from depth point cloud sequences in the wild, given the initial poses at frame 0. We for the first time propose a point cloud-based hand joint tracking network, HandTrackNet, to estimate the inter-frame hand joint motion. Our HandTrackNet proposes a novel hand pose canonicalization module to ease the tracking task, yielding accurate and robust hand joint tracking. Our pipeline then reconstructs the full hand via converting the predicted hand joints into a MANO hand. For object tracking, we devise a simple yet effective module that estimates the object SDF from the first frame and performs optimization-based tracking. Finally, a joint optimization step is adopted to perform joint hand and object reasoning, which alleviates the occlusion-induced ambiguity and further refines the hand pose. During training, the whole pipeline only sees purely synthetic data, which are synthesized with sufficient variations and by depth simulation for the ease of generalization. 
The whole pipeline is pertinent to the generalization gaps and thus directly transferable to real in-the-wild data. We evaluate our method on two real hand object interaction datasets, e.g. HO3D and DexYCB, without any fine-tuning. Our experiments demonstrate that the proposed method significantly outperforms the previous state-of-the-art depth-based hand and object pose estimation and tracking methods, running at a frame rate of 9 FPS. We have released our code on https://github.com/PKU-EPIC/HOTrack.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Jiayi and Yan, Mi and Zhang, Jiazhao and Xu, Yinzhen and Li, Xiaolong and Weng, Yijia and Yi, Li and Song, Shuran and Wang, He}, year={2023}, month={Jun.}, pages={304-312} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25103/24875", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25103", + "pdf_size": 11389483, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11586745514239637591&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff_domain": "pku.edu.cn;pku.edu.cn;gmail.com;pku.edu.cn;vt.edu;gmail.com;gmail.com;cs.columbia.edu;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;gmail.com;pku.edu.cn;vt.edu;gmail.com;gmail.com;cs.columbia.edu;pku.edu.cn", + "github": "https://github.com/PKU-EPIC/HOTrack", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0+1;0;0+1;2;3;4;5;0", + "aff_unique_norm": "Peking University;Beijing Institute for General AI;Virginia Tech;Stanford University;Tsinghua University;Columbia University", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "http://www.pku.edu.cn;http://www.general-ai.cn;https://www.vt.edu;https://www.stanford.edu;https://www.tsinghua.edu.cn;https://www.columbia.edu", + "aff_unique_abbr": "Peking U;;VT;Stanford;THU;Columbia", + "aff_campus_unique_index": ";;;1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": 
"0+0;0+0;0;0+0;1;1;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25980", + "title": "Trafformer: Unify Time and Space in Traffic Prediction", + "track": "main", + "status": "Technical", + "abstract": "Traffic prediction is an important component of the intelligent transportation system. Existing deep learning methods encode temporal information and spatial information separately or iteratively. However, the spatial and temporal information is highly correlated in a traffic network, so existing methods may not learn the complex spatial-temporal dependencies hidden in the traffic network due to the decomposed model design. To overcome this limitation, we propose a new model named Trafformer, which unifies spatial and temporal information in one transformer-style model. Trafformer enables every node at every timestamp interact with every other node in every other timestamp in just one step in the spatial-temporal correlation matrix. This design enables Trafformer to catch complex spatial-temporal dependencies. Following the same design principle, we use the generative style decoder to predict multiple timestamps in only one forward operation instead of the iterative style decoder in Transformer. Furthermore, to reduce the complexity brought about by the huge spatial-temporal self-attention matrix, we also propose two variants of Trafformer to further improve the training and inference speed without losing much effectivity. Extensive experiments on two traffic datasets demonstrate that Trafformer outperforms existing methods and provides a promising future direction for the spatial-temporal traffic prediction problem.", + "primary_area": "machine learning ii", + "author": "Di Jin; Jiayi Shi; Rui Wang; Yawen Li; Yuxiao Huang; Yu-Bin Yang", + "authorids": "", + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin, P.R. China; College of Intelligence and Computing, Tianjin University, Tianjin, P.R. 
China; College of Intelligence and Computing, Tianjin University, Tianjin, P.R. China; School of Economics and Management, Beijing University of Posts and Telecommunications, Beijing, P.R. China; Columbian College of Arts and Sciences, George Washington University, Washington, D.C., USA; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, P.R. China", + "bibtex": "@article{Jin_Shi_Wang_Li_Huang_Yang_2023, title={Trafformer: Unify Time and Space in Traffic Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25980}, DOI={10.1609/aaai.v37i7.25980}, abstractNote={Traffic prediction is an important component of the intelligent transportation system. Existing deep learning methods encode temporal information and spatial information separately or iteratively. However, the spatial and temporal information is highly correlated in a traffic network, so existing methods may not learn the complex spatial-temporal dependencies hidden in the traffic network due to the decomposed model design. To overcome this limitation, we propose a new model named Trafformer, which unifies spatial and temporal information in one transformer-style model. Trafformer enables every node at every timestamp interact with every other node in every other timestamp in just one step in the spatial-temporal correlation matrix. This design enables Trafformer to catch complex spatial-temporal dependencies. Following the same design principle, we use the generative style decoder to predict multiple timestamps in only one forward operation instead of the iterative style decoder in Transformer. Furthermore, to reduce the complexity brought about by the huge spatial-temporal self-attention matrix, we also propose two variants of Trafformer to further improve the training and inference speed without losing much effectivity. 
Extensive experiments on two traffic datasets demonstrate that Trafformer outperforms existing methods and provides a promising future direction for the spatial-temporal traffic prediction problem.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jin, Di and Shi, Jiayi and Wang, Rui and Li, Yawen and Huang, Yuxiao and Yang, Yu-Bin}, year={2023}, month={Jun.}, pages={8114-8122} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25980/25752", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25980", + "pdf_size": 611569, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11495294153274553387&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;bupt.edu.cn;gwu.edu;nju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;bupt.edu.cn;gwu.edu;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;2;3", + "aff_unique_norm": "Tianjin University;Beijing University of Posts and Telecommunications;George Washington University;Nanjing University", + "aff_unique_dep": "College of Intelligence and Computing;School of Economics and Management;Columbian College of Arts and Sciences;State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.tju.edu.cn;http://www.bupt.edu.cn;https://www.gwu.edu;http://www.nju.edu.cn", + "aff_unique_abbr": "Tianjin University;BUPT;GWU;Nanjing U", + "aff_campus_unique_index": "0;0;0;1;2;3", + "aff_campus_unique": "Tianjin;Beijing;Washington, D.C.;Nanjing", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26139", + "title": "Training Meta-Surrogate Model for Transferable Adversarial Attack", + "track": "main", + "status": "Technical", + "abstract": "The problem of adversarial attacks to a black-box model when no queries are allowed has posed a great challenge to the 
community and has been extensively investigated. In this setting, one simple yet effective method is to transfer the obtained adversarial examples from attacking surrogate models to fool the target model. Previous works have studied what kind of attacks to the surrogate model can generate more transferable adversarial examples, but their performances are still limited due to the mismatches between surrogate models and the target model. In this paper, we tackle this problem from a novel angle---instead of using the original surrogate models, can we obtain a Meta-Surrogate Model (MSM) such that attacks to this model can be easily transferred to other models? We show that this goal can be mathematically formulated as a bi-level optimization problem and design a differentiable attacker to make training feasible. Given one or a set of surrogate models, our method can thus obtain an MSM such that adversarial examples generated on MSM enjoy eximious transferability. Comprehensive experiments on Cifar-10 and ImageNet demonstrate that by attacking the MSM, we can obtain stronger transferable adversarial examples to deceive black-box models including adversarially trained ones, with much higher success rates than existing methods.", + "primary_area": "machine learning iii", + "author": "Yunxiao Qin; Yuanhao Xiong; Jinfeng Yi; Cho-Jui Hsieh", + "authorids": "", + "aff": "State Key Laboratory of Media Convergence and Communication, Communication University of China, Beijing, China+Neuroscience and Intelligent Media Institute, Communication University of China, Beijing, China; University of California, Los Angeles, USA; JD AI Research, Beijing, China; University of California, Los Angeles, USA", + "bibtex": "@article{Qin_Xiong_Yi_Hsieh_2023, title={Training Meta-Surrogate Model for Transferable Adversarial Attack}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26139}, DOI={10.1609/aaai.v37i8.26139}, abstractNote={The problem of adversarial attacks to a 
black-box model when no queries are allowed has posed a great challenge to the community and has been extensively investigated. In this setting, one simple yet effective method is to transfer the obtained adversarial examples from attacking surrogate models to fool the target model. Previous works have studied what kind of attacks to the surrogate model can generate more transferable adversarial examples, but their performances are still limited due to the mismatches between surrogate models and the target model. In this paper, we tackle this problem from a novel angle---instead of using the original surrogate models, can we obtain a Meta-Surrogate Model (MSM) such that attacks to this model can be easily transferred to other models? We show that this goal can be mathematically formulated as a bi-level optimization problem and design a differentiable attacker to make training feasible. Given one or a set of surrogate models, our method can thus obtain an MSM such that adversarial examples generated on MSM enjoy eximious transferability. 
Comprehensive experiments on Cifar-10 and ImageNet demonstrate that by attacking the MSM, we can obtain stronger transferable adversarial examples to deceive black-box models including adversarially trained ones, with much higher success rates than existing methods.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Qin, Yunxiao and Xiong, Yuanhao and Yi, Jinfeng and Hsieh, Cho-Jui}, year={2023}, month={Jun.}, pages={9516-9524} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26139/25911", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26139", + "pdf_size": 301347, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10812596193748787198&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cuc.edu.cn;gmail.com;cs.ucla.edu;cs.ucla.edu", + "email": "cuc.edu.cn;gmail.com;cs.ucla.edu;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;1;2;1", + "aff_unique_norm": "Communication University of China;University of California, Los Angeles;JD AI Research", + "aff_unique_dep": "State Key Laboratory of Media Convergence and Communication;;", + "aff_unique_url": "http://www.cuc.edu.cn/;https://www.ucla.edu;", + "aff_unique_abbr": "CUC;UCLA;", + "aff_campus_unique_index": "0+0;1;0;1", + "aff_campus_unique": "Beijing;Los Angeles", + "aff_country_unique_index": "0+0;1;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26198", + "title": "Training-Time Attacks against K-nearest Neighbors", + "track": "main", + "status": "Technical", + "abstract": "Nearest neighbor-based methods are commonly used for classification tasks and as subroutines of other data-analysis methods. 
\nAn attacker with the capability of inserting their own data points into the training set can manipulate the inferred nearest neighbor structure.\nWe distill this goal to the task of performing a training-set data insertion attack against k-Nearest Neighbor classification (kNN).\nWe prove that computing an optimal training-time (a.k.a. poisoning) attack against kNN classification is NP-Hard, even when k = 1 and the attacker can insert only a single data point.\nWe provide an anytime algorithm to perform such an attack, and a greedy algorithm for general k and attacker budget.\nWe provide theoretical bounds and empirically demonstrate the effectiveness and practicality of our methods on synthetic and real-world datasets.\nEmpirically, we find that kNN is vulnerable in practice and that dimensionality reduction is an effective defense.\nWe conclude with a discussion of open problems illuminated by our analysis.", + "primary_area": "machine learning iii", + "author": "Ara Vartanian; Will Rosenbaum; Scott Alfeld", + "authorids": "", + "aff": "University of Wisconsin\u2013Madison; Amherst College; Amherst College", + "bibtex": "@article{Vartanian_Rosenbaum_Alfeld_2023, title={Training-Time Attacks against K-nearest Neighbors}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26198}, DOI={10.1609/aaai.v37i8.26198}, abstractNote={Nearest neighbor-based methods are commonly used for classification tasks and as subroutines of other data-analysis methods. An attacker with the capability of inserting their own data points into the training set can manipulate the inferred nearest neighbor structure.\nWe distill this goal to the task of performing a training-set data insertion attack against k-Nearest Neighbor classification (kNN).\nWe prove that computing an optimal training-time (a.k.a. 
poisoning) attack against kNN classification is NP-Hard, even when k = 1 and the attacker can insert only a single data point.\nWe provide an anytime algorithm to perform such an attack, and a greedy algorithm for general k and attacker budget.\nWe provide theoretical bounds and empirically demonstrate the effectiveness and practicality of our methods on synthetic and real-world datasets.\nEmpirically, we find that kNN is vulnerable in practice and that dimensionality reduction is an effective defense.\nWe conclude with a discussion of open problems illuminated by our analysis.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Vartanian, Ara and Rosenbaum, Will and Alfeld, Scott}, year={2023}, month={Jun.}, pages={10053-10060} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26198/25970", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26198", + "pdf_size": 423418, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16345228224170278319&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "cs.wisc.edu;amherst.edu;amherst.edu", + "email": "cs.wisc.edu;amherst.edu;amherst.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "University of Wisconsin\u2013Madison;Amherst College", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.wisc.edu;https://www.amherst.edu", + "aff_unique_abbr": "UW\u2013Madison;Amherst", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Madison;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25256", + "title": "TransLO: A Window-Based Masked Point Transformer Framework for Large-Scale LiDAR Odometry", + "track": "main", + "status": "Technical", + "abstract": "Recently, transformer architecture has gained great success in the computer vision community, such as image classification, object 
detection, etc. Nonetheless, its application for 3D vision remains to be explored, given that point cloud is inherently sparse, irregular, and unordered. Furthermore, existing point transformer frameworks usually feed raw point cloud of N\u00d73 dimension into transformers, which limits the point processing scale because of their quadratic computational costs to the input size N. In this paper, we rethink the structure of point transformer. Instead of directly applying transformer to points, our network (TransLO) can process tens of thousands of points simultaneously by projecting points onto a 2D surface and then feeding them into a local transformer with linear complexity. Specifically, it is mainly composed of two components: Window-based Masked transformer with Self Attention (WMSA) to capture long-range dependencies; Masked Cross-Frame Attention (MCFA) to associate two frames and predict pose estimation. To deal with the sparsity issue of point cloud, we propose a binary mask to remove invalid and dynamic points. To our knowledge, this is the first transformer-based LiDAR odometry network. The experiment results on the KITTI odometry dataset show that our average rotation and translation RMSE achieves 0.500\u00b0/100m and 0.993% respectively. 
The performance of our network surpasses all recent learning-based methods and even outperforms LOAM on most evaluation sequences.Codes will be released on https://github.com/IRMVLab/TransLO.", + "primary_area": "computer vision ii", + "author": "Jiuming Liu; Guangming Wang; Chaokang Jiang; Zhe Liu; Hesheng Wang", + "authorids": "", + "aff": "Department of Automation, Key Laboratory of System Control and Information Processing of Ministry of Education, Key Laboratory of Marine Intelligent Equipment and System of Ministry of Education, Shanghai Jiao Tong University; Department of Automation, Key Laboratory of System Control and Information Processing of Ministry of Education, Key Laboratory of Marine Intelligent Equipment and System of Ministry of Education, Shanghai Jiao Tong University; Engineering Research Center of Intelligent Control for Underground Space, Ministry of Education, School of Information and Control Engineering, Advanced Robotics Research Center, China University of Mining and Technology; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, China; Department of Automation, Key Laboratory of System Control and Information Processing of Ministry of Education, Key Laboratory of Marine Intelligent Equipment and System of Ministry of Education, Shanghai Jiao Tong University", + "bibtex": "@article{Liu_Wang_Jiang_Liu_Wang_2023, title={TransLO: A Window-Based Masked Point Transformer Framework for Large-Scale LiDAR Odometry}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25256}, DOI={10.1609/aaai.v37i2.25256}, abstractNote={Recently, transformer architecture has gained great success in the computer vision community, such as image classification, object detection, etc. Nonetheless, its application for 3D vision remains to be explored, given that point cloud is inherently sparse, irregular, and unordered. 
Furthermore, existing point transformer frameworks usually feed raw point cloud of N\u00d73 dimension into transformers, which limits the point processing scale because of their quadratic computational costs to the input size N. In this paper, we rethink the structure of point transformer. Instead of directly applying transformer to points, our network (TransLO) can process tens of thousands of points simultaneously by projecting points onto a 2D surface and then feeding them into a local transformer with linear complexity. Specifically, it is mainly composed of two components: Window-based Masked transformer with Self Attention (WMSA) to capture long-range dependencies; Masked Cross-Frame Attention (MCFA) to associate two frames and predict pose estimation. To deal with the sparsity issue of point cloud, we propose a binary mask to remove invalid and dynamic points. To our knowledge, this is the first transformer-based LiDAR odometry network. The experiment results on the KITTI odometry dataset show that our average rotation and translation RMSE achieves 0.500\u00b0/100m and 0.993% respectively. 
The performance of our network surpasses all recent learning-based methods and even outperforms LOAM on most evaluation sequences.Codes will be released on https://github.com/IRMVLab/TransLO.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Jiuming and Wang, Guangming and Jiang, Chaokang and Liu, Zhe and Wang, Hesheng}, year={2023}, month={Jun.}, pages={1683-1691} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25256/25028", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25256", + "pdf_size": 8662599, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8237301031591041062&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;cumt.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;cumt.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "https://github.com/IRMVLab/TransLO", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;China University of Mining and Technology", + "aff_unique_dep": "Department of Automation;School of Information and Control Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn;http://www.cumt.edu.cn/", + "aff_unique_abbr": "SJTU;CUMT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26465", + "title": "TransPath: Learning Heuristics for Grid-Based Pathfinding via Transformers", + "track": "main", + "status": "Technical", + "abstract": "Heuristic search algorithms, e.g. A*, are the commonly used tools for pathfinding on grids, i.e. graphs of regular structure that are widely employed to represent environments in robotics, video games, etc. Instance-independent heuristics for grid graphs, e.g. 
Manhattan distance, do not take the obstacles into account, and thus the search led by such heuristics performs poorly in obstacle-rich environments. To this end, we suggest learning the instance-dependent heuristic proxies that are supposed to notably increase the efficiency of the search. The first heuristic proxy we suggest to learn is the correction factor, i.e. the ratio between the instance-independent cost-to-go estimate and the perfect one (computed offline at the training phase). Unlike learning the absolute values of the cost-to-go heuristic function, which was known before, learning the correction factor utilizes the knowledge of the instance-independent heuristic. The second heuristic proxy is the path probability, which indicates how likely the grid cell is lying on the shortest path. This heuristic can be employed in the Focal Search framework as the secondary heuristic, allowing us to preserve the guarantees on the bounded sub-optimality of the solution. We learn both suggested heuristics in a supervised fashion with the state-of-the-art neural networks containing attention blocks (transformers). We conduct a thorough empirical evaluation on a comprehensive dataset of planning tasks, showing that the suggested techniques i) reduce the computational effort of the A* up to a factor of 4x while producing the solutions, whose costs exceed those of the optimal solutions by less than 0.3% on average; ii) outperform the competitors, which include the conventional techniques from the heuristic search, i.e. 
weighted A*, as well as the state-of-the-art learnable planners.\n\nThe project web-page is: https://airi-institute.github.io/TransPath/.", + "primary_area": "search and optimization", + "author": "Daniil Kirilenko; Anton Andreychuk; Aleksandr Panov; Konstantin Yakovlev", + "authorids": "", + "aff": "Federal Research Center for Computer Science and Control of Russian Academy of Sciences; AIRI; Federal Research Center for Computer Science and Control of Russian Academy of Sciences + AIRI; Federal Research Center for Computer Science and Control of Russian Academy of Sciences + AIRI", + "bibtex": "@article{Kirilenko_Andreychuk_Panov_Yakovlev_2023, title={TransPath: Learning Heuristics for Grid-Based Pathfinding via Transformers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26465}, DOI={10.1609/aaai.v37i10.26465}, abstractNote={Heuristic search algorithms, e.g. A*, are the commonly used tools for pathfinding on grids, i.e. graphs of regular structure that are widely employed to represent environments in robotics, video games, etc. Instance-independent heuristics for grid graphs, e.g. Manhattan distance, do not take the obstacles into account, and thus the search led by such heuristics performs poorly in obstacle-rich environments. To this end, we suggest learning the instance-dependent heuristic proxies that are supposed to notably increase the efficiency of the search. The first heuristic proxy we suggest to learn is the correction factor, i.e. the ratio between the instance-independent cost-to-go estimate and the perfect one (computed offline at the training phase). Unlike learning the absolute values of the cost-to-go heuristic function, which was known before, learning the correction factor utilizes the knowledge of the instance-independent heuristic. The second heuristic proxy is the path probability, which indicates how likely the grid cell is lying on the shortest path. 
This heuristic can be employed in the Focal Search framework as the secondary heuristic, allowing us to preserve the guarantees on the bounded sub-optimality of the solution. We learn both suggested heuristics in a supervised fashion with the state-of-the-art neural networks containing attention blocks (transformers). We conduct a thorough empirical evaluation on a comprehensive dataset of planning tasks, showing that the suggested techniques i) reduce the computational effort of the A* up to a factor of 4x while producing the solutions, whose costs exceed those of the optimal solutions by less than 0.3% on average; ii) outperform the competitors, which include the conventional techniques from the heuristic search, i.e. weighted A*, as well as the state-of-the-art learnable planners. The project web-page is: https://airi-institute.github.io/TransPath/.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kirilenko, Daniil and Andreychuk, Anton and Panov, Aleksandr and Yakovlev, Konstantin}, year={2023}, month={Jun.}, pages={12436-12443} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26465/26237", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26465", + "pdf_size": 283036, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1423572318423295749&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;airi.net;airi.net;isa.ru", + "email": "gmail.com;airi.net;airi.net;isa.ru", + "github": "", + "project": "https://airi-institute.github.io/TransPath/", + "author_num": 4, + "aff_unique_index": "0;1;0+1;0+1", + "aff_unique_norm": "Russian Academy of Sciences;Artificial Intelligence Research Institute", + "aff_unique_dep": "Federal Research Center for Computer Science and Control;", + "aff_unique_url": "https://www.ras.ru;https://airi.net", + "aff_unique_abbr": "RAS;AIRI", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0+0;0+0", + "aff_country_unique": "Russia" + }, + { + "id": "article-25158", + "title": "TransVCL: Attention-Enhanced Video Copy Localization Network with Flexible Supervision", + "track": "main", + "status": "Technical", + "abstract": "Video copy localization aims to precisely localize all the copied segments within a pair of untrimmed videos in video retrieval applications. Previous methods typically start from frame-to-frame similarity matrix generated by cosine similarity between frame-level features of the input video pair, and then detect and refine the boundaries of copied segments on similarity matrix under temporal constraints. In this paper, we propose TransVCL: an attention-enhanced video copy localization network, which is optimized directly from initial frame-level features and trained end-to-end with three main components: a customized Transformer for feature enhancement, a correlation and softmax layer for similarity matrix generation, and a temporal alignment module for copied segments localization. In contrast to previous methods demanding the handcrafted similarity matrix, TransVCL incorporates long-range temporal information between feature sequence pair using self- and cross- attention layers. With the joint design and optimization of three components, the similarity matrix can be learned to present more discriminative copied patterns, leading to significant improvements over previous methods on segment-level labeled datasets (VCSL and VCDB). Besides the state-of-the-art performance in fully supervised setting, the attention architecture facilitates TransVCL to further exploit unlabeled or simply video-level labeled data. Additional experiments of supplementing video-level labeled datasets including SVD and FIVR reveal the high flexibility of TransVCL from full supervision to semi-supervision (with or without video-level annotation). 
Code is publicly available at https://github.com/transvcl/TransVCL.", + "primary_area": "computer vision i", + "author": "Sifeng He; Yue He; Minlong Lu; Chen Jiang; Xudong Yang; Feng Qian; Xiaobo Zhang; Lei Yang; Jiandong Zhang", + "authorids": "", + "aff": "Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Ant Group; Copyright Protection Center of China", + "bibtex": "@article{He_He_Lu_Jiang_Yang_Qian_Zhang_Yang_Zhang_2023, title={TransVCL: Attention-Enhanced Video Copy Localization Network with Flexible Supervision}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25158}, DOI={10.1609/aaai.v37i1.25158}, abstractNote={Video copy localization aims to precisely localize all the copied segments within a pair of untrimmed videos in video retrieval applications. Previous methods typically start from frame-to-frame similarity matrix generated by cosine similarity between frame-level features of the input video pair, and then detect and refine the boundaries of copied segments on similarity matrix under temporal constraints. In this paper, we propose TransVCL: an attention-enhanced video copy localization network, which is optimized directly from initial frame-level features and trained end-to-end with three main components: a customized Transformer for feature enhancement, a correlation and softmax layer for similarity matrix generation, and a temporal alignment module for copied segments localization. In contrast to previous methods demanding the handcrafted similarity matrix, TransVCL incorporates long-range temporal information between feature sequence pair using self- and cross- attention layers. With the joint design and optimization of three components, the similarity matrix can be learned to present more discriminative copied patterns, leading to significant improvements over previous methods on segment-level labeled datasets (VCSL and VCDB). 
Besides the state-of-the-art performance in fully supervised setting, the attention architecture facilitates TransVCL to further exploit unlabeled or simply video-level labeled data. Additional experiments of supplementing video-level labeled datasets including SVD and FIVR reveal the high flexibility of TransVCL from full supervision to semi-supervision (with or without video-level annotation). Code is publicly available at https://github.com/transvcl/TransVCL.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Sifeng and He, Yue and Lu, Minlong and Jiang, Chen and Yang, Xudong and Qian, Feng and Zhang, Xiaobo and Yang, Lei and Zhang, Jiandong}, year={2023}, month={Jun.}, pages={799-807} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25158/24930", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25158", + "pdf_size": 7186846, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3230976815159780949&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "antgroup.com; ; ; ; ;antgroup.com; ; ; ", + "email": "antgroup.com; ; ; ; ;antgroup.com; ; ; ", + "github": "https://github.com/transvcl/TransVCL", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;1", + "aff_unique_norm": "Ant Group;Copyright Protection Center of China", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.antgroup.com;", + "aff_unique_abbr": "Ant Group;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26262", + "title": "Transfer Learning Enhanced DeepONet for Long-Time Prediction of Evolution Equations", + "track": "main", + "status": "Technical", + "abstract": "Deep operator network (DeepONet) has demonstrated great\nsuccess in various learning tasks, including learning solution\noperators of partial differential equations. 
In particular, it provides\n an efficient approach to predicting the evolution equations\nin a finite time horizon. Nevertheless, the vanilla DeepONet\nsuffers from the issue of stability degradation in the long-\ntime prediction. This paper proposes a transfer-learning aided\nDeepONet to enhance the stability. Our idea is to use transfer\nlearning to sequentially update the DeepONets as the surro-\ngates for propagators learned in different time frames. The\nevolving DeepONets can better track the varying complexities\nof the evolution equations, while only need to be updated by\nefficient training of a tiny fraction of the operator networks.\nThrough systematic experiments, we show that the proposed\nmethod not only improves the long-time accuracy of Deep-\nONet while maintaining similar computational cost but also\nsubstantially reduces the sample size of the training set.", + "primary_area": "machine learning iv", + "author": "Wuzhe Xu; Yulong Lu; Li Wang", + "authorids": "", + "aff": "School of Mathematics, University of Minnesota; Department of Mathematics and Statistics, University of Massachusetts Amherst; School of Mathematics, University of Minnesota", + "bibtex": "@article{Xu_Lu_Wang_2023, title={Transfer Learning Enhanced DeepONet for Long-Time Prediction of Evolution Equations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26262}, DOI={10.1609/aaai.v37i9.26262}, abstractNote={Deep operator network (DeepONet) has demonstrated great\nsuccess in various learning tasks, including learning solution\noperators of partial differential equations. In particular, it provides an efficient approach to predicting the evolution equations\nin a finite time horizon. Nevertheless, the vanilla DeepONet\nsuffers from the issue of stability degradation in the long-\ntime prediction. This paper proposes a transfer-learning aided\nDeepONet to enhance the stability. 
Our idea is to use transfer\nlearning to sequentially update the DeepONets as the surro-\ngates for propagators learned in different time frames. The\nevolving DeepONets can better track the varying complexities\nof the evolution equations, while only need to be updated by\nefficient training of a tiny fraction of the operator networks.\nThrough systematic experiments, we show that the proposed\nmethod not only improves the long-time accuracy of Deep-\nONet while maintaining similar computational cost but also\nsubstantially reduces the sample size of the training set.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Wuzhe and Lu, Yulong and Wang, Li}, year={2023}, month={Jun.}, pages={10629-10636} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26262/26034", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26262", + "pdf_size": 2716062, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=418165833854573840&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "umn.edu;umass.edu;umn.edu", + "email": "umn.edu;umass.edu;umn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Minnesota;University of Massachusetts Amherst", + "aff_unique_dep": "School of Mathematics;Department of Mathematics and Statistics", + "aff_unique_url": "https://www.math.umn.edu;https://www.umass.edu", + "aff_unique_abbr": "UMN;UMass Amherst", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Minneapolis;Amherst", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26632", + "title": "Transferable Post-hoc Calibration on Pretrained Transformers in Noisy Text Classification", + "track": "main", + "status": "Technical", + "abstract": "Recent work has demonstrated that pretrained transformers are overconfident in text classification 
tasks, which can be calibrated by the famous post-hoc calibration method temperature scaling (TS). Character or word spelling mistakes are frequently encountered in real applications and greatly threaten transformer model safety. Research on calibration under noisy settings is rare, and we focus on this direction. Based on a toy experiment, we discover that TS performs poorly when the datasets are perturbed by slight noise, such as swapping the characters, which results in distribution shift. We further utilize two metrics, predictive uncertainty and maximum mean discrepancy (MMD), to measure the distribution shift between clean and noisy datasets, based on which we propose a simple yet effective transferable TS method for calibrating models dynamically. To evaluate the performance of the proposed methods under noisy settings, we construct a benchmark consisting of four noise types and five shift intensities based on the QNLI, AG-News, and Emotion tasks. Experimental results on the noisy benchmark show that (1) the metrics are effective in measuring distribution shift and (2) transferable TS can significantly decrease the expected calibration error (ECE) compared with the competitive baseline ensemble TS by approximately 46.09%.", + "primary_area": "speech natural language processing", + "author": "Jun Zhang; Wen Yao; Xiaoqian Chen; Ling Feng", + "authorids": "", + "aff": "Tsinghua University+National Innovation Institute of Defense Technology, Chinese Academy of Military Science; National Innovation Institute of Defense Technology, Chinese Academy of Military Science; National Innovation Institute of Defense Technology, Chinese Academy of Military Science; Tsinghua University", + "bibtex": "@article{Zhang_Yao_Chen_Feng_2023, title={Transferable Post-hoc Calibration on Pretrained Transformers in Noisy Text Classification}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26632}, DOI={10.1609/aaai.v37i11.26632}, abstractNote={Recent work has 
demonstrated that pretrained transformers are overconfident in text classification tasks, which can be calibrated by the famous post-hoc calibration method temperature scaling (TS). Character or word spelling mistakes are frequently encountered in real applications and greatly threaten transformer model safety. Research on calibration under noisy settings is rare, and we focus on this direction. Based on a toy experiment, we discover that TS performs poorly when the datasets are perturbed by slight noise, such as swapping the characters, which results in distribution shift. We further utilize two metrics, predictive uncertainty and maximum mean discrepancy (MMD), to measure the distribution shift between clean and noisy datasets, based on which we propose a simple yet effective transferable TS method for calibrating models dynamically. To evaluate the performance of the proposed methods under noisy settings, we construct a benchmark consisting of four noise types and five shift intensities based on the QNLI, AG-News, and Emotion tasks. 
Experimental results on the noisy benchmark show that (1) the metrics are effective in measuring distribution shift and (2) transferable TS can significantly decrease the expected calibration error (ECE) compared with the competitive baseline ensemble TS by approximately 46.09%.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jun and Yao, Wen and Chen, Xiaoqian and Feng, Ling}, year={2023}, month={Jun.}, pages={13940-13948} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26632/26404", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26632", + "pdf_size": 861310, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17946317384657302080&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff_domain": "mails.tsinghua.edu.cn;126.com;nudt.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;126.com;nudt.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0", + "aff_unique_norm": "Tsinghua University;National Innovation Institute of Defense Technology", + "aff_unique_dep": ";Chinese Academy of Military Science", + "aff_unique_url": "https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "THU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25380", + "title": "Transformation-Equivariant 3D Object Detection for Autonomous Driving", + "track": "main", + "status": "Technical", + "abstract": "3D object detection received increasing attention in autonomous driving recently. Objects in 3D scenes are distributed with diverse orientations. Ordinary detectors do not explicitly model the variations of rotation and reflection transformations. Consequently, large networks and extensive data augmentation are required for robust detection. 
Recent equivariant networks explicitly model the transformation variations by applying shared networks on multiple transformed point clouds, showing great potential in object geometry modeling. However, it is difficult to apply such networks to 3D object detection in autonomous driving due to its large computation cost and slow reasoning speed. In this work, we present TED, an efficient Transformation-Equivariant 3D Detector to overcome the computation cost and speed issues. TED first applies a sparse convolution backbone to extract multi-channel transformation-equivariant voxel features; and then aligns and aggregates these equivariant features into lightweight and compact representations for high-performance 3D object detection. On the highly competitive KITTI 3D car detection leaderboard, TED ranked 1st among all submissions with competitive efficiency. Code is available at https://github.com/hailanyi/TED.", + "primary_area": "computer vision iii", + "author": "Hai Wu; Chenglu Wen; Wei Li; Xin Li; Ruigang Yang; Cheng Wang", + "authorids": "", + "aff": "School of Informatics, Xiamen University+Inceptio Technology; School of Informatics, Xiamen University; Inceptio Technology; School of Performance, Visualization, and Fine Art, Texas A&M University; Inceptio Technology; School of Informatics, Xiamen University", + "bibtex": "@article{Wu_Wen_Li_Li_Yang_Wang_2023, title={Transformation-Equivariant 3D Object Detection for Autonomous Driving}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25380}, DOI={10.1609/aaai.v37i3.25380}, abstractNote={3D object detection received increasing attention in autonomous driving recently. Objects in 3D scenes are distributed with diverse orientations. Ordinary detectors do not explicitly model the variations of rotation and reflection transformations. Consequently, large networks and extensive data augmentation are required for robust detection. 
Recent equivariant networks explicitly model the transformation variations by applying shared networks on multiple transformed point clouds, showing great potential in object geometry modeling. However, it is difficult to apply such networks to 3D object detection in autonomous driving due to its large computation cost and slow reasoning speed. In this work, we present TED, an efficient Transformation-Equivariant 3D Detector to overcome the computation cost and speed issues. TED first applies a sparse convolution backbone to extract multi-channel transformation-equivariant voxel features; and then aligns and aggregates these equivariant features into lightweight and compact representations for high-performance 3D object detection. On the highly competitive KITTI 3D car detection leaderboard, TED ranked 1st among all submissions with competitive efficiency. Code is available at https://github.com/hailanyi/TED.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Hai and Wen, Chenglu and Li, Wei and Li, Xin and Yang, Ruigang and Wang, Cheng}, year={2023}, month={Jun.}, pages={2795-2802} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25380/25152", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25380", + "pdf_size": 808579, + "gs_citation": 121, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8570763343209677965&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn;gmail.com;tamu.edu;inceptio.ai;xmu.edu.cn", + "email": "stu.xmu.edu.cn;xmu.edu.cn;gmail.com;tamu.edu;inceptio.ai;xmu.edu.cn", + "github": "https://github.com/hailanyi/TED", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;1;2;1;0", + "aff_unique_norm": "Xiamen University;Inceptio Technology;Texas A&M University", + "aff_unique_dep": "School of Informatics;;School of Performance, Visualization, and Fine Art", + "aff_unique_url": 
"https://www.xmu.edu.cn;;https://tamu.edu", + "aff_unique_abbr": "XMU;;TAMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;1;2;1;0", + "aff_country_unique": "China;;United States" + }, + { + "id": "article-26963", + "title": "Transformer-Based Multi-Hop Question Generation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Question generation is the parallel task of question answering, where given an input context and, optionally, an answer, the goal is to generate a relevant and fluent natural language question. Although recent works on question generation have experienced success by utilizing sequence-to-sequence models, there is a need for question generation models to handle increasingly complex input contexts to produce increasingly detailed questions. Multi-hop question generation is a more challenging task that aims to generate questions by connecting multiple facts from multiple input contexts. In this work, we apply a transformer model to the task of multi-hop question generation without utilizing any sentence-level supporting fact information. We utilize concepts that have proven effective in single-hop question generation, including a copy mechanism and placeholder tokens. 
We evaluate our model\u2019s performance on the HotpotQA dataset using automated evaluation metrics, including BLEU, ROUGE and METEOR and show an improvement over the previous work.", + "primary_area": "", + "author": "John Emerson; Yllias Chali", + "authorids": "", + "aff": "University of Lethbridge; University of Lethbridge", + "bibtex": "@article{Emerson_Chali_2024, title={Transformer-Based Multi-Hop Question Generation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26963}, DOI={10.1609/aaai.v37i13.26963}, abstractNote={Question generation is the parallel task of question answering, where given an input context and, optionally, an answer, the goal is to generate a relevant and fluent natural language question. Although recent works on question generation have experienced success by utilizing sequence-to-sequence models, there is a need for question generation models to handle increasingly complex input contexts to produce increasingly detailed questions. Multi-hop question generation is a more challenging task that aims to generate questions by connecting multiple facts from multiple input contexts. In this work, we apply a transformer model to the task of multi-hop question generation without utilizing any sentence-level supporting fact information. We utilize concepts that have proven effective in single-hop question generation, including a copy mechanism and placeholder tokens. 
We evaluate our model\u2019s performance on the HotpotQA dataset using automated evaluation metrics, including BLEU, ROUGE and METEOR and show an improvement over the previous work.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Emerson, John and Chali, Yllias}, year={2024}, month={Jul.}, pages={16206-16207} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26963/26735", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26963", + "pdf_size": 71649, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9098929781321321234&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "uleth.ca;uleth.ca", + "email": "uleth.ca;uleth.ca", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Lethbridge", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uleth.ca", + "aff_unique_abbr": "U of L", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26958", + "title": "Transformer-Based Named Entity Recognition for French Using Adversarial Adaptation to Similar Domain Corpora (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Named Entity Recognition (NER) involves the identification and classification of named entities in unstructured text into predefined classes. NER in languages with limited resources, like French, is still an open problem due to the lack of large, robust, labelled datasets. In this paper, we propose a transformer-based NER approach for French using adversarial adaptation to similar domain or general corpora for improved feature extraction and better generalization. 
We evaluate our approach on three labelled datasets and show that our adaptation framework outperforms the corresponding non-adaptive models for various combinations of transformer models, source datasets and target corpora.", + "primary_area": "", + "author": "Arjun Choudhry; Pankaj Gupta; Inder Khatri; Aaryan Gupta; Maxime Nicol; Marie-Jean Meurs; Dinesh Kumar Vishwakarma", + "authorids": "", + "aff": "Biometric Research Laboratory, Delhi Technological University, New Delhi, India+IKB Lab, Universit\u00e9 du Qu\u00e9bec \u00e0 Montr\u00e9al, Montr\u00e9al, QC, Canada; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; Biometric Research Laboratory, Delhi Technological University, New Delhi, India; IKB Lab, Universit\u00e9 du Qu\u00e9bec \u00e0 Montr\u00e9al, Montr\u00e9al, QC, Canada; IKB Lab, Universit\u00e9 du Qu\u00e9bec \u00e0 Montr\u00e9al, Montr\u00e9al, QC, Canada; Biometric Research Laboratory, Delhi Technological University, New Delhi, India", + "bibtex": "@article{Choudhry_Gupta_Khatri_Gupta_Nicol_Meurs_Vishwakarma_2024, title={Transformer-Based Named Entity Recognition for French Using Adversarial Adaptation to Similar Domain Corpora (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26958}, DOI={10.1609/aaai.v37i13.26958}, abstractNote={Named Entity Recognition (NER) involves the identification and classification of named entities in unstructured text into predefined classes. NER in languages with limited resources, like French, is still an open problem due to the lack of large, robust, labelled datasets. In this paper, we propose a transformer-based NER approach for French using adversarial adaptation to similar domain or general corpora for improved feature extraction and better generalization. 
We evaluate our approach on three labelled datasets and show that our adaptation framework outperforms the corresponding non-adaptive models for various combinations of transformer models, source datasets and target corpora.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Choudhry, Arjun and Gupta, Pankaj and Khatri, Inder and Gupta, Aaryan and Nicol, Maxime and Meurs, Marie-Jean and Vishwakarma, Dinesh Kumar}, year={2024}, month={Jul.}, pages={16196-16197} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26958/26730", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26958", + "pdf_size": 115678, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3764676092014994718&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;courrier.uqam.ca;uqam.ca;dtu.ac.in", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;courrier.uqam.ca;uqam.ca;dtu.ac.in", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;0;0;1;1;0", + "aff_unique_norm": "Delhi Technological University;Universit\u00e9 du Qu\u00e9bec \u00e0 Montr\u00e9al", + "aff_unique_dep": "Biometric Research Laboratory;IKB Lab", + "aff_unique_url": "https://www.dtu.ac.in;https://www.uqam.ca", + "aff_unique_abbr": "DTU;UQAM", + "aff_campus_unique_index": "0+1;0;0;0;1;1;0", + "aff_campus_unique": "New Delhi;Montr\u00e9al", + "aff_country_unique_index": "0+1;0;0;0;1;1;0", + "aff_country_unique": "India;Canada" + }, + { + "id": "article-25822", + "title": "Tree Learning: Optimal Sample Complexity and Algorithms", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of learning a hierarchical tree representation of data from labeled samples, taken from an arbitrary (and possibly adversarial) distribution. Consider a collection of data tuples labeled according to their hierarchical structure. 
The smallest number of such tuples required in order to be able to accurately label subsequent tuples is of interest for data collection in machine learning. We present optimal sample complexity bounds for this problem in several learning settings, including (agnostic) PAC learning and online learning. Our results are based on tight bounds of the Natarajan and Littlestone dimensions of the associated problem. The corresponding tree classifiers can be constructed efficiently in near-linear time.", + "primary_area": "machine learning i", + "author": "Dmitrii Avdiukhin; Grigory Yaroslavtsev; Danny Vainstein; Orr Fischer; Sauman Das; Faraz Mirza", + "authorids": "", + "aff": "Indiana University, Department of Computer Science; George Mason University, Department of Computer Science; Tel-Aviv University, Blavatnik School of Computer Science; Weizmann Institute of Science, Department of Computer Science and Applied Mathematics; Thomas Jefferson High School for Science and Technology; Thomas Jefferson High School for Science and Technology", + "bibtex": "@article{Avdiukhin_Yaroslavtsev_Vainstein_Fischer_Das_Mirza_2023, title={Tree Learning: Optimal Sample Complexity and Algorithms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25822}, DOI={10.1609/aaai.v37i6.25822}, abstractNote={We study the problem of learning a hierarchical tree representation of data from labeled samples, taken from an arbitrary (and possibly adversarial) distribution. Consider a collection of data tuples labeled according to their hierarchical structure. The smallest number of such tuples required in order to be able to accurately label subsequent tuples is of interest for data collection in machine learning. We present optimal sample complexity bounds for this problem in several learning settings, including (agnostic) PAC learning and online learning. Our results are based on tight bounds of the Natarajan and Littlestone dimensions of the associated problem. 
The corresponding tree classifiers can be constructed efficiently in near-linear time.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Avdiukhin, Dmitrii and Yaroslavtsev, Grigory and Vainstein, Danny and Fischer, Orr and Das, Sauman and Mirza, Faraz}, year={2023}, month={Jun.}, pages={6701-6708} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25822/25594", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25822", + "pdf_size": 308163, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=170014101610840542&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "iu.edu;grigory.us;gmail.com;weizmann.ac.il;tjhsst.edu;tjhsst.edu", + "email": "iu.edu;grigory.us;gmail.com;weizmann.ac.il;tjhsst.edu;tjhsst.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;4", + "aff_unique_norm": "Indiana University;George Mason University;Tel-Aviv University;Weizmann Institute of Science;Thomas Jefferson High School for Science and Technology", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Blavatnik School of Computer Science;Department of Computer Science and Applied Mathematics;", + "aff_unique_url": "https://www.indiana.edu;https://www.gmu.edu;https://www.tau.ac.il;https://www.weizmann.ac.il;https://www.tjhsst.edu/", + "aff_unique_abbr": "IU;GMU;TAU;Weizmann;TJHSST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1;0;0", + "aff_country_unique": "United States;Israel" + }, + { + "id": "article-25494", + "title": "Tree-Structured Trajectory Encoding for Vision-and-Language Navigation", + "track": "main", + "status": "Technical", + "abstract": "Over the past few years, the research on vision-and-language navigation (VLN) has made tremendous progress. 
Many previous works attempted to improve the performance from different aspects like training strategy, data augmentation, pre-training, etc. This work focuses on a rarely-explored aspect in VLN, namely the trajectory organization and encoding during the navigation. Most of existing state-of-the-art VLN models adopt a vanilla sequential strategy for encoding the trajectories. Such strategy takes the whole trajectory as a single sequence to estimate the current state, no matter whether the agent moved smoothly or perhaps made mistakes and backtracked in the past. We show that the sequential encoding may largely lose this kind of fine-grained structure in the trajectory, which could hamper the later state estimation and decision making. In order to solve this problem, this work proposes a novel tree-structured trajectory encoding strategy. The whole trajectory is organized as a tree rooted from the starting position, and encoded using our Tree-Transformer module to fully extract the fine-grained historical information. Besides, as the spatial topology could be easily embedded in the trajectory tree, we further design a tree-based action space to allow the agent making long-range error-correction in one decision. We implement the holistic agent based on cross-modal transformer and train it with a newly-proposed Tree-nDTW reward. On the benchmark dataset R2R, our model achieves a surpassing success rate (SR) of 68% on val-unseen and 66% on test. 
We further conduct extensive ablation studies and analyses to provide more insights for the effectiveness our designs.", + "primary_area": "computer vision iii", + "author": "Xinzhe Zhou; Yadong Mu", + "authorids": "", + "aff": "Wangxuan Institute of Computer Technology, Peking University; Wangxuan Institute of Computer Technology, Peking University + Peng Cheng Laboratory", + "bibtex": "@article{Zhou_Mu_2023, title={Tree-Structured Trajectory Encoding for Vision-and-Language Navigation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25494}, DOI={10.1609/aaai.v37i3.25494}, abstractNote={Over the past few years, the research on vision-and-language navigation (VLN) has made tremendous progress. Many previous works attempted to improve the performance from different aspects like training strategy, data augmentation, pre-training, etc. This work focuses on a rarely-explored aspect in VLN, namely the trajectory organization and encoding during the navigation. Most of existing state-of-the-art VLN models adopt a vanilla sequential strategy for encoding the trajectories. Such strategy takes the whole trajectory as a single sequence to estimate the current state, no matter whether the agent moved smoothly or perhaps made mistakes and backtracked in the past. We show that the sequential encoding may largely lose this kind of fine-grained structure in the trajectory, which could hamper the later state estimation and decision making. In order to solve this problem, this work proposes a novel tree-structured trajectory encoding strategy. The whole trajectory is organized as a tree rooted from the starting position, and encoded using our Tree-Transformer module to fully extract the fine-grained historical information. Besides, as the spatial topology could be easily embedded in the trajectory tree, we further design a tree-based action space to allow the agent making long-range error-correction in one decision. 
We implement the holistic agent based on cross-modal transformer and train it with a newly-proposed Tree-nDTW reward. On the benchmark dataset R2R, our model achieves a surpassing success rate (SR) of 68% on val-unseen and 66% on test. We further conduct extensive ablation studies and analyses to provide more insights for the effectiveness our designs.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Xinzhe and Mu, Yadong}, year={2023}, month={Jun.}, pages={3814-3824} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25494/25266", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25494", + "pdf_size": 791313, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1276681748403962374&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Peking University;Peng Cheng Laboratory", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;", + "aff_unique_url": "http://www.pku.edu.cn;http://www.pcl.ac.cn", + "aff_unique_abbr": "PKU;PCL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25882", + "title": "Tricking the Hashing Trick: A Tight Lower Bound on the Robustness of CountSketch to Adaptive Inputs", + "track": "main", + "status": "Technical", + "abstract": "CountSketch and Feature Hashing (the ``hashing trick'') are popular randomized dimensionality reduction methods that support recovery of l2 -heavy hitters and approximate inner products. When the inputs are not adaptive (do not depend on prior outputs), classic estimators applied to a sketch of size O(l / epsilon) are accurate for a number of queries that is exponential in l. 
When inputs are adaptive, however, an adversarial input can be constructed after O(l) queries with the classic estimator and the best known robust estimator only supports ~O(l^2) queries. In this work we show that this quadratic dependence is in a sense inherent: We design an attack that after O(l^2) queries produces an adversarial input vector whose sketch is highly biased. Our attack uses ``natural'' non-adaptive inputs (only the final adversarial input is chosen adaptively) and universally applies with any correct estimator, including one that is unknown to the attacker. In that, we expose inherent vulnerability of this fundamental method.", + "primary_area": "machine learning i", + "author": "Edith Cohen; Jelani Nelson; Tamas Sarlos; Uri Stemmer", + "authorids": "", + "aff": "Google Research + Tel Aviv University; UC Berkeley + Google Research; Google Research; Tel Aviv University + Google Research", + "bibtex": "@article{Cohen_Nelson_Sarlos_Stemmer_2023, title={Tricking the Hashing Trick: A Tight Lower Bound on the Robustness of CountSketch to Adaptive Inputs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25882}, DOI={10.1609/aaai.v37i6.25882}, abstractNote={CountSketch and Feature Hashing (the ``hashing trick\u2019\u2019) are popular randomized dimensionality reduction methods that support recovery of l2 -heavy hitters and approximate inner products. When the inputs are not adaptive (do not depend on prior outputs), classic estimators applied to a sketch of size O(l / epsilon) are accurate for a number of queries that is exponential in l. When inputs are adaptive, however, an adversarial input can be constructed after O(l) queries with the classic estimator and the best known robust estimator only supports ~O(l^2) queries. In this work we show that this quadratic dependence is in a sense inherent: We design an attack that after O(l^2) queries produces an adversarial input vector whose sketch is highly biased. 
Our attack uses ``natural\u2019\u2019 non-adaptive inputs (only the final adversarial input is chosen adaptively) and universally applies with any correct estimator, including one that is unknown to the attacker. In that, we expose inherent vulnerability of this fundamental method.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cohen, Edith and Nelson, Jelani and Sarlos, Tamas and Stemmer, Uri}, year={2023}, month={Jun.}, pages={7235-7243} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25882/25654", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25882", + "pdf_size": 212132, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6842095471041054843&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff_domain": "cohenwang.com;alum.mit.edu;google.com;uri.co.il", + "email": "cohenwang.com;alum.mit.edu;google.com;uri.co.il", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2+0;0;1+0", + "aff_unique_norm": "Google;Tel Aviv University;University of California, Berkeley", + "aff_unique_dep": "Google Research;;", + "aff_unique_url": "https://research.google;https://www.tau.ac.il;https://www.berkeley.edu", + "aff_unique_abbr": "Google Research;TAU;UC Berkeley", + "aff_campus_unique_index": "0;2+0;0;0", + "aff_campus_unique": "Mountain View;;Berkeley", + "aff_country_unique_index": "0+1;0+0;0;1+0", + "aff_country_unique": "United States;Israel" + }, + { + "id": "article-25375", + "title": "Truncate-Split-Contrast: A Framework for Learning from Mislabeled Videos", + "track": "main", + "status": "Technical", + "abstract": "Learning with noisy label is a classic problem that has been extensively studied for image tasks, but much less for video in the literature. A straightforward migration from images to videos without considering temporal semantics and computational cost is not a sound choice. 
In this paper, we propose two new strategies for video analysis with noisy labels: 1) a lightweight channel selection method dubbed as Channel Truncation for feature-based label noise detection. This method selects the most discriminative channels to split clean and noisy instances in each category. 2) A novel contrastive strategy dubbed as Noise Contrastive Learning, which constructs the relationship between clean and noisy instances to regularize model training. Experiments on three well-known benchmark datasets for video classification show that our proposed truNcatE-split-contrAsT (NEAT) significantly outperforms the existing baselines. By reducing the dimension to 10% of it, our method achieves over 0.4 noise detection F1-score and 5% classification accuracy improvement on Mini-Kinetics dataset under severe noise (symmetric-80%). Thanks to Noise Contrastive Learning, the average classification accuracy improvement on Mini-Kinetics and Sth-Sth-V1 is over 1.6%.", + "primary_area": "computer vision iii", + "author": "Zixiao Wang; Junwu Weng; Chun Yuan; Jue Wang", + "authorids": "", + "aff": "Tsinghua University; Tencent AI Lab; Tsinghua University; Tencent AI Lab", + "bibtex": "@article{Wang_Weng_Yuan_Wang_2023, title={Truncate-Split-Contrast: A Framework for Learning from Mislabeled Videos}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25375}, DOI={10.1609/aaai.v37i3.25375}, abstractNote={Learning with noisy label is a classic problem that has been extensively studied for image tasks, but much less for video in the literature. A straightforward migration from images to videos without considering temporal semantics and computational cost is not a sound choice. In this paper, we propose two new strategies for video analysis with noisy labels: 1) a lightweight channel selection method dubbed as Channel Truncation for feature-based label noise detection. 
This method selects the most discriminative channels to split clean and noisy instances in each category. 2) A novel contrastive strategy dubbed as Noise Contrastive Learning, which constructs the relationship between clean and noisy instances to regularize model training. Experiments on three well-known benchmark datasets for video classification show that our proposed truNcatE-split-contrAsT (NEAT) significantly outperforms the existing baselines. By reducing the dimension to 10% of it, our method achieves over 0.4 noise detection F1-score and 5% classification accuracy improvement on Mini-Kinetics dataset under severe noise (symmetric-80%). Thanks to Noise Contrastive Learning, the average classification accuracy improvement on Mini-Kinetics and Sth-Sth-V1 is over 1.6%.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Zixiao and Weng, Junwu and Yuan, Chun and Wang, Jue}, year={2023}, month={Jun.}, pages={2751-2758} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25375/25147", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25375", + "pdf_size": 324308, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15739652325496201919&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.tsinghua.edu.cn;e.ntu.edu.sg;sz.tsinghua.edu.cn;gmail.com", + "email": "mails.tsinghua.edu.cn;e.ntu.edu.sg;sz.tsinghua.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Tsinghua University;Tencent", + "aff_unique_dep": ";Tencent AI Lab", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://ai.tencent.com", + "aff_unique_abbr": "THU;Tencent AI Lab", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26265", + "title": "Trusted Fine-Grained Image Classification through 
Hierarchical Evidence Fusion", + "track": "main", + "status": "Technical", + "abstract": "Fine-Grained Image Classification (FGIC) aims to classify images into specific subordinate classes of a superclass. Due to insufficient training data and confusing data samples, FGIC may produce uncertain classification results that are untrusted for data applications. In fact, FGIC can be viewed as a hierarchical classification process and the multilayer information facilitates to reduce uncertainty and improve the reliability of FGIC. In this paper, we adopt the evidence theory to measure uncertainty and confidence in hierarchical classification process and propose a trusted FGIC method through fusing multilayer classification evidence. Comparing with the traditional approaches, the trusted FGIC method not only generates accurate classification results but also reduces the uncertainty of fine-grained classification. Specifically, we construct an evidence extractor at each classification layer to extract multilayer (multi-grained) evidence for image classification. To fuse the extracted multi-grained evidence from coarse to fine, we formulate evidence fusion with the Dirichlet hyper probability distribution and thereby hierarchically decompose the evidence of coarse-grained classes into fine-grained classes to enhance the classification performances. The ablation experiments validate that the hierarchical evidence fusion can improve the precision and also reduce the uncertainty of fine-grained classification. 
The comparison with state-of-the-art FGIC methods shows that our proposed method achieves competitive performances.", + "primary_area": "machine learning iv", + "author": "Zhikang Xu; Xiaodong Yue; Ying Lv; Wei Liu; Zihao Li", + "authorids": "", + "aff": "School of Computer Engineering and Science, Shanghai University, Shanghai, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China + Artificial Intelligence Institute of Shanghai University, Shanghai, China + VLN Lab, NA VI MedTech Co., Ltd. Shanghai, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China; College of Electronics and Information Engineering, Tongji University, Shanghai, China; School of Computer Engineering and Science, Shanghai University, Shanghai, China", + "bibtex": "@article{Xu_Yue_Lv_Liu_Li_2023, title={Trusted Fine-Grained Image Classification through Hierarchical Evidence Fusion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26265}, DOI={10.1609/aaai.v37i9.26265}, abstractNote={Fine-Grained Image Classification (FGIC) aims to classify images into specific subordinate classes of a superclass. Due to insufficient training data and confusing data samples, FGIC may produce uncertain classification results that are untrusted for data applications. In fact, FGIC can be viewed as a hierarchical classification process and the multilayer information facilitates to reduce uncertainty and improve the reliability of FGIC. In this paper, we adopt the evidence theory to measure uncertainty and confidence in hierarchical classification process and propose a trusted FGIC method through fusing multilayer classification evidence. Comparing with the traditional approaches, the trusted FGIC method not only generates accurate classification results but also reduces the uncertainty of fine-grained classification. 
Specifically, we construct an evidence extractor at each classification layer to extract multilayer (multi-grained) evidence for image classification. To fuse the extracted multi-grained evidence from coarse to fine, we formulate evidence fusion with the Dirichlet hyper probability distribution and thereby hierarchically decompose the evidence of coarse-grained classes into fine-grained classes to enhance the classification performances. The ablation experiments validate that the hierarchical evidence fusion can improve the precision and also reduce the uncertainty of fine-grained classification. The comparison with state-of-the-art FGIC methods shows that our proposed method achieves competitive performances.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Zhikang and Yue, Xiaodong and Lv, Ying and Liu, Wei and Li, Zihao}, year={2023}, month={Jun.}, pages={10657-10665} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26265/26037", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26265", + "pdf_size": 1754229, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13974292100611272592&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "shu.edu.cn;shu.edu.cn;shu.edu.cn;tongji.edu.cn;shu.edu.cn", + "email": "shu.edu.cn;shu.edu.cn;shu.edu.cn;tongji.edu.cn;shu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+0+1;0;2;0", + "aff_unique_norm": "Shanghai University;NA VI MedTech Co., Ltd.;Tongji University", + "aff_unique_dep": "School of Computer Engineering and Science;VLN Lab;College of Electronics and Information Engineering", + "aff_unique_url": "https://www.shu.edu.cn;;http://www.tongji.edu.cn", + "aff_unique_abbr": "SHU;;Tongji", + "aff_campus_unique_index": "0;0+0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0+0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"article-26842", + "title": "Trustworthy Residual Vehicle Value Prediction for Auto Finance", + "track": "iaai technical track", + "status": "Technical", + "abstract": "The residual value (RV) of a vehicle refers to its estimated worth at some point in the future. It is a core component in every auto financial product, used to determine the credit lines and the leasing rates. As such, an accurate prediction of RV is critical for the auto finance industry, since it can pose a risk of revenue loss by over-prediction or make the financial product incompetent by under-prediction. Although there are a number of prior studies on training machine learning models on a large amount of used car sales data, we had to cope with real-world operational requirements such as compliance with regulations (i.e. monotonicity of output with respect to a subset of features) and generalization to unseen input (i.e. new and rare car models). In this paper, we describe how we coped with these practical challenges and created value for our business at Hyundai Capital Services, the top auto financial service provider in Korea.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Mihye Kim; Jimyung Choi; Jaehyun Kim; Wooyoung Kim; Yeonung Baek; Gisuk Bang; Kwangwoon Son; Yeonman Ryou; Kee-Eung Kim", + "authorids": "", + "aff": "Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Hyundai Capital Services, Korea; Kim Jaechul Graduate School of AI, KAIST, Korea", + "bibtex": "@article{Kim_Choi_Kim_Kim_Baek_Bang_Son_Ryou_Kim_2024, title={Trustworthy Residual Vehicle Value Prediction for Auto Finance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26842}, DOI={10.1609/aaai.v37i13.26842}, abstractNote={The residual value (RV) of a vehicle refers to its estimated worth at 
some point in the future. It is a core component in every auto financial product, used to determine the credit lines and the leasing rates. As such, an accurate prediction of RV is critical for the auto finance industry, since it can pose a risk of revenue loss by over-prediction or make the financial product incompetent by under-prediction. Although there are a number of prior studies on training machine learning models on a large amount of used car sales data, we had to cope with real-world operational requirements such as compliance with regulations (i.e. monotonicity of output with respect to a subset of features) and generalization to unseen input (i.e. new and rare car models). In this paper, we describe how we coped with these practical challenges and created value for our business at Hyundai Capital Services, the top auto financial service provider in Korea.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kim, Mihye and Choi, Jimyung and Kim, Jaehyun and Kim, Wooyoung and Baek, Yeonung and Bang, Gisuk and Son, Kwangwoon and Ryou, Yeonman and Kim, Kee-Eung}, year={2024}, month={Jul.}, pages={15537-15544} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26842/26614", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26842", + "pdf_size": 2181873, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12821570107664824266&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff_domain": "hcs.com;hcs.com;hcs.com;hcs.com; ; ; ; ;kaist.edu", + "email": "hcs.com;hcs.com;hcs.com;hcs.com; ; ; ; ;kaist.edu", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;1", + "aff_unique_norm": "Hyundai Capital Services;KAIST", + "aff_unique_dep": ";Kim Jaechul Graduate School of AI", + "aff_unique_url": "https://www.hyundaicapital.com;https://www.kaist.edu", + "aff_unique_abbr": "HCS;KAIST", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-25729", + "title": "Truthful Mechanisms for Steiner Tree Problems", + "track": "main", + "status": "Technical", + "abstract": "Consider an undirected graph G=(V,E) model for a communication network, where each edge is owned by a selfish agent, who reports the cost for offering the use of her edge. Note that each edge agent may misreport her own cost for the use of the edge for her own benefit. In such a non-cooperative setting, we aim at designing an approximately truthful mechanism for establishing a Steiner tree, a minimum cost tree spanning over all the terminals. We present a truthful-in-expectation mechanism that achieves the approximation ratio ln 4 + \u03b5 \u2248 1.39, which matches the current best algorithmic ratio for STP.", + "primary_area": "game theory and economic paradigms", + "author": "Jinshan Zhang; Zhengyang Liu; Xiaotie Deng; Jianwei Yin", + "authorids": "", + "aff": "Zhejiang University; Beijing Institute of Technology; Peking University; Zhejiang University", + "bibtex": "@article{Zhang_Liu_Deng_Yin_2023, title={Truthful Mechanisms for Steiner Tree Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25729}, DOI={10.1609/aaai.v37i5.25729}, abstractNote={Consider an undirected graph G=(V,E) model for a communication network, where each edge is owned by a selfish agent, who reports the cost for offering the use of her edge. Note that each edge agent may misreport her own cost for the use of the edge for her own benefit. In such a non-cooperative setting, we aim at designing an approximately truthful mechanism for establishing a Steiner tree, a minimum cost tree spanning over all the terminals. 
We present a truthful-in-expectation mechanism that achieves the approximation ratio ln 4 + \u03b5 \u2248 1.39, which matches the current best algorithmic ratio for STP.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jinshan and Liu, Zhengyang and Deng, Xiaotie and Yin, Jianwei}, year={2023}, month={Jun.}, pages={5884-5891} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25729/25501", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25729", + "pdf_size": 153082, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12050846478325941878&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "zju.edu.cn;bit.edu.cn;pku.edu.cn;cs.zju.edu.cn", + "email": "zju.edu.cn;bit.edu.cn;pku.edu.cn;cs.zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Zhejiang University;Beijing Institute of Technology;Peking University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.zju.edu.cn;http://www.bit.edu.cn/;http://www.pku.edu.cn", + "aff_unique_abbr": "ZJU;BIT;Peking U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25310", + "title": "Two Heads Are Better than One: Image-Point Cloud Network for Depth-Based 3D Hand Pose Estimation", + "track": "main", + "status": "Technical", + "abstract": "Depth images and point clouds are the two most commonly used data representations for depth-based 3D hand pose estimation. Benefiting from the structuring of image data and the inherent inductive biases of the 2D Convolutional Neural Network (CNN), image-based methods are highly efficient and effective. However, treating the depth data as a 2D image inevitably ignores the 3D nature of depth data. Point cloud-based methods can better mine the 3D geometric structure of depth data. 
However, these methods suffer from the disorder and non-structure of point cloud data, which is computationally inefficient. In this paper, we propose an Image-Point cloud Network (IPNet) for accurate and robust 3D hand pose estimation. IPNet utilizes 2D CNN to extract visual representations in 2D image space and performs iterative correction in 3D point cloud space to exploit the 3D geometry information of depth data. In particular, we propose a sparse anchor-based \"aggregation-interaction-propagation'' paradigm to enhance point cloud features and refine the hand pose, which reduces irregular data access. Furthermore, we introduce a 3D hand model to the iterative correction process, which significantly improves the robustness of IPNet to occlusion and depth holes. Experiments show that IPNet outperforms state-of-the-art methods on three challenging hand datasets.", + "primary_area": "computer vision ii", + "author": "Pengfei Ren; Yuchen Chen; Jiachang Hao; Haifeng Sun; Qi Qi; Jingyu Wang; Jianxin Liao", + "authorids": "", + "aff": "State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications", + "bibtex": "@article{Ren_Chen_Hao_Sun_Qi_Wang_Liao_2023, title={Two Heads Are Better than One: Image-Point Cloud Network for Depth-Based 3D Hand Pose 
Estimation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25310}, DOI={10.1609/aaai.v37i2.25310}, abstractNote={Depth images and point clouds are the two most commonly used data representations for depth-based 3D hand pose estimation. Benefiting from the structuring of image data and the inherent inductive biases of the 2D Convolutional Neural Network (CNN), image-based methods are highly efficient and effective. However, treating the depth data as a 2D image inevitably ignores the 3D nature of depth data. Point cloud-based methods can better mine the 3D geometric structure of depth data. However, these methods suffer from the disorder and non-structure of point cloud data, which is computationally inefficient. In this paper, we propose an Image-Point cloud Network (IPNet) for accurate and robust 3D hand pose estimation. IPNet utilizes 2D CNN to extract visual representations in 2D image space and performs iterative correction in 3D point cloud space to exploit the 3D geometry information of depth data. In particular, we propose a sparse anchor-based "aggregation-interaction-propagation\u2019\u2019 paradigm to enhance point cloud features and refine the hand pose, which reduces irregular data access. Furthermore, we introduce a 3D hand model to the iterative correction process, which significantly improves the robustness of IPNet to occlusion and depth holes. 
Experiments show that IPNet outperforms state-of-the-art methods on three challenging hand datasets.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ren, Pengfei and Chen, Yuchen and Hao, Jiachang and Sun, Haifeng and Qi, Qi and Wang, Jingyu and Liao, Jianxin}, year={2023}, month={Jun.}, pages={2163-2171} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25310/25082", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25310", + "pdf_size": 1688151, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6751518307363049183&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "State Key Laboratory of Networking and Switching Technology", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25793", + "title": "Two Views of Constrained Differential Privacy: Belief Revision and Update", + "track": "main", + "status": "Technical", + "abstract": "In this paper, we provide two views of constrained differential private (DP) mechanisms. The first one is as belief revision. A constrained DP mechanism is obtained by standard probabilistic conditioning, and hence can be naturally implemented by Monte Carlo algorithms. The other is as belief update. A constrained DP is defined according to l2-distance minimization postprocessing or projection and hence can be naturally implemented by optimization algorithms. 
The main advantage of these two perspectives is that we can make full use of the machinery of belief revision and update to show basic properties for constrained differential privacy especially some important new composition properties. Within the framework established in this paper, constrained DP algorithms in the literature can be classified either as belief revision or belief update. At the end of the paper, we demonstrate their differences especially in utility on a couple of scenarios.", + "primary_area": "knowledge representation and reasoning", + "author": "Likang Liu; Keke Sun; Chunlai Zhou; Yuan Feng", + "authorids": "", + "aff": "School of Information, Renmin University of China, Beijing, China; School of Information, Renmin University of China, Beijing, China; School of Information, Renmin University of China, Beijing, China; Centre of Quantum Software and Information, University of Technology Sydney, Australia", + "bibtex": "@article{Liu_Sun_Zhou_Feng_2023, title={Two Views of Constrained Differential Privacy: Belief Revision and Update}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25793}, DOI={10.1609/aaai.v37i5.25793}, abstractNote={In this paper, we provide two views of constrained differential private (DP) mechanisms. The first one is as belief revision. A constrained DP mechanism is obtained by standard probabilistic conditioning, and hence can be naturally implemented by Monte Carlo algorithms. The other is as belief update. A constrained DP is defined according to l2-distance minimization postprocessing or projection and hence can be naturally implemented by optimization algorithms. The main advantage of these two perspectives is that we can make full use of the machinery of belief revision and update to show basic properties for constrained differential privacy especially some important new composition properties. 
Within the framework established in this paper, constrained DP algorithms in the literature can be classified either as belief revision or belief update. At the end of the paper, we demonstrate their differences especially in utility on a couple of scenarios.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Likang and Sun, Keke and Zhou, Chunlai and Feng, Yuan}, year={2023}, month={Jun.}, pages={6450-6457} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25793/25565", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25793", + "pdf_size": 249539, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18396760047770819394&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;ruc.edu.cn;ruc.edu.cn;uts.edu.au", + "email": "gmail.com;ruc.edu.cn;ruc.edu.cn;uts.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Renmin University of China;University of Technology Sydney", + "aff_unique_dep": "School of Information;Centre of Quantum Software and Information", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.uts.edu.au", + "aff_unique_abbr": "RUC;UTS", + "aff_campus_unique_index": "0;0;0;1", + "aff_campus_unique": "Beijing;Sydney", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26725", + "title": "Two Wrongs Don\u2019t Make a Right: Combating Confirmation Bias in Learning with Label Noise", + "track": "aaai special track", + "status": "Technical", + "abstract": "Noisy labels damage the performance of deep networks. For robust learning, a prominent two-stage pipeline alternates between eliminating possible incorrect labels and semi-supervised training. 
However, discarding part of noisy labels could result in a loss of information, especially when the corruption has a dependency on data, e.g., class-dependent or instance-dependent. Moreover, from the training dynamics of a representative two-stage method DivideMix, we identify the domination of confirmation bias: pseudo-labels fail to correct a considerable amount of noisy labels, and consequently, the errors accumulate. To sufficiently exploit information from noisy labels and mitigate wrong corrections, we propose Robust Label Refurbishment (Robust LR)\u2014a new hybrid method that integrates pseudo-labeling and confidence estimation techniques to refurbish noisy labels. We show that our method successfully alleviates the damage of both label noise and confirmation bias. As a result, it achieves state-of-the-art performance across datasets and noise types, namely CIFAR under different levels of synthetic noise and mini-WebVision and ANIMAL-10N with real-world noise.", + "primary_area": "safe and robust ai", + "author": "Mingcai Chen; Hao Cheng; Yuntao Du; Ming Xu; Wenyu Jiang; Chongjun Wang", + "authorids": "", + "aff": "State Key Laboratory for Novel Software Technology at Nanjing University; State Key Laboratory for Novel Software Technology at Nanjing University; State Key Laboratory for Novel Software Technology at Nanjing University; State Key Laboratory for Novel Software Technology at Nanjing University; State Key Laboratory for Novel Software Technology at Nanjing University; State Key Laboratory for Novel Software Technology at Nanjing University", + "bibtex": "@article{Chen_Cheng_Du_Xu_Jiang_Wang_2023, title={Two Wrongs Don\u2019t Make a Right: Combating Confirmation Bias in Learning with Label Noise}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26725}, DOI={10.1609/aaai.v37i12.26725}, abstractNote={Noisy labels damage the performance of deep networks. 
For robust learning, a prominent two-stage pipeline alternates between eliminating possible incorrect labels and semi-supervised training. However, discarding part of noisy labels could result in a loss of information, especially when the corruption has a dependency on data, e.g., class-dependent or instance-dependent. Moreover, from the training dynamics of a representative two-stage method DivideMix, we identify the domination of confirmation bias: pseudo-labels fail to correct a considerable amount of noisy labels, and consequently, the errors accumulate. To sufficiently exploit information from noisy labels and mitigate wrong corrections, we propose Robust Label Refurbishment (Robust LR)\u2014a new hybrid method that integrates pseudo-labeling and confidence estimation techniques to refurbish noisy labels. We show that our method successfully alleviates the damage of both label noise and confirmation bias. As a result, it achieves state-of-the-art performance across datasets and noise types, namely CIFAR under different levels of synthetic noise and mini-WebVision and ANIMAL-10N with real-world noise.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Mingcai and Cheng, Hao and Du, Yuntao and Xu, Ming and Jiang, Wenyu and Wang, Chongjun}, year={2023}, month={Jun.}, pages={14765-14773} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26725/26497", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26725", + "pdf_size": 391946, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17363939806436382469&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;gmail.com;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;gmail.com;nju.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + 
"aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "NJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-27030", + "title": "Two-Streams: Dark and Light Networks with Graph Convolution for Action Recognition from Dark Videos (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "In this article, we propose a two-stream action recognition technique for recognizing human actions from dark videos. The proposed action recognition network consists of an image enhancement network with Self-Calibrated Illumination (SCI) module, followed by a two-stream action recognition network. We have used R(2+1)D as a feature extractor for both streams with shared weights. Graph Convolutional Network (GCN), a temporal graph encoder is utilized to enhance the obtained features which are then further fed to a classification head to recognize the actions in a video. The experimental results are presented on the recent benchmark ``ARID\" dark-video database.", + "primary_area": "", + "author": "Saurabh Suman; Nilay Naharas; Badri Narayan Subudhi; Vinit Jakhetiya", + "authorids": "", + "aff": "Indian Institute of Technology Jammu; Indian Institute of Technology Jammu; Indian Institute of Technology Jammu; Indian Institute of Technology Jammu", + "bibtex": "@article{Suman_Naharas_Subudhi_Jakhetiya_2024, title={Two-Streams: Dark and Light Networks with Graph Convolution for Action Recognition from Dark Videos (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27030}, DOI={10.1609/aaai.v37i13.27030}, abstractNote={In this article, we propose a two-stream action recognition technique for recognizing human actions from dark videos. 
The proposed action recognition network consists of an image enhancement network with Self-Calibrated Illumination (SCI) module, followed by a two-stream action recognition network. We have used R(2+1)D as a feature extractor for both streams with shared weights. Graph Convolutional Network (GCN), a temporal graph encoder is utilized to enhance the obtained features which are then further fed to a classification head to recognize the actions in a video. The experimental results are presented on the recent benchmark ``ARID" dark-video database.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Suman, Saurabh and Naharas, Nilay and Subudhi, Badri Narayan and Jakhetiya, Vinit}, year={2024}, month={Jul.}, pages={16340-16341} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27030/26802", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27030", + "pdf_size": 368430, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6498309450590516142&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "iitjammu.ac.in;iitjammu.ac.in;iitjammu.ac.in;iitjammu.ac.in", + "email": "iitjammu.ac.in;iitjammu.ac.in;iitjammu.ac.in;iitjammu.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Indian Institute of Technology Jammu", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitjammu.ac.in", + "aff_unique_abbr": "IIT Jammu", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Jammu", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "article-25348", + "title": "UCoL: Unsupervised Learning of Discriminative Facial Representations via Uncertainty-Aware Contrast", + "track": "main", + "status": "Technical", + "abstract": "This paper presents Uncertainty-aware Contrastive Learning (UCoL): a fully unsupervised framework for discriminative facial 
representation learning. Our UCoL is built upon a momentum contrastive network, referred to as Dual-path Momentum Network. Specifically, two flows of pairwise contrastive training are conducted simultaneously: one is formed with intra-instance self augmentation, and the other is to identify positive pairs collected by online pairwise prediction. We introduce a novel uncertainty-aware consistency K-nearest neighbors algorithm to generate predicted positive pairs, which enables efficient discriminative learning from large-scale open-world unlabeled data. Experiments show that UCoL significantly improves the baselines of unsupervised models and performs on par with the semi-supervised and supervised face representation learning methods.", + "primary_area": "computer vision ii", + "author": "Hao Wang; Min Li; Yangyang Song; Youjian Zhang; Liying Chi", + "authorids": "", + "aff": "ByteDance Inc.; ByteDance Inc.; ByteDance Inc.; The University of Sydney; ByteDance Inc.", + "bibtex": "@article{Wang_Li_Song_Zhang_Chi_2023, title={UCoL: Unsupervised Learning of Discriminative Facial Representations via Uncertainty-Aware Contrast}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25348}, DOI={10.1609/aaai.v37i2.25348}, abstractNote={This paper presents Uncertainty-aware Contrastive Learning (UCoL): a fully unsupervised framework for discriminative facial representation learning. Our UCoL is built upon a momentum contrastive network, referred to as Dual-path Momentum Network. Specifically, two flows of pairwise contrastive training are conducted simultaneously: one is formed with intra-instance self augmentation, and the other is to identify positive pairs collected by online pairwise prediction. We introduce a novel uncertainty-aware consistency K-nearest neighbors algorithm to generate predicted positive pairs, which enables efficient discriminative learning from large-scale open-world unlabeled data. 
Experiments show that UCoL significantly improves the baselines of unsupervised models and performs on par with the semi-supervised and supervised face representation learning methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Hao and Li, Min and Song, Yangyang and Zhang, Youjian and Chi, Liying}, year={2023}, month={Jun.}, pages={2510-2518} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25348/25120", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25348", + "pdf_size": 320051, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5029064804931273013&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com; ; ; ; ", + "email": "gmail.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "ByteDance;University of Sydney", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bytedance.com;https://www.sydney.edu.au", + "aff_unique_abbr": "ByteDance;USYD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26011", + "title": "UEQMS: UMAP Embedded Quick Mean Shift Algorithm for High Dimensional Clustering", + "track": "main", + "status": "Technical", + "abstract": "The mean shift algorithm is a simple yet very effective clustering method widely used for image and video segmentation as well as other exploratory data analysis applications. Recently, a new algorithm called MeanShift++ (MS++) for low-dimensional clustering was proposed with a speedup of 4000 times over the vanilla mean shift. 
In this work, starting with a first-of-its-kind theoretical analysis of MS++, we extend its reach to high-dimensional data clustering by integrating the Uniform Manifold Approximation and Projection (UMAP) based dimensionality reduction in the same framework. Analytically, we show that MS++ can indeed converge to a non-critical point. Subsequently, we suggest modifications to MS++ to improve its convergence characteristics. In addition, we propose a way to further speed up MS++ by avoiding the execution of the MS++ iterations for every data point. By incorporating UMAP with modified MS++, we design a faster algorithm, named UMAP embedded quick mean shift (UEQMS), for partitioning data with a relatively large number of recorded features. Through extensive experiments, we showcase the efficacy of UEQMS over other state-of-the-art algorithms in terms of accuracy and runtime.", + "primary_area": "machine learning ii", + "author": "Abhishek Kumar; Swagatam Das; Rammohan Mallipeddi", + "authorids": "", + "aff": "IAI, TCG Creast, Kolkata, India-700091; Electronics and Communication Sciences Unit, Indian Statistical Institute, Kolkata, Indian-700108; Department of Artificial Intelligence, Kyungpook National University, Daegu, Republic of Korea - 41566", + "bibtex": "@article{Kumar_Das_Mallipeddi_2023, title={UEQMS: UMAP Embedded Quick Mean Shift Algorithm for High Dimensional Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26011}, DOI={10.1609/aaai.v37i7.26011}, abstractNote={The mean shift algorithm is a simple yet very effective clustering method widely used for image and video segmentation as well as other exploratory data analysis applications. Recently, a new algorithm called MeanShift++ (MS++) for low-dimensional clustering was proposed with a speedup of 4000 times over the vanilla mean shift. 
In this work, starting with a first-of-its-kind theoretical analysis of MS++, we extend its reach to high-dimensional data clustering by integrating the Uniform Manifold Approximation and Projection (UMAP) based dimensionality reduction in the same framework. Analytically, we show that MS++ can indeed converge to a non-critical point. Subsequently, we suggest modifications to MS++ to improve its convergence characteristics. In addition, we propose a way to further speed up MS++ by avoiding the execution of the MS++ iterations for every data point. By incorporating UMAP with modified MS++, we design a faster algorithm, named UMAP embedded quick mean shift (UEQMS), for partitioning data with a relatively large number of recorded features. Through extensive experiments, we showcase the efficacy of UEQMS over other state-of-the-art algorithms in terms of accuracy and runtime.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kumar, Abhishek and Das, Swagatam and Mallipeddi, Rammohan}, year={2023}, month={Jun.}, pages={8386-8395} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26011/25783", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26011", + "pdf_size": 892078, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2292747048071332744&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "iitbhu.ac.in;isical.ac.in;knu.ac.kr", + "email": "iitbhu.ac.in;isical.ac.in;knu.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "IAI;Indian Statistical Institute;Kyungpook National University", + "aff_unique_dep": ";Electronics and Communication Sciences Unit;Department of Artificial Intelligence", + "aff_unique_url": ";https://www.isical.ac.in;http://www.knu.ac.kr", + "aff_unique_abbr": ";ISI;KNU", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Kolkata;Daegu", + 
"aff_country_unique_index": "0;0;1", + "aff_country_unique": "India;Republic of Korea" + }, + { + "id": "article-26264", + "title": "USDNL: Uncertainty-Based Single Dropout in Noisy Label Learning", + "track": "main", + "status": "Technical", + "abstract": "Deep Neural Networks (DNNs) possess powerful prediction capability thanks to their over-parameterization design, although the large model complexity makes it suffer from noisy supervision. Recent approaches seek to eliminate impacts from noisy labels by excluding data points with large loss values and showing promising performance. However, these approaches usually associate with significant computation overhead and lack of theoretical analysis. In this paper, we adopt a perspective to connect label noise with epistemic uncertainty. We design a simple, efficient, and theoretically provable robust algorithm named USDNL for DNNs with uncertainty-based Dropout. Specifically, we estimate the epistemic uncertainty of the network prediction after early training through single Dropout. The epistemic uncertainty is then combined with cross-entropy loss to select the clean samples during training. Finally, we theoretically show the equivalence of replacing selection loss with single cross-entropy loss. Compared to existing small-loss selection methods, USDNL features its simplicity for practical scenarios by only applying Dropout to a standard network, while still achieving high model accuracy. Extensive empirical results on both synthetic and real-world datasets show that USDNL outperforms other methods. 
Our code is available at https://github.com/kovelxyz/USDNL.", + "primary_area": "machine learning iv", + "author": "Yuanzhuo Xu; Xiaoguang Niu; Jie Yang; Steve Drew; Jiayu Zhou; Ruizhi Chen", + "authorids": "", + "aff": "School of Computer Science, Wuhan University, China; School of Computer Science, Wuhan University, China + LIESMARS, Wuhan University, China; School of Computer Science, Wuhan University, China; Department of Electrical and Software Engineering, University of Calgary, Canada; Department of Computer Science and Engineering, Michigan State University, USA; LIESMARS, Wuhan University, China", + "bibtex": "@article{Xu_Niu_Yang_Drew_Zhou_Chen_2023, title={USDNL: Uncertainty-Based Single Dropout in Noisy Label Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26264}, DOI={10.1609/aaai.v37i9.26264}, abstractNote={Deep Neural Networks (DNNs) possess powerful prediction capability thanks to their over-parameterization design, although the large model complexity makes it suffer from noisy supervision. Recent approaches seek to eliminate impacts from noisy labels by excluding data points with large loss values and showing promising performance. However, these approaches usually associate with significant computation overhead and lack of theoretical analysis. In this paper, we adopt a perspective to connect label noise with epistemic uncertainty. We design a simple, efficient, and theoretically provable robust algorithm named USDNL for DNNs with uncertainty-based Dropout. Specifically, we estimate the epistemic uncertainty of the network prediction after early training through single Dropout. The epistemic uncertainty is then combined with cross-entropy loss to select the clean samples during training. Finally, we theoretically show the equivalence of replacing selection loss with single cross-entropy loss. 
Compared to existing small-loss selection methods, USDNL features its simplicity for practical scenarios by only applying Dropout to a standard network, while still achieving high model accuracy. Extensive empirical results on both synthetic and real-world datasets show that USDNL outperforms other methods. Our code is available at https://github.com/kovelxyz/USDNL.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Yuanzhuo and Niu, Xiaoguang and Yang, Jie and Drew, Steve and Zhou, Jiayu and Chen, Ruizhi}, year={2023}, month={Jun.}, pages={10648-10656} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26264/26036", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26264", + "pdf_size": 864899, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4994464864226952346&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "xyzxyz; xgniu; csyangjie;whu.edu.cn;ucalgary.ca;msu.edu", + "email": "xyzxyz; xgniu; csyangjie;whu.edu.cn;ucalgary.ca;msu.edu", + "github": "https://github.com/kovelxyz/USDNL", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+0;0;1;2;0", + "aff_unique_norm": "Wuhan University;University of Calgary;Michigan State University", + "aff_unique_dep": "School of Computer Science;Department of Electrical and Software Engineering;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.whu.edu.cn;https://www.ucalgary.ca;https://www.msu.edu", + "aff_unique_abbr": "WHU;U of C;MSU", + "aff_campus_unique_index": "0;0+0;0;0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0;0+0;0;1;2;0", + "aff_country_unique": "China;Canada;United States" + }, + { + "id": "article-26219", + "title": "USER: Unsupervised Structural Entropy-Based Robust Graph Neural Network", + "track": "main", + "status": "Technical", + "abstract": "Unsupervised/self-supervised graph neural networks (GNN) are susceptible to 
the inherent randomness in the input graph data, which adversely affects the model's performance in downstream tasks. In this paper, we propose USER, an unsupervised and robust version of GNN based on structural entropy, to alleviate the interference of graph perturbations and learn appropriate representations of nodes without label information. To mitigate the effects of undesirable perturbations, we analyze the property of intrinsic connectivity and define the intrinsic connectivity graph. We also identify the rank of the adjacency matrix as a crucial factor in revealing a graph that provides the same embeddings as the intrinsic connectivity graph. To capture such a graph, we introduce structural entropy in the objective function. Extensive experiments conducted on clustering and link prediction tasks under random-perturbation and meta-attack over three datasets show that USER outperforms benchmarks and is robust to heavier perturbations.", + "primary_area": "machine learning iii", + "author": "Yifei Wang; Yupan Wang; Zeyu Zhang; Song Yang; Kaiqi Zhao; Jiamou Liu", + "authorids": "", + "aff": "School of Computer Science, The University of Auckland, New Zealand; School of Computer Science, The University of Auckland, New Zealand; School of Computer Science, The University of Auckland, New Zealand; School of Computer Science, The University of Auckland, New Zealand; School of Computer Science, The University of Auckland, New Zealand; School of Computer Science, The University of Auckland, New Zealand", + "bibtex": "@article{Wang_Wang_Zhang_Yang_Zhao_Liu_2023, title={USER: Unsupervised Structural Entropy-Based Robust Graph Neural Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26219}, DOI={10.1609/aaai.v37i8.26219}, abstractNote={Unsupervised/self-supervised graph neural networks (GNN) are susceptible to the inherent randomness in the input graph data, which adversely affects the model\u2019s performance in downstream tasks. 
In this paper, we propose USER, an unsupervised and robust version of GNN based on structural entropy, to alleviate the interference of graph perturbations and learn appropriate representations of nodes without label information. To mitigate the effects of undesirable perturbations, we analyze the property of intrinsic connectivity and define the intrinsic connectivity graph. We also identify the rank of the adjacency matrix as a crucial factor in revealing a graph that provides the same embeddings as the intrinsic connectivity graph. To capture such a graph, we introduce structural entropy in the objective function. Extensive experiments conducted on clustering and link prediction tasks under random-perturbation and meta-attack over three datasets show that USER outperforms benchmarks and is robust to heavier perturbations.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yifei and Wang, Yupan and Zhang, Zeyu and Yang, Song and Zhao, Kaiqi and Liu, Jiamou}, year={2023}, month={Jun.}, pages={10235-10243} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26219/25991", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26219", + "pdf_size": 2391417, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11683045891423005960&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "aucklanduni.ac.nz;aucklanduni.ac.nz;aucklanduni.ac.nz;aucklanduni.ac.nz;auckland.ac.nz;auckland.ac.nz", + "email": "aucklanduni.ac.nz;aucklanduni.ac.nz;aucklanduni.ac.nz;aucklanduni.ac.nz;auckland.ac.nz;auckland.ac.nz", + "github": "https://github.com/wangyifeibeijing/USER", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "The University of Auckland", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.auckland.ac.nz", + "aff_unique_abbr": "UoA", + "aff_campus_unique_index": "0;0;0;0;0;0", + 
"aff_campus_unique": "Auckland", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "New Zealand" + }, + { + "id": "article-25364", + "title": "Ultra-High-Definition Low-Light Image Enhancement: A Benchmark and Transformer-Based Method", + "track": "main", + "status": "Technical", + "abstract": "As the quality of optical sensors improves, there is a need for processing large-scale images. In particular, the ability of devices to capture ultra-high definition (UHD) images and video places new demands on the image processing pipeline. In this paper, we consider the task of low-light image enhancement (LLIE) and introduce a large-scale database consisting of images at 4K and 8K resolution. We conduct systematic benchmarking studies and provide a comparison of current LLIE algorithms. As a second contribution, we introduce LLFormer, a transformer-based low-light enhancement method. The core components of LLFormer are the axis-based multi-head self-attention and cross-layer attention fusion block, which significantly reduces the linear complexity. Extensive experiments on the new dataset and existing public datasets show that LLFormer outperforms state-of-the-art methods. We also show that employing existing LLIE methods trained on our benchmark as a pre-processing step significantly improves the performance of downstream tasks, e.g., face detection in low-light conditions. 
The source code and pre-trained models are available at https://github.com/TaoWangzj/LLFormer.", + "primary_area": "computer vision iii", + "author": "Tao Wang; Kaihao Zhang; Tianrun Shen; Wenhan Luo; Bjorn Stenger; Tong Lu", + "authorids": "", + "aff": "State Key Lab for Novel Software Technology, Nanjing University, China; Australian National University, Australia; State Key Lab for Novel Software Technology, Nanjing University, China; Shenzhen Campus of Sun Yat-sen University, China; Rakuten Institute of Technology, Japan; State Key Lab for Novel Software Technology, Nanjing University, China", + "bibtex": "@article{Wang_Zhang_Shen_Luo_Stenger_Lu_2023, title={Ultra-High-Definition Low-Light Image Enhancement: A Benchmark and Transformer-Based Method}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25364}, DOI={10.1609/aaai.v37i3.25364}, abstractNote={As the quality of optical sensors improves, there is a need for processing large-scale images. In particular, the ability of devices to capture ultra-high definition (UHD) images and video places new demands on the image processing pipeline. In this paper, we consider the task of low-light image enhancement (LLIE) and introduce a large-scale database consisting of images at 4K and 8K resolution. We conduct systematic benchmarking studies and provide a comparison of current LLIE algorithms. As a second contribution, we introduce LLFormer, a transformer-based low-light enhancement method. The core components of LLFormer are the axis-based multi-head self-attention and cross-layer attention fusion block, which significantly reduces the linear complexity. Extensive experiments on the new dataset and existing public datasets show that LLFormer outperforms state-of-the-art methods. We also show that employing existing LLIE methods trained on our benchmark as a pre-processing step significantly improves the performance of downstream tasks, e.g., face detection in low-light conditions. 
The source code and pre-trained models are available at https://github.com/TaoWangzj/LLFormer.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Tao and Zhang, Kaihao and Shen, Tianrun and Luo, Wenhan and Stenger, Bjorn and Lu, Tong}, year={2023}, month={Jun.}, pages={2654-2662} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25364/25136", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25364", + "pdf_size": 11129375, + "gs_citation": 287, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9300283135395738858&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "gmail.com;gmail.com;yeah.net;gmail.com;cantab.net;nju.edu.cn", + "email": "gmail.com;gmail.com;yeah.net;gmail.com;cantab.net;nju.edu.cn", + "github": "https://github.com/TaoWangzj/LLFormer", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2;3;0", + "aff_unique_norm": "Nanjing University;Australian National University;Sun Yat-sen University;Rakuten Institute of Technology", + "aff_unique_dep": "State Key Lab for Novel Software Technology;;;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.anu.edu.au;http://www.sysu.edu.cn/;https://rit.rakuten.com", + "aff_unique_abbr": "Nanjing U;ANU;SYSU;RIT", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;1;0;0;2;0", + "aff_country_unique": "China;Australia;Japan" + }, + { + "id": "article-26463", + "title": "Ultrafast Euclidean Shortest Path Computation Using Hub Labeling", + "track": "main", + "status": "Technical", + "abstract": "Finding shortest paths in a Euclidean plane containing polygonal obstacles is a well-studied problem motivated by a variety of real-world applications. \nThe state-of-the-art algorithms require finding obstacle corners visible to the source and target, and need to consider potentially a large number of candidate paths. 
This adversely affects their query processing cost. We address these limitations by proposing a novel adaptation of hub labeling which is the state-of-the-art approach for shortest distance computation in road networks. Our experimental study conducted on the widely used benchmark maps shows that our approach is typically 1-2 orders of magnitude faster than two state-of-the-art algorithms.", + "primary_area": "search and optimization", + "author": "Jinchun Du; Bojie Shen; Muhammad Aamir Cheema", + "authorids": "", + "aff": "Faculty of Information Technology, Monash University, Melbourne, Australia; Faculty of Information Technology, Monash University, Melbourne, Australia; Faculty of Information Technology, Monash University, Melbourne, Australia", + "bibtex": "@article{Du_Shen_Cheema_2023, title={Ultrafast Euclidean Shortest Path Computation Using Hub Labeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26463}, DOI={10.1609/aaai.v37i10.26463}, abstractNote={Finding shortest paths in a Euclidean plane containing polygonal obstacles is a well-studied problem motivated by a variety of real-world applications. The state-of-the-art algorithms require finding obstacle corners visible to the source and target, and need to consider potentially a large number of candidate paths. This adversely affects their query processing cost. We address these limitations by proposing a novel adaptation of hub labeling which is the state-of-the-art approach for shortest distance computation in road networks. 
Our experimental study conducted on the widely used benchmark maps shows that our approach is typically 1-2 orders of magnitude faster than two state-of-the-art algorithms.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Du, Jinchun and Shen, Bojie and Cheema, Muhammad Aamir}, year={2023}, month={Jun.}, pages={12417-12426} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26463/26235", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26463", + "pdf_size": 407552, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17438461758949912315&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "monash.edu;monash.edu;monash.edu", + "email": "monash.edu;monash.edu;monash.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Monash University", + "aff_unique_dep": "Faculty of Information Technology", + "aff_unique_url": "https://www.monash.edu", + "aff_unique_abbr": "Monash", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Melbourne", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26193", + "title": "Unbalanced CO-optimal Transport", + "track": "main", + "status": "Technical", + "abstract": "Optimal transport (OT) compares probability distributions by computing a meaningful alignment between their samples. CO-optimal transport (COOT) takes this comparison further by inferring an alignment between features as well. While this approach leads to better alignments and generalizes both OT and Gromov-Wasserstein distances, we provide a theoretical result showing that it is sensitive to outliers that are omnipresent in real-world data. This prompts us to propose unbalanced COOT for which we provably show its robustness to noise in the compared datasets. 
To the best of our knowledge, this is the first such result for OT methods in incomparable spaces. With this result in hand, we provide empirical evidence of this robustness for the challenging tasks of heterogeneous domain adaptation with and without varying proportions of classes and simultaneous alignment of samples and features across two single-cell measurements.", + "primary_area": "machine learning iii", + "author": "Quang Huy Tran; Hicham Janati; Nicolas Courty; R\u00e9mi Flamary; Ievgen Redko; Pinar Demetci; Ritambhara Singh", + "authorids": "", + "aff": "Universit\u00e9 Bretagne Sud, IRISA+CMAP, Ecole Polytechnique, IP Paris; LTCI, T\u00e9l\u00e9com Paris, IP Paris; Universit\u00e9 Bretagne Sud, IRISA; CMAP, Ecole Polytechnique, IP Paris; Univ. Lyon, UJM-Saint-Etienne, CNRS, UMR 5516; Center for Computational Molecular Biology, Brown University+Department of Computer Science, Brown University; Center for Computational Molecular Biology, Brown University+Department of Computer Science, Brown University", + "bibtex": "@article{Tran_Janati_Courty_Flamary_Redko_Demetci_Singh_2023, title={Unbalanced CO-optimal Transport}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26193}, DOI={10.1609/aaai.v37i8.26193}, abstractNote={Optimal transport (OT) compares probability distributions by computing a meaningful alignment between their samples. CO-optimal transport (COOT) takes this comparison further by inferring an alignment between features as well. While this approach leads to better alignments and generalizes both OT and Gromov-Wasserstein distances, we provide a theoretical result showing that it is sensitive to outliers that are omnipresent in real-world data. This prompts us to propose unbalanced COOT for which we provably show its robustness to noise in the compared datasets. To the best of our knowledge, this is the first such result for OT methods in incomparable spaces. 
With this result in hand, we provide empirical evidence of this robustness for the challenging tasks of heterogeneous domain adaptation with and without varying proportions of classes and simultaneous alignment of samples and features across two single-cell measurements.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tran, Quang Huy and Janati, Hicham and Courty, Nicolas and Flamary, R\u00e9mi and Redko, Ievgen and Demetci, Pinar and Singh, Ritambhara}, year={2023}, month={Jun.}, pages={10006-10016} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26193/25965", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26193", + "pdf_size": 5001288, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10404839145011238283&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 20, + "aff_domain": "univ-ubs.fr;telecom-paris.fr;univ-ubs.fr;polytechnique.edu;univ-st-etienne.fr;gmail.com;brown.edu", + "email": "univ-ubs.fr;telecom-paris.fr;univ-ubs.fr;polytechnique.edu;univ-st-etienne.fr;gmail.com;brown.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;0;1;3;4+4;4+4", + "aff_unique_norm": "Universit\u00e9 Bretagne Sud;Ecole Polytechnique;T\u00e9l\u00e9com Paris;Universit\u00e9 de Lyon;Brown University", + "aff_unique_dep": "IRISA;CMAP;LTCI;;Center for Computational Molecular Biology", + "aff_unique_url": "https://www.univ-ubs.fr;https://www.polytechnique.edu;https://www.telecom-paris.fr;https://www.universite-lyon.fr;https://www.brown.edu", + "aff_unique_abbr": "UBS;Polytechnique;T\u00e9l\u00e9com Paris;Univ. 
Lyon;Brown", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;1+1;1+1", + "aff_country_unique": "France;United States" + }, + { + "id": "article-25435", + "title": "Unbiased Heterogeneous Scene Graph Generation with Relation-Aware Message Passing Neural Network", + "track": "main", + "status": "Technical", + "abstract": "Recent scene graph generation (SGG) frameworks have focused on learning complex relationships among multiple objects in an image. Thanks to the nature of the message passing neural network (MPNN) that models high-order interactions between objects and their neighboring objects, they are dominant representation learning modules for SGG. However, existing MPNN-based frameworks assume the scene graph as a homogeneous graph, which restricts the context-awareness of visual relations between objects. That is, they overlook the fact that the relations tend to be highly dependent on the objects with which the relations are associated. In this paper, we propose an unbiased heterogeneous scene graph generation (HetSGG) framework that captures\nrelation-aware context using message passing neural networks. We devise a novel message passing layer, called relation-aware message passing neural network (RMP), that aggregates the contextual information of an image considering the predicate type between objects. Our extensive evaluations demonstrate that HetSGG outperforms state-of-the-art methods, especially outperforming on tail predicate classes. The source code for HetSGG is available at https://github.com/KanghoonYoon/hetsgg-torch", + "primary_area": "computer vision iii", + "author": "Kanghoon Yoon; Kibum Kim; Jinyoung Moon; Chanyoung Park", + "authorids": "", + "aff": "Dept. of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea+Graduate School of Artificial Intelligence, KAIST, Daejeon, Republic of Korea; Dept. 
of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea+Graduate School of Artificial Intelligence, KAIST, Daejeon, Republic of Korea; Electronics and Telecommunications Research Institute, 218 Gajeong-ro, Yuseong-gu, Daejeon, Republic of Korea+ETRI School, University of Science and Technology, 218 Gajeong-ro, Yuseong-gu, Daejeon, Republic of Korea; Dept. of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea+Graduate School of Artificial Intelligence, KAIST, Daejeon, Republic of Korea", + "bibtex": "@article{Yoon_Kim_Moon_Park_2023, title={Unbiased Heterogeneous Scene Graph Generation with Relation-Aware Message Passing Neural Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25435}, DOI={10.1609/aaai.v37i3.25435}, abstractNote={Recent scene graph generation (SGG) frameworks have focused on learning complex relationships among multiple objects in an image. Thanks to the nature of the message passing neural network (MPNN) that models high-order interactions between objects and their neighboring objects, they are dominant representation learning modules for SGG. However, existing MPNN-based frameworks assume the scene graph as a homogeneous graph, which restricts the context-awareness of visual relations between objects. That is, they overlook the fact that the relations tend to be highly dependent on the objects with which the relations are associated. In this paper, we propose an unbiased heterogeneous scene graph generation (HetSGG) framework that captures\nrelation-aware context using message passing neural networks. We devise a novel message passing layer, called relation-aware message passing neural network (RMP), that aggregates the contextual information of an image considering the predicate type between objects. Our extensive evaluations demonstrate that HetSGG outperforms state-of-the-art methods, especially outperforming on tail predicate classes. 
The source code for HetSGG is available at https://github.com/KanghoonYoon/hetsgg-torch}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yoon, Kanghoon and Kim, Kibum and Moon, Jinyoung and Park, Chanyoung}, year={2023}, month={Jun.}, pages={3285-3294} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25435/25207", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25435", + "pdf_size": 1111382, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=127177223671204379&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "kaist.ac.kr;kaist.ac.kr;etri.re.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;etri.re.kr;kaist.ac.kr", + "github": "https://github.com/KanghoonYoon/hetsgg-torch", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;1+2;0+0", + "aff_unique_norm": "KAIST;Electronics and Telecommunications Research Institute;University of Science and Technology", + "aff_unique_dep": "Dept. of Industrial and Systems Engineering;;ETRI School", + "aff_unique_url": "https://www.kaist.ac.kr;;", + "aff_unique_abbr": "KAIST;;", + "aff_campus_unique_index": "0+0;0+0;0;0+0", + "aff_campus_unique": "Daejeon;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "article-25137", + "title": "Uncertainty-Aware Image Captioning", + "track": "main", + "status": "Technical", + "abstract": "It is well believed that the higher uncertainty in a word of the caption, the more inter-correlated context information is required to determine it. However, current image captioning methods usually consider the generation of all words in a sentence sequentially and equally. In this paper, we propose an uncertainty-aware image captioning framework, which parallelly and iteratively operates insertion of discontinuous candidate words between existing words from easy to difficult until converged. 
We hypothesize that high-uncertainty words in a sentence need more prior information to make a correct decision and should be produced at a later stage. The resulting non-autoregressive hierarchy makes the caption generation explainable and intuitive. Specifically, we utilize an image-conditioned bag-of-word model to measure the word uncertainty and apply a dynamic programming algorithm to construct the training pairs. During inference, we devise an uncertainty-adaptive parallel beam search technique that yields an empirically logarithmic time complexity. Extensive experiments on the MS COCO benchmark reveal that our approach outperforms the strong baseline and related methods on both captioning quality as well as decoding speed.", + "primary_area": "computer vision i", + "author": "Zhengcong Fei; Mingyuan Fan; Li Zhu; Junshi Huang; Xiaoming Wei; Xiaolin Wei", + "authorids": "", + "aff": "Meituan; Meituan; Meituan; Meituan; Meituan; Meituan", + "bibtex": "@article{Fei_Fan_Zhu_Huang_Wei_Wei_2023, title={Uncertainty-Aware Image Captioning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25137}, DOI={10.1609/aaai.v37i1.25137}, abstractNote={It is well believed that the higher uncertainty in a word of the caption, the more inter-correlated context information is required to determine it. However, current image captioning methods usually consider the generation of all words in a sentence sequentially and equally. In this paper, we propose an uncertainty-aware image captioning framework, which parallelly and iteratively operates insertion of discontinuous candidate words between existing words from easy to difficult until converged. We hypothesize that high-uncertainty words in a sentence need more prior information to make a correct decision and should be produced at a later stage. The resulting non-autoregressive hierarchy makes the caption generation explainable and intuitive. 
Specifically, we utilize an image-conditioned bag-of-word model to measure the word uncertainty and apply a dynamic programming algorithm to construct the training pairs. During inference, we devise an uncertainty-adaptive parallel beam search technique that yields an empirically logarithmic time complexity. Extensive experiments on the MS COCO benchmark reveal that our approach outperforms the strong baseline and related methods on both captioning quality as well as decoding speed.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fei, Zhengcong and Fan, Mingyuan and Zhu, Li and Huang, Junshi and Wei, Xiaoming and Wei, Xiaolin}, year={2023}, month={Jun.}, pages={614-622} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25137/24909", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25137", + "pdf_size": 1191120, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7684435875110448683&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com", + "email": "meituan.com;meituan.com;meituan.com;meituan.com;meituan.com;meituan.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Meituan", + "aff_unique_dep": "", + "aff_unique_url": "https://www.meituan.com", + "aff_unique_abbr": "Meituan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26603", + "title": "Uncertainty-Aware Self-Training for Low-Resource Neural Sequence Labeling", + "track": "main", + "status": "Technical", + "abstract": "Neural sequence labeling (NSL) aims at assigning labels for input language tokens, which covers a broad range of applications, such as named entity recognition (NER) and slot filling, etc. 
However, the satisfying results achieved by traditional supervised-based approaches heavily depend on the large amounts of human annotation data, which may not be feasible in real-world scenarios due to data privacy and computation efficiency issues. This paper presents SeqUST, a novel uncertain-aware self-training framework for NSL to address the labeled data scarcity issue and to effectively utilize unlabeled data. Specifically, we incorporate Monte Carlo (MC) dropout in Bayesian neural network (BNN) to perform uncertainty estimation at the token level and then select reliable language tokens from unlabeled data based on the model confidence and certainty. A well-designed masked sequence labeling task with a noise-robust loss supports robust training, which aims to suppress the problem of noisy pseudo labels. In addition, we develop a Gaussian-based consistency regularization technique to further improve the model robustness on Gaussian-distributed perturbed representations. This effectively alleviates the over-fitting dilemma originating from pseudo-labeled augmented data. 
Extensive experiments over six benchmarks demonstrate that our SeqUST framework effectively improves the performance of self-training, and consistently outperforms strong baselines by a large margin in low-resource scenarios.", + "primary_area": "speech natural language processing", + "author": "Jianing Wang; Chengyu Wang; Jun Huang; Ming Gao; Aoying Zhou", + "authorids": "", + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; School of Data Science and Engineering, East China Normal University, Shanghai, China + KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal University, Shanghai, China", + "bibtex": "@article{Wang_Wang_Huang_Gao_Zhou_2023, title={Uncertainty-Aware Self-Training for Low-Resource Neural Sequence Labeling}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26603}, DOI={10.1609/aaai.v37i11.26603}, abstractNote={Neural sequence labeling (NSL) aims at assigning labels for input language tokens, which covers a broad range of applications, such as named entity recognition (NER) and slot filling, etc. However, the satisfying results achieved by traditional supervised-based approaches heavily depend on the large amounts of human annotation data, which may not be feasible in real-world scenarios due to data privacy and computation efficiency issues. This paper presents SeqUST, a novel uncertain-aware self-training framework for NSL to address the labeled data scarcity issue and to effectively utilize unlabeled data. Specifically, we incorporate Monte Carlo (MC) dropout in Bayesian neural network (BNN) to perform uncertainty estimation at the token level and then select reliable language tokens from unlabeled data based on the model confidence and certainty. 
A well-designed masked sequence labeling task with a noise-robust loss supports robust training, which aims to suppress the problem of noisy pseudo labels. In addition, we develop a Gaussian-based consistency regularization technique to further improve the model robustness on Gaussian-distributed perturbed representations. This effectively alleviates the over-fitting dilemma originating from pseudo-labeled augmented data. Extensive experiments over six benchmarks demonstrate that our SeqUST framework effectively improves the performance of self-training, and consistently outperforms strong baselines by a large margin in low-resource scenarios.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Jianing and Wang, Chengyu and Huang, Jun and Gao, Ming and Zhou, Aoying}, year={2023}, month={Jun.}, pages={13682-13690} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26603/26375", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26603", + "pdf_size": 1185977, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6956525080812165693&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff_domain": "gmail.com;alibaba-inc.com;alibaba-inc.com;dase.ecnu.edu.cn;dase.ecnu.edu.cn", + "email": "gmail.com;alibaba-inc.com;alibaba-inc.com;dase.ecnu.edu.cn;dase.ecnu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0+0;0", + "aff_unique_norm": "East China Normal University;Alibaba Group", + "aff_unique_dep": "School of Data Science and Engineering;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ECNU;Alibaba", + "aff_campus_unique_index": "0;1;1;0+0;0", + "aff_campus_unique": "Shanghai;Hangzhou", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26978", + "title": "Understand Restart of SAT Solver Using Search Similarity Index (Student 
Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "SAT solvers are widely used to solve many industrial problems because of their high performance, which is achieved by various heuristic methods.\nUnderstanding why these methods are effective is essential to improving them. One approach to this is analyzing them using qualitative measurements.\nIn our previous study, we proposed search similarity index (SSI), a metric to quantify the similarity between searches. SSI significantly improved the performance of the parallel SAT solver.\nHere, we apply SSI to analyze the effect of restart, a key SAT solver technique.\nExperiments using SSI reveal the correlation between the difficulty of instances and the search change effect by restart, and the reason behind the effectiveness of the state-of-the-art restart method is also explained.", + "primary_area": "", + "author": "Yoichiro Iida; Tomohiro Sonobe; Mary Inaba", + "authorids": "", + "aff": "Information Science and Technology, The University of Tokyo; National Institute of Informatics, Japan; Information Science and Technology, The University of Tokyo", + "bibtex": "@article{Iida_Sonobe_Inaba_2024, title={Understand Restart of SAT Solver Using Search Similarity Index (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26978}, DOI={10.1609/aaai.v37i13.26978}, abstractNote={SAT solvers are widely used to solve many industrial problems because of their high performance, which is achieved by various heuristic methods.\nUnderstanding why these methods are effective is essential to improving them. One approach to this is analyzing them using qualitative measurements.\nIn our previous study, we proposed search similarity index (SSI), a metric to quantify the similarity between searches. 
SSI significantly improved the performance of the parallel SAT solver.\nHere, we apply SSI to analyze the effect of restart, a key SAT solver technique.\nExperiments using SSI reveal the correlation between the difficulty of instances and the search change effect by restart, and the reason behind the effectiveness of the state-of-the-art restart method is also explained.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Iida, Yoichiro and Sonobe, Tomohiro and Inaba, Mary}, year={2024}, month={Jul.}, pages={16236-16237} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26978/26750", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26978", + "pdf_size": 2368046, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:t9msH8fvPR4J:scholar.google.com/&scioq=Understand+Restart+of+SAT+Solver+Using+Search+Similarity+Index+(Student+Abstract)&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff_domain": "g.ecc.u-tokyo.ac.jp;nii.ac.jp;is.s.u-tokyo.ac.jp", + "email": "g.ecc.u-tokyo.ac.jp;nii.ac.jp;is.s.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "The University of Tokyo;National Institute of Informatics", + "aff_unique_dep": "Information Science and Technology;", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.nii.ac.jp", + "aff_unique_abbr": "UTokyo;NII", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26282", + "title": "Understanding Representation Learnability of Nonlinear Self-Supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "Self-supervised learning (SSL) has empirically shown its data representation learnability in many downstream tasks. 
There are only a few theoretical works on data representation learnability, and many of those focus on final data representation, treating the nonlinear neural network as a ``black box\". However, the accurate learning results of neural networks are crucial for describing the data distribution features learned by SSL models. Our paper is the first to analyze the learning results of the nonlinear SSL model accurately. We consider a toy data distribution that contains two features: the label-related feature and the hidden feature. Unlike previous linear setting work that depends on closed-form solutions, we use the gradient descent algorithm to train a 1-layer nonlinear SSL model with a certain initialization region and prove that the model converges to a local minimum. Furthermore, different from the complex iterative analysis, we propose a new analysis process which uses the exact version of Inverse Function Theorem to accurately describe the features learned by the local minimum. With this local minimum, we prove that the nonlinear SSL model can capture the label-related feature and hidden feature at the same time. In contrast, the nonlinear supervised learning (SL) model can only learn the label-related feature. We also present the learning processes and results of the nonlinear SSL and SL model via simulation experiments.", + "primary_area": "machine learning iv", + "author": "Ruofeng Yang; Xiangyuan Li; Bo Jiang; Shuai Li", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{Yang_Li_Jiang_Li_2023, title={Understanding Representation Learnability of Nonlinear Self-Supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26282}, DOI={10.1609/aaai.v37i9.26282}, abstractNote={Self-supervised learning (SSL) has empirically shown its data representation learnability in many downstream tasks. 
There are only a few theoretical works on data representation learnability, and many of those focus on final data representation, treating the nonlinear neural network as a ``black box". However, the accurate learning results of neural networks are crucial for describing the data distribution features learned by SSL models. Our paper is the first to analyze the learning results of the nonlinear SSL model accurately. We consider a toy data distribution that contains two features: the label-related feature and the hidden feature. Unlike previous linear setting work that depends on closed-form solutions, we use the gradient descent algorithm to train a 1-layer nonlinear SSL model with a certain initialization region and prove that the model converges to a local minimum. Furthermore, different from the complex iterative analysis, we propose a new analysis process which uses the exact version of Inverse Function Theorem to accurately describe the features learned by the local minimum. With this local minimum, we prove that the nonlinear SSL model can capture the label-related feature and hidden feature at the same time. In contrast, the nonlinear supervised learning (SL) model can only learn the label-related feature. 
We also present the learning processes and results of the nonlinear SSL and SL model via simulation experiments.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Ruofeng and Li, Xiangyuan and Jiang, Bo and Li, Shuai}, year={2023}, month={Jun.}, pages={10807-10815} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26282/26054", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26282", + "pdf_size": 885957, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2081627522549454649&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26765", + "title": "Understanding and Enhancing Robustness of Concept-Based Models", + "track": "aaai special track", + "status": "Technical", + "abstract": "Rising usage of deep neural networks to perform decision making in critical applications like medical diagnosis and fi- nancial analysis have raised concerns regarding their reliability and trustworthiness. As automated systems become more mainstream, it is important their decisions be transparent, reliable and understandable by humans for better trust and confidence. To this effect, concept-based models such as Concept Bottleneck Models (CBMs) and Self-Explaining Neural Networks (SENN) have been proposed which constrain the latent space of a model to represent high level concepts easily understood by domain experts in the field. 
Although concept-based models promise a good approach to both increasing explainability and reliability, it is yet to be shown if they demonstrate robustness and output consistent concepts under systematic perturbations to their inputs. To better understand performance of concept-based models on curated malicious samples, in this paper, we aim to study their robustness to adversarial perturbations, which are also known as the imperceptible changes to the input data that are crafted by an attacker to fool a well-learned concept-based model. Specifically, we first propose and analyze different malicious attacks to evaluate the security vulnerability of concept based models. Subsequently, we propose a potential general adversarial training-based defense mechanism to increase robustness of these systems to the proposed malicious attacks. Extensive experiments on one synthetic and two real-world datasets demonstrate the effectiveness of the proposed attacks and the defense approach. An appendix of the paper with more comprehensive results can also be viewed at https://arxiv.org/abs/2211.16080.", + "primary_area": "safe and robust ai", + "author": "Sanchit Sinha; Mengdi Huai; Jianhui Sun; Aidong Zhang", + "authorids": "", + "aff": "University of Virginia; University of Virginia + Iowa State University; University of Virginia; University of Virginia", + "bibtex": "@article{Sinha_Huai_Sun_Zhang_2023, title={Understanding and Enhancing Robustness of Concept-Based Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26765}, DOI={10.1609/aaai.v37i12.26765}, abstractNote={Rising usage of deep neural networks to perform decision making in critical applications like medical diagnosis and fi- nancial analysis have raised concerns regarding their reliability and trustworthiness. As automated systems become more mainstream, it is important their decisions be transparent, reliable and understandable by humans for better trust and confidence. 
To this effect, concept-based models such as Concept Bottleneck Models (CBMs) and Self-Explaining Neural Networks (SENN) have been proposed which constrain the latent space of a model to represent high level concepts easily understood by domain experts in the field. Although concept-based models promise a good approach to both increasing explainability and reliability, it is yet to be shown if they demonstrate robustness and output consistent concepts under systematic perturbations to their inputs. To better understand performance of concept-based models on curated malicious samples, in this paper, we aim to study their robustness to adversarial perturbations, which are also known as the imperceptible changes to the input data that are crafted by an attacker to fool a well-learned concept-based model. Specifically, we first propose and analyze different malicious attacks to evaluate the security vulnerability of concept based models. Subsequently, we propose a potential general adversarial training-based defense mechanism to increase robustness of these systems to the proposed malicious attacks. Extensive experiments on one synthetic and two real-world datasets demonstrate the effectiveness of the proposed attacks and the defense approach. 
An appendix of the paper with more comprehensive results can also be viewed at https://arxiv.org/abs/2211.16080.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sinha, Sanchit and Huai, Mengdi and Sun, Jianhui and Zhang, Aidong}, year={2023}, month={Jun.}, pages={15127-15135} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26765/26537", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26765", + "pdf_size": 511724, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11257894419693518532&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "virginia.edu;iastate.edu;virginia.edu;virginia.edu", + "email": "virginia.edu;iastate.edu;virginia.edu;virginia.edu", + "github": "", + "project": "https://arxiv.org/abs/2211.16080", + "author_num": 4, + "aff_unique_index": "0;0+1;0;0", + "aff_unique_norm": "University of Virginia;Iowa State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.virginia.edu;https://www.iastate.edu", + "aff_unique_abbr": "UVA;ISU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26037", + "title": "Understanding the Generalization Performance of Spectral Clustering Algorithms", + "track": "main", + "status": "Technical", + "abstract": "The theoretical analysis of spectral clustering is mainly devoted to consistency, while there is little research on its generalization performance. In this paper, we study the excess risk bounds of the popular spectral clustering algorithms: relaxed RatioCut and relaxed NCut. Our analysis follows the two practical steps of spectral clustering algorithms: continuous solution and discrete solution. 
Firstly, we provide the convergence rate of the excess risk bounds between the empirical continuous optimal solution and the population-level continuous optimal solution. Secondly, we show the fundamental quantity influencing the excess risk between the empirical discrete optimal solution and the population-level discrete optimal solution. At the empirical level, algorithms can be designed to reduce this quantity. Based on our theoretical analysis, we propose two novel algorithms that can penalize this quantity and, additionally, can cluster the out-of-sample data without re-eigendecomposition on the overall samples. Numerical experiments on toy and real datasets confirm the effectiveness of our proposed algorithms.", + "primary_area": "machine learning ii", + "author": "Shaojie Li; Sheng Ouyang; Yong Liu", + "authorids": "", + "aff": "1Gaoling School of Arti\ufb01cial Intelligence, Renmin University of China, Beijing, China; 2Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing, China; 1Gaoling School of Arti\ufb01cial Intelligence, Renmin University of China, Beijing, China + 2Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing, China", + "bibtex": "@article{Li_Ouyang_Liu_2023, title={Understanding the Generalization Performance of Spectral Clustering Algorithms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26037}, DOI={10.1609/aaai.v37i7.26037}, abstractNote={The theoretical analysis of spectral clustering is mainly devoted to consistency, while there is little research on its generalization performance. In this paper, we study the excess risk bounds of the popular spectral clustering algorithms: relaxed RatioCut and relaxed NCut. Our analysis follows the two practical steps of spectral clustering algorithms: continuous solution and discrete solution. 
Firstly, we provide the convergence rate of the excess risk bounds between the empirical continuous optimal solution and the population-level continuous optimal solution. Secondly, we show the fundamental quantity influencing the excess risk between the empirical discrete optimal solution and the population-level discrete optimal solution. At the empirical level, algorithms can be designed to reduce this quantity. Based on our theoretical analysis, we propose two novel algorithms that can penalize this quantity and, additionally, can cluster the out-of-sample data without re-eigendecomposition on the overall samples. Numerical experiments on toy and real datasets confirm the effectiveness of our proposed algorithms.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Shaojie and Ouyang, Sheng and Liu, Yong}, year={2023}, month={Jun.}, pages={8614-8621} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26037/25809", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26037", + "pdf_size": 145768, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1019071018613295965&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 5, + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_unique_dep": "School of Arti\ufb01cial Intelligence;Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_unique_url": "http://www.ruc.edu.cn;", + "aff_unique_abbr": "RUC;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25147", + "title": "Underwater Ranker: Learn Which Is Better and How to Be Better", + 
"track": "main", + "status": "Technical", + "abstract": "In this paper, we present a ranking-based underwater image quality assessment (UIQA) method, abbreviated as URanker. The URanker is built on the efficient conv-attentional image Transformer. In terms of underwater images, we specially devise (1) the histogram prior that embeds the color distribution of an underwater image as histogram token to attend global degradation and (2) the dynamic cross-scale correspondence to model local degradation. The final prediction depends on the class tokens from different scales, which comprehensively considers multi-scale dependencies. With the margin ranking loss, our URanker can accurately rank the order of underwater images of the same scene enhanced by different underwater image enhancement (UIE) algorithms according to their visual quality. To achieve that, we also contribute a dataset, URankerSet, containing sufficient results enhanced by different UIE algorithms and the corresponding perceptual rankings, to train our URanker. Apart from the good performance of URanker, we found that a simple U-shape UIE network can obtain promising performance when it is coupled with our pre-trained URanker as additional supervision. In addition, we also propose a normalization tail that can significantly improve the performance of UIE networks. Extensive experiments demonstrate the state-of-the-art performance of our method. The key designs of our method are discussed. Our code and dataset are available at https://li-chongyi.github.io/URanker_files/.", + "primary_area": "computer vision i", + "author": "Chunle Guo; Ruiqi Wu; Xin Jin; Linghao Han; Weidong Zhang; Zhi Chai; Chongyi Li", + "authorids": "", + "aff": "TMCC, CS, Nankai University; TMCC, CS, Nankai University; TMCC, CS, Nankai University; TMCC, CS, Nankai University; School of Information Engineering, Henan Institute of Science and Technology; Hisilicon Technologies Co. 
Ltd.; S-Lab, Nanyang Technological University", + "bibtex": "@article{Guo_Wu_Jin_Han_Zhang_Chai_Li_2023, title={Underwater Ranker: Learn Which Is Better and How to Be Better}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25147}, DOI={10.1609/aaai.v37i1.25147}, abstractNote={In this paper, we present a ranking-based underwater image quality assessment (UIQA) method, abbreviated as URanker. The URanker is built on the efficient conv-attentional image Transformer. In terms of underwater images, we specially devise (1) the histogram prior that embeds the color distribution of an underwater image as histogram token to attend global degradation and (2) the dynamic cross-scale correspondence to model local degradation. The final prediction depends on the class tokens from different scales, which comprehensively considers multi-scale dependencies. With the margin ranking loss, our URanker can accurately rank the order of underwater images of the same scene enhanced by different underwater image enhancement (UIE) algorithms according to their visual quality. To achieve that, we also contribute a dataset, URankerSet, containing sufficient results enhanced by different UIE algorithms and the corresponding perceptual rankings, to train our URanker. Apart from the good performance of URanker, we found that a simple U-shape UIE network can obtain promising performance when it is coupled with our pre-trained URanker as additional supervision. In addition, we also propose a normalization tail that can significantly improve the performance of UIE networks. Extensive experiments demonstrate the state-of-the-art performance of our method. The key designs of our method are discussed. 
Our code and dataset are available at https://li-chongyi.github.io/URanker_files/.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Guo, Chunle and Wu, Ruiqi and Jin, Xin and Han, Linghao and Zhang, Weidong and Chai, Zhi and Li, Chongyi}, year={2023}, month={Jun.}, pages={702-709} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25147/24919", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25147", + "pdf_size": 4092812, + "gs_citation": 132, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3273439911848059661&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "nankai.edu.cn;mail.nankai.edu.cn;mail.nankai.edu.cn;mail.nankai.edu.cn;163.com;huawei.com;ntu.edu.sg", + "email": "nankai.edu.cn;mail.nankai.edu.cn;mail.nankai.edu.cn;mail.nankai.edu.cn;163.com;huawei.com;ntu.edu.sg", + "github": "https://li-chongyi.github.io/URanker", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;2;3", + "aff_unique_norm": "Nankai University;Henan Institute of Science and Technology;Hisilicon Technologies;Nanyang Technological University", + "aff_unique_dep": "Computer Science;School of Information Engineering;;S-Lab", + "aff_unique_url": "http://www.nankai.edu.cn;;https://www.hisilicon.com;https://www.ntu.edu.sg", + "aff_unique_abbr": "Nankai;;HiSilicon;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "article-25847", + "title": "Unfooling Perturbation-Based Post Hoc Explainers", + "track": "main", + "status": "Technical", + "abstract": "Monumental advancements in artificial intelligence (AI) have lured the interest of doctors, lenders, judges, and other professionals. 
While these high-stakes decision-makers are optimistic about the technology, those familiar with AI systems are wary about the lack of transparency of its decision-making processes. Perturbation-based post hoc explainers offer a model agnostic means of interpreting these systems while only requiring query-level access. However, recent work demonstrates that these explainers can be fooled adversarially. This discovery has adverse implications for auditors, regulators, and other sentinels. With this in mind, several natural questions arise - how can we audit these black box systems? And how can we ascertain that the auditee is complying with the audit in good faith? In this work, we rigorously formalize this problem and devise a defense against adversarial attacks on perturbation-based explainers. We propose algorithms for the detection (CAD-Detect) and defense (CAD-Defend) of these attacks, which are aided by our novel conditional anomaly detection approach, KNN-CAD. We demonstrate that our approach successfully detects whether a black box system adversarially conceals its decision-making process and mitigates the adversarial attack on real-world data for the prevalent explainers, LIME and SHAP. The code for this work is available at https://github.com/craymichael/unfooling.", + "primary_area": "machine learning i", + "author": "Zachariah Carmichael; Walter J. Scheirer", + "authorids": "", + "aff": "University of Notre Dame; University of Notre Dame", + "bibtex": "@article{Carmichael_Scheirer_2023, title={Unfooling Perturbation-Based Post Hoc Explainers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25847}, DOI={10.1609/aaai.v37i6.25847}, abstractNote={Monumental advancements in artificial intelligence (AI) have lured the interest of doctors, lenders, judges, and other professionals. 
While these high-stakes decision-makers are optimistic about the technology, those familiar with AI systems are wary about the lack of transparency of its decision-making processes. Perturbation-based post hoc explainers offer a model agnostic means of interpreting these systems while only requiring query-level access. However, recent work demonstrates that these explainers can be fooled adversarially. This discovery has adverse implications for auditors, regulators, and other sentinels. With this in mind, several natural questions arise - how can we audit these black box systems? And how can we ascertain that the auditee is complying with the audit in good faith? In this work, we rigorously formalize this problem and devise a defense against adversarial attacks on perturbation-based explainers. We propose algorithms for the detection (CAD-Detect) and defense (CAD-Defend) of these attacks, which are aided by our novel conditional anomaly detection approach, KNN-CAD. We demonstrate that our approach successfully detects whether a black box system adversarially conceals its decision-making process and mitigates the adversarial attack on real-world data for the prevalent explainers, LIME and SHAP. 
The code for this work is available at https://github.com/craymichael/unfooling.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carmichael, Zachariah and Scheirer, Walter J.}, year={2023}, month={Jun.}, pages={6925-6934} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25847/25619", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25847", + "pdf_size": 269833, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8083551569660313279&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "nd.edu;nd.edu", + "email": "nd.edu;nd.edu", + "github": "https://github.com/craymichael/unfooling", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Notre Dame", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nd.edu", + "aff_unique_abbr": "Notre Dame", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26530", + "title": "UniSyn: An End-to-End Unified Model for Text-to-Speech and Singing Voice Synthesis", + "track": "main", + "status": "Technical", + "abstract": "Text-to-speech (TTS) and singing voice synthesis (SVS) aim at generating high-quality speaking and singing voice according to textual input and music scores, respectively. Unifying TTS and SVS into a single system is crucial to the applications requiring both of them. Existing methods usually suffer from some limitations, which rely on either both singing and speaking data from the same person or cascaded models of multiple tasks. To address these problems, a simplified elegant framework for TTS and SVS, named UniSyn, is proposed in this paper. It is an end-to-end unified model that can make a voice speak and sing with only singing or speaking data from this person. 
To be specific, a multi-conditional variational autoencoder (MC-VAE), which constructs two independent latent sub-spaces with the speaker- and style-related (i.e. speak or sing) conditions for flexible control, is proposed in UniSyn. Moreover, supervised guided-VAE and timbre perturbation with the Wasserstein distance constraint are leveraged to further disentangle the speaker timbre and style. Experiments conducted on two speakers and two singers demonstrate that UniSyn can generate natural speaking and singing voice without corresponding training data. The proposed approach outperforms the state-of-the-art end-to-end voice generation work, which proves the effectiveness and advantages of UniSyn.", + "primary_area": "speech natural language processing", + "author": "Yi Lei; Shan Yang; Xinsheng Wang; Qicong Xie; Jixun Yao; Lei Xie; Dan Su", + "authorids": "", + "aff": "Audio, Speech and Language Processing Group (ASLP@NPU), School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China; Tencent AI Lab, China; Audio, Speech and Language Processing Group (ASLP@NPU), School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China; Audio, Speech and Language Processing Group (ASLP@NPU), School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China; Audio, Speech and Language Processing Group (ASLP@NPU), School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China; Audio, Speech and Language Processing Group (ASLP@NPU), School of Computer Science, Northwestern Polytechnical University, Xi\u2019an, China+Tencent AI Lab, China; Tencent AI Lab, China", + "bibtex": "@article{Lei_Yang_Wang_Xie_Yao_Xie_Su_2023, title={UniSyn: An End-to-End Unified Model for Text-to-Speech and Singing Voice Synthesis}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26530}, DOI={10.1609/aaai.v37i11.26530}, abstractNote={Text-to-speech (TTS) and singing voice synthesis (SVS) aim at 
generating high-quality speaking and singing voice according to textual input and music scores, respectively. Unifying TTS and SVS into a single system is crucial to the applications requiring both of them. Existing methods usually suffer from some limitations, which rely on either both singing and speaking data from the same person or cascaded models of multiple tasks. To address these problems, a simplified elegant framework for TTS and SVS, named UniSyn, is proposed in this paper. It is an end-to-end unified model that can make a voice speak and sing with only singing or speaking data from this person. To be specific, a multi-conditional variational autoencoder (MC-VAE), which constructs two independent latent sub-spaces with the speaker- and style-related (i.e. speak or sing) conditions for flexible control, is proposed in UniSyn. Moreover, supervised guided-VAE and timbre perturbation with the Wasserstein distance constraint are leveraged to further disentangle the speaker timbre and style. Experiments conducted on two speakers and two singers demonstrate that UniSyn can generate natural speaking and singing voice without corresponding training data. 
The proposed approach outperforms the state-of-the-art end-to-end voice generation work, which proves the effectiveness and advantages of UniSyn.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lei, Yi and Yang, Shan and Wang, Xinsheng and Xie, Qicong and Yao, Jixun and Xie, Lei and Su, Dan}, year={2023}, month={Jun.}, pages={13025-13033} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26530/26302", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26530", + "pdf_size": 418032, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17241265171939974978&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": "npu-aslp.org;tencent.com;gmail.com;mail.nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn;tencent.com", + "email": "npu-aslp.org;tencent.com;gmail.com;mail.nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn;tencent.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;0;0+1;1", + "aff_unique_norm": "Northwestern Polytechnical University;Tencent", + "aff_unique_dep": "School of Computer Science;Tencent AI Lab", + "aff_unique_url": "http://www.nwpu.edu.cn;https://ai.tencent.com", + "aff_unique_abbr": "NPU;Tencent AI Lab", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Xi'an;", + "aff_country_unique_index": "0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25540", + "title": "Uniform Sequence Better: Time Interval Aware Data Augmentation for Sequential Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Sequential recommendation is an important task to predict the next-item to access based on a sequence of interacted items. Most existing works learn user preference as the transition pattern from the previous item to the next one, ignoring the time interval between these two items. 
However, we observe that the time interval in a sequence may vary significantly different, and thus result in the ineffectiveness of user modeling due to the issue of preference drift. In fact, we conducted an empirical study to validate this observation, and found that a sequence with uniformly distributed time interval (denoted as uniform sequence) is more beneficial for performance improvement than that with greatly varying time interval. Therefore, we propose to augment sequence data from the perspective of time interval, which is not studied in the literature. Specifically, we design five operators (Ti-Crop, Ti-Reorder, Ti-Mask, Ti-Substitute, Ti-Insert) to transform the original non-uniform sequence to uniform sequence with the consideration of variance of time intervals. Then, we devise a control strategy to execute data augmentation on item sequences in different lengths. Finally, we implement these improvements on a state-of-the-art model CoSeRec and validate our approach on four real datasets. The experimental results show that our approach reaches significantly better performance than the other 9 competing methods. 
Our implementation is available: https://github.com/KingGugu/TiCoSeRec.", + "primary_area": "data mining and knowledge management", + "author": "Yizhou Dang; Enneng Yang; Guibing Guo; Linying Jiang; Xingwei Wang; Xiaoxiao Xu; Qinghui Sun; Hong Liu", + "authorids": "", + "aff": "Software College, Northeastern University, China; Software College, Northeastern University, China; Software College, Northeastern University, China; Software College, Northeastern University, China; School of Computer Science and Engineering, Northeastern University, China; Alibaba Group; Alibaba Group; Alibaba Group", + "bibtex": "@article{Dang_Yang_Guo_Jiang_Wang_Xu_Sun_Liu_2023, title={Uniform Sequence Better: Time Interval Aware Data Augmentation for Sequential Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25540}, DOI={10.1609/aaai.v37i4.25540}, abstractNote={Sequential recommendation is an important task to predict the next-item to access based on a sequence of interacted items. Most existing works learn user preference as the transition pattern from the previous item to the next one, ignoring the time interval between these two items. However, we observe that the time interval in a sequence may vary significantly different, and thus result in the ineffectiveness of user modeling due to the issue of preference drift. In fact, we conducted an empirical study to validate this observation, and found that a sequence with uniformly distributed time interval (denoted as uniform sequence) is more beneficial for performance improvement than that with greatly varying time interval. Therefore, we propose to augment sequence data from the perspective of time interval, which is not studied in the literature. Specifically, we design five operators (Ti-Crop, Ti-Reorder, Ti-Mask, Ti-Substitute, Ti-Insert) to transform the original non-uniform sequence to uniform sequence with the consideration of variance of time intervals. 
Then, we devise a control strategy to execute data augmentation on item sequences in different lengths. Finally, we implement these improvements on a state-of-the-art model CoSeRec and validate our approach on four real datasets. The experimental results show that our approach reaches significantly better performance than the other 9 competing methods. Our implementation is available: https://github.com/KingGugu/TiCoSeRec.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dang, Yizhou and Yang, Enneng and Guo, Guibing and Jiang, Linying and Wang, Xingwei and Xu, Xiaoxiao and Sun, Qinghui and Liu, Hong}, year={2023}, month={Jun.}, pages={4225-4232} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25540/25312", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25540", + "pdf_size": 993355, + "gs_citation": 71, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12735615747058688813&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "stumail.neu.edu.cn;stumail.neu.edu.cn;swc.neu.edu.cn;swc.neu.edu.cn;mail.neu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "stumail.neu.edu.cn;stumail.neu.edu.cn;swc.neu.edu.cn;swc.neu.edu.cn;mail.neu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "https://github.com/KingGugu/TiCoSeRec", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;1;1;1", + "aff_unique_norm": "Northeastern University;Alibaba Group", + "aff_unique_dep": "Software College;", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.alibaba.com", + "aff_unique_abbr": "NEU;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25178", + "title": "Unifying Vision-Language Representation Space with Single-Tower Transformer", + "track": "main", + "status": "Technical", + "abstract": 
"Contrastive learning is a form of distance learning that aims to learn invariant features from two related representations. In this work, we explore the hypothesis that an image and caption can be regarded as two different views of the underlying mutual information, and train a model to learn a unified vision-language representation space that encodes both modalities at once in a modality-agnostic manner. We first identify difficulties in learning a one-tower model for vision-language pretraining (VLP), and propose One Representation (OneR) as a simple yet effective framework for our goal. We discover intriguing properties that distinguish OneR from the previous works that have modality-specific representation spaces such as zero-shot localization, text-guided visual reasoning and multi-modal retrieval, and present analyses to provide insights into this new form of multi-modal representation learning. Thorough evaluations demonstrate the potential of a unified modality-agnostic VLP framework.", + "primary_area": "computer vision i", + "author": "Jiho Jang; Chaerin Kong; DongHyeon Jeon; Seonhoon Kim; Nojun Kwak", + "authorids": "", + "aff": "Seoul National University; NA VER; Coupang; Seoul National University+NA VER; Seoul National University", + "bibtex": "@article{Jang_Kong_Jeon_Kim_Kwak_2023, title={Unifying Vision-Language Representation Space with Single-Tower Transformer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25178}, DOI={10.1609/aaai.v37i1.25178}, abstractNote={Contrastive learning is a form of distance learning that aims to learn invariant features from two related representations. In this work, we explore the hypothesis that an image and caption can be regarded as two different views of the underlying mutual information, and train a model to learn a unified vision-language representation space that encodes both modalities at once in a modality-agnostic manner. 
We first identify difficulties in learning a one-tower model for vision-language pretraining (VLP), and propose One Representation (OneR) as a simple yet effective framework for our goal. We discover intriguing properties that distinguish OneR from the previous works that have modality-specific representation spaces such as zero-shot localization, text-guided visual reasoning and multi-modal retrieval, and present analyses to provide insights into this new form of multi-modal representation learning. Thorough evaluations demonstrate the potential of a unified modality-agnostic VLP framework.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Jang, Jiho and Kong, Chaerin and Jeon, DongHyeon and Kim, Seonhoon and Kwak, Nojun}, year={2023}, month={Jun.}, pages={980-988} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25178/24950", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25178", + "pdf_size": 11551208, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6830526869318829957&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "snu.ac.kr;snu.ac.kr;navercorp.com;coupang.com;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr;navercorp.com;coupang.com;snu.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;2;0;0", + "aff_unique_norm": "Seoul National University;;Coupang", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.snu.ac.kr;;https://www.coupang.com", + "aff_unique_abbr": "SNU;;Coupang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "article-26563", + "title": "Universal Information Extraction as Unified Semantic Matching", + "track": "main", + "status": "Technical", + "abstract": "The challenge of information extraction (IE) lies in the diversity of label schemas and the heterogeneity of 
structures.\nTraditional methods require task-specific model design and rely heavily on expensive supervision, making them difficult to generalize to new schemas.\nIn this paper, we decouple IE into two basic abilities, structuring and conceptualizing, which are shared by different tasks and schemas.\nBased on this paradigm, we propose to universally model various IE tasks with Unified Semantic Matching (USM) framework, which introduces three unified token linking operations to model the abilities of structuring and conceptualizing.\nIn this way, USM can jointly encode schema and input text, uniformly extract substructures in parallel, and controllably decode target structures on demand.\nEmpirical evaluation on 4 IE tasks shows that the proposed method achieves state-of-the-art performance under the supervised experiments and shows strong generalization ability in zero/few-shot transfer settings.", + "primary_area": "speech natural language processing", + "author": "Jie Lou; Yaojie Lu; Dai Dai; Wei Jia; Hongyu Lin; Xianpei Han; Le Sun; Hua Wu", + "authorids": "", + "aff": "Baidu Inc., Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China + State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China; Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China + State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China; Baidu Inc., Beijing, China", + "bibtex": "@article{Lou_Lu_Dai_Jia_Lin_Han_Sun_Wu_2023, title={Universal Information Extraction as Unified Semantic Matching}, 
volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26563}, DOI={10.1609/aaai.v37i11.26563}, abstractNote={The challenge of information extraction (IE) lies in the diversity of label schemas and the heterogeneity of structures.\nTraditional methods require task-specific model design and rely heavily on expensive supervision, making them difficult to generalize to new schemas.\nIn this paper, we decouple IE into two basic abilities, structuring and conceptualizing, which are shared by different tasks and schemas.\nBased on this paradigm, we propose to universally model various IE tasks with Unified Semantic Matching (USM) framework, which introduces three unified token linking operations to model the abilities of structuring and conceptualizing.\nIn this way, USM can jointly encode schema and input text, uniformly extract substructures in parallel, and controllably decode target structures on demand.\nEmpirical evaluation on 4 IE tasks shows that the proposed method achieves state-of-the-art performance under the supervised experiments and shows strong generalization ability in zero/few-shot transfer settings.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lou, Jie and Lu, Yaojie and Dai, Dai and Jia, Wei and Lin, Hongyu and Han, Xianpei and Sun, Le and Wu, Hua}, year={2023}, month={Jun.}, pages={13318-13326} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26563/26335", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26563", + "pdf_size": 381545, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15442017356046989934&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "baidu.com;iscas.ac.cn;baidu.com;baidu.com;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;baidu.com", + "email": "baidu.com;iscas.ac.cn;baidu.com;baidu.com;iscas.ac.cn;iscas.ac.cn;iscas.ac.cn;baidu.com", + "github": "", + "project": "", + "author_num": 8, + 
"aff_unique_index": "0;1;0;0;1;1+1;1+1;0", + "aff_unique_norm": "Baidu Inc.;Chinese Academy of Sciences", + "aff_unique_dep": ";Institute of Software", + "aff_unique_url": "https://www.baidu.com;https://www.cas.cn", + "aff_unique_abbr": "Baidu;CAS", + "aff_campus_unique_index": "0;0;0;0;0;0+0;0+0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25290", + "title": "Universe Points Representation Learning for Partial Multi-Graph Matching", + "track": "main", + "status": "Technical", + "abstract": "Many challenges from natural world can be formulated as a graph matching problem. Previous deep learning-based methods mainly consider a full two-graph matching setting. In this work, we study the more general partial matching problem with multi-graph cycle consistency guarantees. Building on a recent progress in deep learning on graphs, we propose a novel data-driven method (URL) for partial multi-graph matching, which uses an object-to-universe formulation and learns latent representations of abstract universe points. The proposed approach advances the state of the art in semantic keypoint matching problem, evaluated on Pascal VOC, CUB, and Willow datasets. Moreover, the set of controlled experiments on a synthetic graph matching dataset demonstrates the scalability of our method to graphs with large number of nodes and its robustness to high partiality.", + "primary_area": "computer vision ii", + "author": "Zhakshylyk Nurlanov; Frank R. 
Schmidt; Florian Bernard", + "authorids": "", + "aff": "Bosch Center for Artificial Intelligence; Bosch Center for Artificial Intelligence + University of Bonn; University of Bonn", + "bibtex": "@article{Nurlanov_Schmidt_Bernard_2023, title={Universe Points Representation Learning for Partial Multi-Graph Matching}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25290}, DOI={10.1609/aaai.v37i2.25290}, abstractNote={Many challenges from natural world can be formulated as a graph matching problem. Previous deep learning-based methods mainly consider a full two-graph matching setting. In this work, we study the more general partial matching problem with multi-graph cycle consistency guarantees. Building on a recent progress in deep learning on graphs, we propose a novel data-driven method (URL) for partial multi-graph matching, which uses an object-to-universe formulation and learns latent representations of abstract universe points. The proposed approach advances the state of the art in semantic keypoint matching problem, evaluated on Pascal VOC, CUB, and Willow datasets. Moreover, the set of controlled experiments on a synthetic graph matching dataset demonstrates the scalability of our method to graphs with large number of nodes and its robustness to high partiality.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nurlanov, Zhakshylyk and Schmidt, Frank R. 
and Bernard, Florian}, year={2023}, month={Jun.}, pages={1984-1992} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25290/25062", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25290", + "pdf_size": 14283513, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2643256171261594272&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "de.bosch.com;de.bosch.com;uni-bonn.de", + "email": "de.bosch.com;de.bosch.com;uni-bonn.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;1", + "aff_unique_norm": "Bosch Center for Artificial Intelligence;University of Bonn", + "aff_unique_dep": "Center for Artificial Intelligence;", + "aff_unique_url": "https://www.bosch-ai.com;https://www.uni-bonn.de/", + "aff_unique_abbr": "BCAI;UBonn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-26222", + "title": "Unlabeled Imperfect Demonstrations in Adversarial Imitation Learning", + "track": "main", + "status": "Technical", + "abstract": "Adversarial imitation learning has become a widely used imitation learning framework. The discriminator is often trained by taking expert demonstrations and policy trajectories as examples respectively from two categories (positive vs. negative) and the policy is then expected to produce trajectories that are indistinguishable from the expert demonstrations. But in the real world, the collected expert demonstrations are more likely to be imperfect, where only an unknown fraction of the demonstrations are optimal. Instead of treating imperfect expert demonstrations as absolutely positive or negative, we investigate unlabeled imperfect expert demonstrations as they are. 
A positive-unlabeled adversarial imitation learning algorithm is developed to dynamically sample expert demonstrations that can well match the trajectories from the constantly optimized agent policy. The trajectories of an initial agent policy could be closer to those non-optimal expert demonstrations, but within the framework of adversarial imitation learning, agent policy will be optimized to cheat the discriminator and produce trajectories that are similar to those optimal expert demonstrations. Theoretical analysis shows that our method learns from the imperfect demonstrations via a self-paced way. Experimental results on MuJoCo and RoboSuite platforms demonstrate the effectiveness of our method from different aspects.", + "primary_area": "machine learning iii", + "author": "Yunke Wang; Bo Du; Chang Xu", + "authorids": "", + "aff": "School of Computer Science, National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, Wuhan, China; School of Computer Science, National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, Wuhan, China; School of Computer Science, Faculty of Engineering, The University of Sydney, Australia", + "bibtex": "@article{Wang_Du_Xu_2023, title={Unlabeled Imperfect Demonstrations in Adversarial Imitation Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26222}, DOI={10.1609/aaai.v37i8.26222}, abstractNote={Adversarial imitation learning has become a widely used imitation learning framework. The discriminator is often trained by taking expert demonstrations and policy trajectories as examples respectively from two categories (positive vs. 
negative) and the policy is then expected to produce trajectories that are indistinguishable from the expert demonstrations. But in the real world, the collected expert demonstrations are more likely to be imperfect, where only an unknown fraction of the demonstrations are optimal. Instead of treating imperfect expert demonstrations as absolutely positive or negative, we investigate unlabeled imperfect expert demonstrations as they are. A positive-unlabeled adversarial imitation learning algorithm is developed to dynamically sample expert demonstrations that can well match the trajectories from the constantly optimized agent policy. The trajectories of an initial agent policy could be closer to those non-optimal expert demonstrations, but within the framework of adversarial imitation learning, agent policy will be optimized to cheat the discriminator and produce trajectories that are similar to those optimal expert demonstrations. Theoretical analysis shows that our method learns from the imperfect demonstrations via a self-paced way. 
Experimental results on MuJoCo and RoboSuite platforms demonstrate the effectiveness of our method from different aspects.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Yunke and Du, Bo and Xu, Chang}, year={2023}, month={Jun.}, pages={10262-10270} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26222/25994", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26222", + "pdf_size": 2344873, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4005534489967016830&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "whu.edu.cn;whu.edu.cn;sydney.edu.au", + "email": "whu.edu.cn;whu.edu.cn;sydney.edu.au", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Wuhan University;The University of Sydney", + "aff_unique_dep": "School of Computer Science;School of Computer Science", + "aff_unique_url": "http://www.whu.edu.cn;https://www.sydney.edu.au", + "aff_unique_abbr": "WHU;USYD", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "article-26971", + "title": "Unsupervised Contrastive Representation Learning for 3D Mesh Segmentation (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "3D deep learning is a growing field of interest due to the vast amount of information stored in 3D formats. Triangular meshes are an efficient representation for irregular, non-uniform 3D objects. However, meshes are often challenging to annotate due to their high computational complexity. Therefore, it is desirable to train segmentation networks with limited-labeled data. 
Self-supervised learning (SSL), a form of unsupervised representation learning, is a growing alternative to fully-supervised learning which can decrease the burden of supervision for training. Specifically, contrastive learning (CL), a form of SSL, has recently been explored to solve limited-labeled data tasks. We propose SSL-MeshCNN, a CL method for pre-training CNNs for mesh segmentation. We take inspiration from prior CL frameworks to design a novel CL algorithm specialized for meshes. Our preliminary experiments show promising results in reducing the heavy labeled data requirement needed for mesh segmentation by at least 33%.", + "primary_area": "", + "author": "Ayaan Haque; Hankyu Moon; Heng Hao; Sima Didari; Jae Oh Woo; Patrick Bangert", + "authorids": "", + "aff": "University of California, Berkeley; Samsung SDS Research America; Samsung SDS Research America; Samsung SDS Research America; Samsung SDS Research America; Samsung SDS Research America", + "bibtex": "@article{Haque_Moon_Hao_Didari_Woo_Bangert_2024, title={Unsupervised Contrastive Representation Learning for 3D Mesh Segmentation (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26971}, DOI={10.1609/aaai.v37i13.26971}, abstractNote={3D deep learning is a growing field of interest due to the vast amount of information stored in 3D formats. Triangular meshes are an efficient representation for irregular, non-uniform 3D objects. However, meshes are often challenging to annotate due to their high computational complexity. Therefore, it is desirable to train segmentation networks with limited-labeled data. Self-supervised learning (SSL), a form of unsupervised representation learning, is a growing alternative to fully-supervised learning which can decrease the burden of supervision for training. Specifically, contrastive learning (CL), a form of SSL, has recently been explored to solve limited-labeled data tasks. 
We propose SSL-MeshCNN, a CL method for pre-training CNNs for mesh segmentation. We take inspiration from prior CL frameworks to design a novel CL algorithm specialized for meshes. Our preliminary experiments show promising results in reducing the heavy labeled data requirement needed for mesh segmentation by at least 33%.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Haque, Ayaan and Moon, Hankyu and Hao, Heng and Didari, Sima and Woo, Jae Oh and Bangert, Patrick}, year={2024}, month={Jul.}, pages={16222-16223} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26971/26743", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26971", + "pdf_size": 646797, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6504137178982945350&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff_domain": "berkeley.edu; ; ; ; ; ", + "email": "berkeley.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "University of California, Berkeley;Samsung SDS Research America", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.berkeley.edu;https://www.samsungsds.com", + "aff_unique_abbr": "UC Berkeley;SSRA", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Berkeley;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26584", + "title": "Unsupervised Cross-Domain Rumor Detection with Contrastive Learning and Cross-Attention", + "track": "main", + "status": "Technical", + "abstract": "Massive rumors usually appear along with breaking news or trending topics, seriously hindering the truth. Existing rumor detection methods are mostly focused on the same domain, thus have poor performance in cross-domain scenarios due to domain shift. 
In this work, we propose an end-to-end instance-wise and prototype-wise contrastive learning model with cross-attention mechanism for cross-domain rumor detection. The model not only performs cross-domain\nfeature alignment, but also enforces target samples to align with the corresponding prototypes of a given source domain. Since target labels in a target domain are unavailable, we use a clustering-based approach with carefully initialized centers\nby a batch of source domain samples to produce pseudo labels. Moreover, we use a cross-attention mechanism on a pair of source data and target data with the same labels to learn domain-invariant representations. Because the samples in a\ndomain pair tend to express similar semantic patterns especially on the people\u2019s attitudes (e.g., supporting or denying) towards the same category of rumors, the discrepancy between a pair of source domain and target domain will be decreased. We conduct experiments on four groups of cross-domain datasets and show that our proposed model achieves state-of-the-art performance.", + "primary_area": "speech natural language processing", + "author": "Hongyan Ran; Caiyan Jia", + "authorids": "", + "aff": "School of Computer and Information Technology & Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing 100044, China; School of Computer and Information Technology & Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing 100044, China", + "bibtex": "@article{Ran_Jia_2023, title={Unsupervised Cross-Domain Rumor Detection with Contrastive Learning and Cross-Attention}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26584}, DOI={10.1609/aaai.v37i11.26584}, abstractNote={Massive rumors usually appear along with breaking news or trending topics, seriously hindering the truth. 
Existing rumor detection methods are mostly focused on the same domain, thus have poor performance in cross-domain scenarios due to domain shift. In this work, we propose an end-to-end instance-wise and prototype-wise contrastive learning model with cross-attention mechanism for cross-domain rumor detection. The model not only performs cross-domain\nfeature alignment, but also enforces target samples to align with the corresponding prototypes of a given source domain. Since target labels in a target domain are unavailable, we use a clustering-based approach with carefully initialized centers\nby a batch of source domain samples to produce pseudo labels. Moreover, we use a cross-attention mechanism on a pair of source data and target data with the same labels to learn domain-invariant representations. Because the samples in a\ndomain pair tend to express similar semantic patterns especially on the people\u2019s attitudes (e.g., supporting or denying) towards the same category of rumors, the discrepancy between a pair of source domain and target domain will be decreased. 
We conduct experiments on four groups of cross-domain datasets and show that our proposed model achieves state-of-the-art performance.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ran, Hongyan and Jia, Caiyan}, year={2023}, month={Jun.}, pages={13510-13518} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26584/26356", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26584", + "pdf_size": 1531217, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=286825679381253845&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff_domain": "bjtu.edu.cn;bjtu.edu.cn", + "email": "bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Beijing Jiaotong University", + "aff_unique_dep": "School of Computer and Information Technology", + "aff_unique_url": "http://www.bjtu.edu.cn", + "aff_unique_abbr": "BJTU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25631", + "title": "Unsupervised Deep Embedded Fusion Representation of Single-Cell Transcriptomics", + "track": "main", + "status": "Technical", + "abstract": "Cell clustering is a critical step in analyzing single-cell RNA sequencing (scRNA-seq) data, which allows us to characterize the cellular heterogeneity of transcriptional profiling at the single-cell level. Single-cell deep embedded representation models have recently become popular since they can learn feature representation and clustering simultaneously. However, the model still suffers from a variety of significant challenges, including the massive amount of data, pervasive dropout events, and complicated noise patterns in transcriptional profiling. 
Here, we propose a Single-Cell Deep Embedding Fusion Representation (scDEFR) model, which develop a deep embedded fusion representation to learn fused heterogeneous latent embedding that contains both the transcriptome gene-level information and the cell topology information. We first fuse them layer by layer to obtain compressed representations of intercellular relationships and transcriptome information. After that, the zero-inflated negative binomial model (ZINB)-based decoder is proposed to capture the global probabilistic structure of the data and reconstruct the final gene expression information and cell graph. Finally, by simultaneously integrating the clustering loss, crossentropy loss, ZINB loss, and the cell graph reconstruction loss,\nscDEFR can optimize clustering performance and learn the latent representation in fused information under a joint mutual supervised strategy. We conducted extensive and comprehensive experiments on 15 single-cell RNA-seq datasets from different sequencing platforms to demonstrate the superiority of scDEFR over a variety of state-of-the-art methods.", + "primary_area": "domain s of application", + "author": "Yue Cheng; Yanchi Su; Zhuohan Yu; Yanchun Liang; Ka-Chun Wong; Xiangtao Li", + "authorids": "", + "aff": "School of Artificial Intelligence, Jilin University, Jilin, China; School of Artificial Intelligence, Jilin University, Jilin, China; School of Artificial Intelligence, Jilin University, Jilin, China; Zhuhai Laboratory of Key Laboratory of Symbol Computation and Knowledge Engineering of Ministry of Education, Zhuhai College of Science and Technology, Zhuhai 519041, China; Department of Computer Science, City University of Hong Kong, Hong Kong SAR; School of Artificial Intelligence, Jilin University, Jilin, China", + "bibtex": "@article{Cheng_Su_Yu_Liang_Wong_Li_2023, title={Unsupervised Deep Embedded Fusion Representation of Single-Cell Transcriptomics}, volume={37}, 
url={https://ojs.aaai.org/index.php/AAAI/article/view/25631}, DOI={10.1609/aaai.v37i4.25631}, abstractNote={Cell clustering is a critical step in analyzing single-cell RNA sequencing (scRNA-seq) data, which allows us to characterize the cellular heterogeneity of transcriptional profiling at the single-cell level. Single-cell deep embedded representation models have recently become popular since they can learn feature representation and clustering simultaneously. However, the model still suffers from a variety of significant challenges, including the massive amount of data, pervasive dropout events, and complicated noise patterns in transcriptional profiling. Here, we propose a Single-Cell Deep Embedding Fusion Representation (scDEFR) model, which develop a deep embedded fusion representation to learn fused heterogeneous latent embedding that contains both the transcriptome gene-level information and the cell topology information. We first fuse them layer by layer to obtain compressed representations of intercellular relationships and transcriptome information. After that, the zero-inflated negative binomial model (ZINB)-based decoder is proposed to capture the global probabilistic structure of the data and reconstruct the final gene expression information and cell graph. Finally, by simultaneously integrating the clustering loss, crossentropy loss, ZINB loss, and the cell graph reconstruction loss,\nscDEFR can optimize clustering performance and learn the latent representation in fused information under a joint mutual supervised strategy. 
We conducted extensive and comprehensive experiments on 15 single-cell RNA-seq datasets from different sequencing platforms to demonstrate the superiority of scDEFR over a variety of state-of-the-art methods.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Yue and Su, Yanchi and Yu, Zhuohan and Liang, Yanchun and Wong, Ka-Chun and Li, Xiangtao}, year={2023}, month={Jun.}, pages={5036-5044} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25631/25403", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25631", + "pdf_size": 1202586, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3065823564432488974&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.jlu.edu.cn;gmail.com;mails.jlu.edu.cn;jlu.edu.cn;cityu.edu.hk;jlu.edu.cn", + "email": "mails.jlu.edu.cn;gmail.com;mails.jlu.edu.cn;jlu.edu.cn;cityu.edu.hk;jlu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;2;0", + "aff_unique_norm": "Jilin University;Zhuhai College of Science and Technology;City University of Hong Kong", + "aff_unique_dep": "School of Artificial Intelligence;Key Laboratory of Symbol Computation and Knowledge Engineering;Department of Computer Science", + "aff_unique_url": "http://www.jlu.edu.cn;;https://www.cityu.edu.hk", + "aff_unique_abbr": "JLU;;CityU", + "aff_campus_unique_index": "0;0;0;1;2;0", + "aff_campus_unique": "Jilin;Zhuhai;Hong Kong SAR", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25306", + "title": "Unsupervised Deep Learning for Phase Retrieval via Teacher-Student Distillation", + "track": "main", + "status": "Technical", + "abstract": "Phase retrieval (PR) is a challenging nonlinear inverse problem in scientific imaging that involves reconstructing the phase of a signal from its intensity measurements. 
Recently, there has been an increasing interest in deep learning-based PR. Motivated by the challenge of collecting ground-truth (GT) images in many domains, this paper proposes a fully-unsupervised learning approach for PR, which trains an end-to-end deep model via a GT-free teacher-student online distillation framework. Specifically, a teacher model is trained using a self-expressive loss with noise resistance, while a student model is trained with a consistency loss on augmented data to exploit the teacher's dark knowledge. Additionally, we develop an enhanced unfolding network for both the teacher and student models. Extensive experiments show that our proposed approach outperforms existing unsupervised PR methods with higher computational efficiency and performs competitively against supervised methods.", + "primary_area": "computer vision ii", + "author": "Yuhui Quan; Zhile Chen; Tongyao Pang; Hui Ji", + "authorids": "", + "aff": "School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China + Pazhou Lab, Guangzhou 510320, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China + Pazhou Lab, Guangzhou 510320, China; Department of Mathematics, National University of Singapore, 119076, Singapore; Department of Mathematics, National University of Singapore, 119076, Singapore", + "bibtex": "@article{Quan_Chen_Pang_Ji_2023, title={Unsupervised Deep Learning for Phase Retrieval via Teacher-Student Distillation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25306}, DOI={10.1609/aaai.v37i2.25306}, abstractNote={Phase retrieval (PR) is a challenging nonlinear inverse problem in scientific imaging that involves reconstructing the phase of a signal from its intensity measurements. Recently, there has been an increasing interest in deep learning-based PR. 
Motivated by the challenge of collecting ground-truth (GT) images in many domains, this paper proposes a fully-unsupervised learning approach for PR, which trains an end-to-end deep model via a GT-free teacher-student online distillation framework. Specifically, a teacher model is trained using a self-expressive loss with noise resistance, while a student model is trained with a consistency loss on augmented data to exploit the teacher\u2019s dark knowledge. Additionally, we develop an enhanced unfolding network for both the teacher and student models. Extensive experiments show that our proposed approach outperforms existing unsupervised PR methods with higher computational efficiency and performs competitively against supervised methods.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Quan, Yuhui and Chen, Zhile and Pang, Tongyao and Ji, Hui}, year={2023}, month={Jun.}, pages={2128-2136} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25306/25078", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25306", + "pdf_size": 892327, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2354121748663280103&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "scut.edu.cn;mail.scut.edu.cn;nus.edu.sg;nus.edu.sg", + "email": "scut.edu.cn;mail.scut.edu.cn;nus.edu.sg;nus.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;2", + "aff_unique_norm": "South China University of Technology;Pazhou Lab;National University of Singapore", + "aff_unique_dep": "School of Computer Science and Engineering;;Department of Mathematics", + "aff_unique_url": "http://www.scut.edu.cn;;https://www.nus.edu.sg", + "aff_unique_abbr": "SCUT;;NUS", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0+0;0+0;1;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": 
"article-25476", + "title": "Unsupervised Deep Video Denoising with Untrained Network", + "track": "main", + "status": "Technical", + "abstract": "Deep learning has become a prominent tool for video denoising. However, most existing deep video denoising methods require supervised training using noise-free videos. Collecting noise-free videos can be costly and challenging in many applications. Therefore, this paper aims to develop an unsupervised deep learning method for video denoising that only uses a single test noisy video for training. To achieve this, an unsupervised loss function is presented that provides an unbiased estimator of its supervised counterpart defined on noise-free video. Additionally, a temporal attention mechanism is proposed to exploit redundancy among frames. The experiments on video denoising demonstrate that the proposed unsupervised method outperforms existing unsupervised methods and remains competitive against recent supervised deep learning methods.", + "primary_area": "computer vision iii", + "author": "Huan Zheng; Tongyao Pang; Hui Ji", + "authorids": "", + "aff": "Department of Mathematics at National University of Singapore, Singapore; Department of Mathematics at National University of Singapore, Singapore; Department of Mathematics at National University of Singapore, Singapore", + "bibtex": "@article{Zheng_Pang_Ji_2023, title={Unsupervised Deep Video Denoising with Untrained Network}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25476}, DOI={10.1609/aaai.v37i3.25476}, abstractNote={Deep learning has become a prominent tool for video denoising. However, most existing deep video denoising methods require supervised training using noise-free videos. Collecting noise-free videos can be costly and challenging in many applications. Therefore, this paper aims to develop an unsupervised deep learning method for video denoising that only uses a single test noisy video for training. 
To achieve this, an unsupervised loss function is presented that provides an unbiased estimator of its supervised counterpart defined on noise-free video. Additionally, a temporal attention mechanism is proposed to exploit redundancy among frames. The experiments on video denoising demonstrate that the proposed unsupervised method outperforms existing unsupervised methods and remains competitive against recent supervised deep learning methods.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zheng, Huan and Pang, Tongyao and Ji, Hui}, year={2023}, month={Jun.}, pages={3651-3659} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25476/25248", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25476", + "pdf_size": 10942752, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3371184957652721627&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "u.nus.edu;nus.edu.sg;nus.edu.sg", + "email": "u.nus.edu;nus.edu.sg;nus.edu.sg", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "Department of Mathematics", + "aff_unique_url": "https://www.nus.edu.sg", + "aff_unique_abbr": "NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-25138", + "title": "Unsupervised Domain Adaptation for Medical Image Segmentation by Selective Entropy Constraints and Adaptive Semantic Alignment", + "track": "main", + "status": "Technical", + "abstract": "Generalizing a deep learning model to new domains is crucial for computer-aided medical diagnosis systems. Most existing unsupervised domain adaptation methods have made significant progress in reducing the domain distribution gap through adversarial training. 
However, these methods may still produce overconfident but erroneous results on unseen target images. This paper proposes a new unsupervised domain adaptation framework for cross-modality medical image segmentation. Specifically, We first introduce two data augmentation approaches to generate two sets of semantics-preserving augmented images. Based on the model's predictive consistency on these two sets of augmented images, we identify reliable and unreliable pixels. We then perform a selective entropy constraint: we minimize the entropy of reliable pixels to increase their confidence while maximizing the entropy of unreliable pixels to reduce their confidence. Based on the identified reliable and unreliable pixels, we further propose an adaptive semantic alignment module which performs class-level distribution adaptation by minimizing the distance between same class prototypes between domains, where unreliable pixels are removed to derive more accurate prototypes. We have conducted extensive experiments on the cross-modality cardiac structure segmentation task. The experimental results show that the proposed method significantly outperforms the state-of-the-art comparison algorithms. 
Our code and data are available at https://github.com/fengweie/SE_ASA.", + "primary_area": "computer vision i", + "author": "Wei Feng; Lie Ju; Lin Wang; Kaimin Song; Xin Zhao; Zongyuan Ge", + "authorids": "", + "aff": "Monash eResearch Center, Monash University + Monash Medical AI Group, Monash University + Airdoc Monash Research Centre, Monash University; Monash eResearch Center, Monash University + Monash Medical AI Group, Monash University + Airdoc Monash Research Centre, Monash University; Monash eResearch Center, Monash University + Monash Medical AI Group, Monash University + Airdoc Monash Research Centre, Monash University; Airdoc LLC; Airdoc LLC; Monash eResearch Center, Monash University + Monash Medical AI Group, Monash University + Airdoc Monash Research Centre, Monash University", + "bibtex": "@article{Feng_Ju_Wang_Song_Zhao_Ge_2023, title={Unsupervised Domain Adaptation for Medical Image Segmentation by Selective Entropy Constraints and Adaptive Semantic Alignment}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25138}, DOI={10.1609/aaai.v37i1.25138}, abstractNote={Generalizing a deep learning model to new domains is crucial for computer-aided medical diagnosis systems. Most existing unsupervised domain adaptation methods have made significant progress in reducing the domain distribution gap through adversarial training. However, these methods may still produce overconfident but erroneous results on unseen target images. This paper proposes a new unsupervised domain adaptation framework for cross-modality medical image segmentation. Specifically, We first introduce two data augmentation approaches to generate two sets of semantics-preserving augmented images. Based on the model\u2019s predictive consistency on these two sets of augmented images, we identify reliable and unreliable pixels. 
We then perform a selective entropy constraint: we minimize the entropy of reliable pixels to increase their confidence while maximizing the entropy of unreliable pixels to reduce their confidence. Based on the identified reliable and unreliable pixels, we further propose an adaptive semantic alignment module which performs class-level distribution adaptation by minimizing the distance between same class prototypes between domains, where unreliable pixels are removed to derive more accurate prototypes. We have conducted extensive experiments on the cross-modality cardiac structure segmentation task. The experimental results show that the proposed method significantly outperforms the state-of-the-art comparison algorithms. Our code and data are available at https://github.com/fengweie/SE_ASA.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Feng, Wei and Ju, Lie and Wang, Lin and Song, Kaimin and Zhao, Xin and Ge, Zongyuan}, year={2023}, month={Jun.}, pages={623-631} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25138/24910", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25138", + "pdf_size": 912307, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2536986861957080092&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "monash.edu;gmail.com;gmail.com;gmail.com;airdoc.com;monash.edu", + "email": "monash.edu;gmail.com;gmail.com;gmail.com;airdoc.com;monash.edu", + "github": "https://github.com/fengweie/SE ASA", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0+0;0+0+0;0+0+0;1;1;0+0+0", + "aff_unique_norm": "Monash University;Airdoc", + "aff_unique_dep": "Monash eResearch Center;", + "aff_unique_url": "https://www.monash.edu;", + "aff_unique_abbr": "Monash;Airdoc", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Monash", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;1;1;0+0+0", + "aff_country_unique": 
"Australia;United States" + }, + { + "id": "article-26494", + "title": "Unsupervised Explanation Generation via Correct Instantiations", + "track": "main", + "status": "Technical", + "abstract": "While large pre-trained language models (PLM) have shown their great skills at solving discriminative tasks, a significant gap remains when compared with humans for explanation-related tasks. Among them, explaining the reason why a statement is wrong (e.g., against commonsense) is incredibly challenging. \nThe major difficulty is finding the conflict point, where the statement contradicts our real world. This paper proposes Neon, a two-phrase, unsupervised explanation generation framework. Neon first generates corrected instantiations of the statement (phase I), then uses them to prompt large PLMs to find the conflict point and complete the explanation (phase II). We conduct extensive experiments on two standard explanation benchmarks, i.e., ComVE and e-SNLI. According to both automatic and human evaluations, Neon outperforms baselines, even for those with human-annotated instantiations. In addition to explaining a negative prediction, we further demonstrate that Neon remains effective when generalizing to different scenarios. 
The resources of Neon are available at: https://github.com/Shark-NLP/Neon.", + "primary_area": "speech natural language processing", + "author": "Sijie Cheng; Zhiyong Wu; Jiangjie Chen; Zhixing Li; Yang Liu; Lingpeng Kong", + "authorids": "", + "aff": "Shanghai Artificial Intelligence Laboratory + Fudan University; Shanghai Artificial Intelligence Laboratory; Fudan University; Full Truck Alliance; Institute for AI Industry Research, Tsinghua University + Department of Computer Science and Technology, Tsinghua University; Shanghai Artificial Intelligence Laboratory + The University of Hong Kong", + "bibtex": "@article{Cheng_Wu_Chen_Li_Liu_Kong_2023, title={Unsupervised Explanation Generation via Correct Instantiations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26494}, DOI={10.1609/aaai.v37i11.26494}, abstractNote={While large pre-trained language models (PLM) have shown their great skills at solving discriminative tasks, a significant gap remains when compared with humans for explanation-related tasks. Among them, explaining the reason why a statement is wrong (e.g., against commonsense) is incredibly challenging. The major difficulty is finding the conflict point, where the statement contradicts our real world. This paper proposes Neon, a two-phrase, unsupervised explanation generation framework. Neon first generates corrected instantiations of the statement (phase I), then uses them to prompt large PLMs to find the conflict point and complete the explanation (phase II). We conduct extensive experiments on two standard explanation benchmarks, i.e., ComVE and e-SNLI. According to both automatic and human evaluations, Neon outperforms baselines, even for those with human-annotated instantiations. In addition to explaining a negative prediction, we further demonstrate that Neon remains effective when generalizing to different scenarios. 
The resources of Neon are available at: https://github.com/Shark-NLP/Neon.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Sijie and Wu, Zhiyong and Chen, Jiangjie and Li, Zhixing and Liu, Yang and Kong, Lingpeng}, year={2023}, month={Jun.}, pages={12700-12708} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26494/26266", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26494", + "pdf_size": 1086924, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16375252442250889733&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "fudan.edu.cn;pjlab.org.cn; ; ; ; ", + "email": "fudan.edu.cn;pjlab.org.cn; ; ; ; ", + "github": "https://github.com/Shark-NLP/Neon", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;1;2;3+3;0+4", + "aff_unique_norm": "Shanghai Artificial Intelligence Laboratory;Fudan University;Full Truck Alliance;Tsinghua University;The University of Hong Kong", + "aff_unique_dep": ";;;Institute for AI Industry Research;", + "aff_unique_url": "http://www.shailab.org/;https://www.fudan.edu.cn;https://www.fulltruckalliance.com;https://www.tsinghua.edu.cn;https://www.hku.hk", + "aff_unique_abbr": "Shanghai AI Lab;Fudan;FTA;Tsinghua;HKU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25490", + "title": "Unsupervised Hierarchical Domain Adaptation for Adverse Weather Optical Flow", + "track": "main", + "status": "Technical", + "abstract": "Optical flow estimation has made great progress, but usually suffers from degradation under adverse weather. Although semi/full-supervised methods have made good attempts, the domain shift between the synthetic and real adverse weather images would deteriorate their performance. 
To alleviate this issue, our start point is to unsupervisedly transfer the knowledge from source clean domain to target degraded domain. Our key insight is that adverse weather does not change the intrinsic optical flow of the scene, but causes a significant difference for the warp error between clean and degraded images. In this work, we propose the first unsupervised framework for adverse weather optical flow via hierarchical motion-boundary adaptation. Specifically, we first employ image translation to construct the transformation relationship between clean and degraded domains. In motion adaptation, we utilize the flow consistency knowledge to align the cross-domain optical flows into a motion-invariance common space, where the optical flow from clean weather is used as the guidance-knowledge to obtain a preliminary optical flow for adverse weather. Furthermore, we leverage the warp error inconsistency which measures the motion misalignment of the boundary between the clean and degraded domains, and propose a joint intra- and inter-scene boundary contrastive adaptation to refine the motion boundary. The hierarchical motion and boundary adaptation jointly promotes optical flow in a unified framework. 
Extensive quantitative and qualitative experiments have been performed to verify the superiority of the proposed method.", + "primary_area": "computer vision iii", + "author": "Hanyu Zhou; Yi Chang; Gang Chen; Luxin Yan", + "authorids": "", + "aff": "National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology; School of Computer Science and Engineering, Sun Yat-sen University; National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology", + "bibtex": "@article{Zhou_Chang_Chen_Yan_2023, title={Unsupervised Hierarchical Domain Adaptation for Adverse Weather Optical Flow}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25490}, DOI={10.1609/aaai.v37i3.25490}, abstractNote={Optical flow estimation has made great progress, but usually suffers from degradation under adverse weather. Although semi/full-supervised methods have made good attempts, the domain shift between the synthetic and real adverse weather images would deteriorate their performance. To alleviate this issue, our start point is to unsupervisedly transfer the knowledge from source clean domain to target degraded domain. Our key insight is that adverse weather does not change the intrinsic optical flow of the scene, but causes a significant difference for the warp error between clean and degraded images. In this work, we propose the first unsupervised framework for adverse weather optical flow via hierarchical motion-boundary adaptation. 
Specifically, we first employ image translation to construct the transformation relationship between clean and degraded domains. In motion adaptation, we utilize the flow consistency knowledge to align the cross-domain optical flows into a motion-invariance common space, where the optical flow from clean weather is used as the guidance-knowledge to obtain a preliminary optical flow for adverse weather. Furthermore, we leverage the warp error inconsistency which measures the motion misalignment of the boundary between the clean and degraded domains, and propose a joint intra- and inter-scene boundary contrastive adaptation to refine the motion boundary. The hierarchical motion and boundary adaptation jointly promotes optical flow in a unified framework. Extensive quantitative and qualitative experiments have been performed to verify the superiority of the proposed method.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Hanyu and Chang, Yi and Chen, Gang and Yan, Luxin}, year={2023}, month={Jun.}, pages={3778-3786} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25490/25262", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25490", + "pdf_size": 8411566, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7983237140268053394&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "hust.edu.cn;hust.edu.cn;mail.sysu.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;mail.sysu.edu.cn;hust.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Huazhong University of Science and Technology;Sun Yat-sen University", + "aff_unique_dep": "School of Artificial Intelligence and Automation;School of Computer Science and Engineering", + "aff_unique_url": "http://www.hust.edu.cn;http://www.sysu.edu.cn", + "aff_unique_abbr": "HUST;SYSU", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25603", + "title": "Unsupervised Legal Evidence Retrieval via Contrastive Learning with Approximate Aggregated Positive", + "track": "main", + "status": "Technical", + "abstract": "Verifying the facts alleged by the prosecutors before the trial requires the judges to retrieve evidence within the massive materials accompanied.\nExisting Legal AI applications often assume the facts are already determined and fail to notice the difficulty of reconstructing them. To build a practical Legal AI application and free the judges from the manually searching work, we introduce the task of Legal Evidence Retrieval, which aims at automatically retrieving the precise fact-related verbal evidence within a single case. We formulate the task in a dense retrieval paradigm, and jointly learn the constrastive representations and alignments between facts and evidence. To get rid of the tedious annotations, we construct an approximated positive vector for a given fact by aggregating a set of evidence from the same case. An entropy-based denoise technique is further applied to mitigate the impact of false positive samples. We train our models on tens of thousands of unlabeled cases and evaluate them on a labeled dataset containing 919 cases and 4,336 queries. Experimental results indicate that our approach is effective and outperforms other state-of-the-art representation and retrieval models. 
The dataset and code are available at https://github.com/yaof20/LER.", + "primary_area": "data mining and knowledge management", + "author": "Feng Yao; Jingyuan Zhang; Yating Zhang; Xiaozhong Liu; Changlong Sun; Yun Liu; Weixing Shen", + "authorids": "", + "aff": "School of Law, Institute for AI and Law, Tsinghua University, Beijing, China; DAMO Academy, Alibaba Group, Hangzhou, Zhejiang, China; DAMO Academy, Alibaba Group, Hangzhou, Zhejiang, China; Worcester Polytechnic Institute, MA, USA; DAMO Academy, Alibaba Group, Hangzhou, Zhejiang, China; School of Law, Institute for AI and Law, Tsinghua University, Beijing, China; School of Law, Institute for AI and Law, Tsinghua University, Beijing, China", + "bibtex": "@article{Yao_Zhang_Zhang_Liu_Sun_Liu_Shen_2023, title={Unsupervised Legal Evidence Retrieval via Contrastive Learning with Approximate Aggregated Positive}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25603}, DOI={10.1609/aaai.v37i4.25603}, abstractNote={Verifying the facts alleged by the prosecutors before the trial requires the judges to retrieve evidence within the massive materials accompanied.\nExisting Legal AI applications often assume the facts are already determined and fail to notice the difficulty of reconstructing them. To build a practical Legal AI application and free the judges from the manually searching work, we introduce the task of Legal Evidence Retrieval, which aims at automatically retrieving the precise fact-related verbal evidence within a single case. We formulate the task in a dense retrieval paradigm, and jointly learn the constrastive representations and alignments between facts and evidence. To get rid of the tedious annotations, we construct an approximated positive vector for a given fact by aggregating a set of evidence from the same case. An entropy-based denoise technique is further applied to mitigate the impact of false positive samples. 
We train our models on tens of thousands of unlabeled cases and evaluate them on a labeled dataset containing 919 cases and 4,336 queries. Experimental results indicate that our approach is effective and outperforms other state-of-the-art representation and retrieval models. The dataset and code are available at https://github.com/yaof20/LER.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yao, Feng and Zhang, Jingyuan and Zhang, Yating and Liu, Xiaozhong and Sun, Changlong and Liu, Yun and Shen, Weixing}, year={2023}, month={Jun.}, pages={4783-4791} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25603/25375", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25603", + "pdf_size": 337204, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7953780374874531418&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "mails.tsinghua.edu.cn;;;;;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;;;;;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/yaof20/LER", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;2;1;0;0", + "aff_unique_norm": "Tsinghua University;Alibaba Group;Worcester Polytechnic Institute", + "aff_unique_dep": "School of Law, Institute for AI and Law;DAMO Academy;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.alibaba.com;https://www.wpi.edu", + "aff_unique_abbr": "Tsinghua;Alibaba;WPI", + "aff_campus_unique_index": "0;1;1;1;0;0", + "aff_campus_unique": "Beijing;Hangzhou;", + "aff_country_unique_index": "0;0;0;1;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25404", + "title": "Unsupervised Multi-Exposure Image Fusion Breaking Exposure Limits via Contrastive Learning", + "track": "main", + "status": "Technical", + "abstract": "This paper proposes an unsupervised multi-exposure image fusion (MEF) method via contrastive learning, termed as 
MEF-CL. It breaks exposure limits and performance bottleneck faced by existing methods. MEF-CL firstly designs similarity constraints to preserve contents in source images. It eliminates the need for ground truth (actually not exist and created artificially) and thus avoids negative impacts of inappropriate ground truth on performance and generalization. Moreover, we explore a latent feature space and apply contrastive learning in this space to guide fused image to approximate normal-light samples and stay away from inappropriately exposed ones. In this way, characteristics of fused images (e.g., illumination, colors) can be further improved without being subject to source images. Therefore, MEF-CL is applicable to image pairs of any multiple exposures rather than a pair of under-exposed and over-exposed images mandated by existing methods. By alleviating dependence on source images, MEF-CL shows better generalization for various scenes. Consequently, our results exhibit appropriate illumination, detailed textures, and saturated colors. Qualitative, quantitative, and ablation experiments validate the superiority and generalization of MEF-CL. Our code is publicly available at https://github.com/hanna-xu/MEF-CL.", + "primary_area": "computer vision iii", + "author": "Han Xu; Liang Haochen; Jiayi Ma", + "authorids": "", + "aff": "Electronic Information School, Wuhan University; Electronic Information School, Wuhan University; Electronic Information School, Wuhan University", + "bibtex": "@article{Xu_Haochen_Ma_2023, title={Unsupervised Multi-Exposure Image Fusion Breaking Exposure Limits via Contrastive Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25404}, DOI={10.1609/aaai.v37i3.25404}, abstractNote={This paper proposes an unsupervised multi-exposure image fusion (MEF) method via contrastive learning, termed as MEF-CL. It breaks exposure limits and performance bottleneck faced by existing methods. 
MEF-CL firstly designs similarity constraints to preserve contents in source images. It eliminates the need for ground truth (actually not exist and created artificially) and thus avoids negative impacts of inappropriate ground truth on performance and generalization. Moreover, we explore a latent feature space and apply contrastive learning in this space to guide fused image to approximate normal-light samples and stay away from inappropriately exposed ones. In this way, characteristics of fused images (e.g., illumination, colors) can be further improved without being subject to source images. Therefore, MEF-CL is applicable to image pairs of any multiple exposures rather than a pair of under-exposed and over-exposed images mandated by existing methods. By alleviating dependence on source images, MEF-CL shows better generalization for various scenes. Consequently, our results exhibit appropriate illumination, detailed textures, and saturated colors. Qualitative, quantitative, and ablation experiments validate the superiority and generalization of MEF-CL. 
Our code is publicly available at https://github.com/hanna-xu/MEF-CL.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xu, Han and Haochen, Liang and Ma, Jiayi}, year={2023}, month={Jun.}, pages={3010-3017} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25404/25176", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25404", + "pdf_size": 13593750, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1074160511441849008&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "whu.edu.cn;163.com;gmail.com", + "email": "whu.edu.cn;163.com;gmail.com", + "github": "https://github.com/hanna-xu/MEF-CL", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Wuhan University", + "aff_unique_dep": "Electronic Information School", + "aff_unique_url": "http://www.whu.edu.cn", + "aff_unique_abbr": "WHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26558", + "title": "Unsupervised Paraphrasing under Syntax Knowledge", + "track": "main", + "status": "Technical", + "abstract": "The soundness of syntax is an important issue for the paraphrase generation task. \nMost methods control the syntax of paraphrases by embedding the syntax and semantics in the generation process, which cannot guarantee the syntactical correctness of the results. 
\nDifferent from them, in this paper we investigate the structural patterns of word usages termed as the word composable knowledge and integrate it into the paraphrase generation to control the syntax in an explicit way.\nThis syntax knowledge is pretrained on a large corpus with the dependency relationships and formed as the probabilistic functions on the word-level syntactical soundness.\nFor the sentence-level correctness, we design a hierarchical syntax structure loss to quantitatively verify the syntactical soundness of the paraphrase against the given dependency template. \nThus, the generation process can select the appropriate words with consideration on both semantics and syntax. \nThe proposed method is evaluated on a few paraphrase datasets.\nThe experimental results show that the quality of paraphrases by our proposed method outperforms the compared methods, especially in terms of syntax correctness.", + "primary_area": "speech natural language processing", + "author": "Tianyuan Liu; Yuqing Sun; Jiaqi Wu; Xi Xu; Yuchen Han; Cheng Li; Bin Gong", + "authorids": "", + "aff": "School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University; School of Software, Shandong University", + "bibtex": "@article{Liu_Sun_Wu_Xu_Han_Li_Gong_2023, title={Unsupervised Paraphrasing under Syntax Knowledge}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26558}, DOI={10.1609/aaai.v37i11.26558}, abstractNote={The soundness of syntax is an important issue for the paraphrase generation task. Most methods control the syntax of paraphrases by embedding the syntax and semantics in the generation process, which cannot guarantee the syntactical correctness of the results. 
Different from them, in this paper we investigate the structural patterns of word usages termed as the word composable knowledge and integrate it into the paraphrase generation to control the syntax in an explicit way.\nThis syntax knowledge is pretrained on a large corpus with the dependency relationships and formed as the probabilistic functions on the word-level syntactical soundness.\nFor the sentence-level correctness, we design a hierarchical syntax structure loss to quantitatively verify the syntactical soundness of the paraphrase against the given dependency template. Thus, the generation process can select the appropriate words with consideration on both semantics and syntax. The proposed method is evaluated on a few paraphrase datasets.\nThe experimental results show that the quality of paraphrases by our proposed method outperforms the compared methods, especially in terms of syntax correctness.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Liu, Tianyuan and Sun, Yuqing and Wu, Jiaqi and Xu, Xi and Han, Yuchen and Li, Cheng and Gong, Bin}, year={2023}, month={Jun.}, pages={13273-13281} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26558/26330", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26558", + "pdf_size": 474542, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11567966571731331709&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "foxmail.com;sdu.edu.cn;163.com; fsidxu;mail.sdu.edu.cn;qq.com;sdu.edu.cn", + "email": "foxmail.com;sdu.edu.cn;163.com; fsidxu;mail.sdu.edu.cn;qq.com;sdu.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Shandong University", + "aff_unique_dep": "School of Software", + "aff_unique_url": "http://www.sdu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25782", + "title": "Untangled: A Complete Dynamic Topological Logic", + "track": "main", + "status": "Technical", + "abstract": "Dynamical systems are general models of change or movement over time with a broad area of applicability to many branches of science, including computer science and AI. Dynamic topological logic (DTL) is a formal framework for symbolic reasoning about dynamical systems. DTL can express various liveness and reachability conditions on such systems, but has the drawback that the only known axiomatisation requires an extended language. In this paper, we consider dynamic topological logic restricted to the class of scattered spaces. Scattered spaces appear in the context of computational logic as they provide semantics for provability and enjoy definable fixed points. We exhibit the first sound and complete dynamic topological logic in the original language of DTL. In particular, we show that the version of DTL based on the class of scattered spaces is finitely axiomatisable, and that the natural axiomatisation is sound and complete.", + "primary_area": "knowledge representation and reasoning", + "author": "David Fern\u00e1ndez-Duque; Yo\u00e0v Montacute", + "authorids": "", + "aff": "ICS of the Czech Academy of Sciences + Ghent University; University of Cambridge", + "bibtex": "@article{Fern\u00e1ndez-Duque_Montacute_2023, title={Untangled: A Complete Dynamic Topological Logic}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25782}, DOI={10.1609/aaai.v37i5.25782}, abstractNote={Dynamical systems are general models of change or movement over time with a broad area of applicability to many branches of science, including computer science and AI. Dynamic topological logic (DTL) is a formal framework for symbolic reasoning about dynamical systems. 
DTL can express various liveness and reachability conditions on such systems, but has the drawback that the only known axiomatisation requires an extended language. In this paper, we consider dynamic topological logic restricted to the class of scattered spaces. Scattered spaces appear in the context of computational logic as they provide semantics for provability and enjoy definable fixed points. We exhibit the first sound and complete dynamic topological logic in the original language of DTL. In particular, we show that the version of DTL based on the class of scattered spaces is finitely axiomatisable, and that the natural axiomatisation is sound and complete.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fern\u00e1ndez-Duque, David and Montacute, Yo\u00e0v}, year={2023}, month={Jun.}, pages={6355-6362} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25782/25554", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25782", + "pdf_size": 168824, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6340768916555826960&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "cs.cas.cz;cl.cam.ac.uk", + "email": "cs.cas.cz;cl.cam.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "Czech Academy of Sciences;Ghent University;University of Cambridge", + "aff_unique_dep": "Institute of Computer Science;;", + "aff_unique_url": "https://www.cas.cz;https://www.ugent.be/en;https://www.cam.ac.uk", + "aff_unique_abbr": "CAS;UGent;Cambridge", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0+1;2", + "aff_country_unique": "Czech Republic;Belgium;United Kingdom" + }, + { + "id": "article-25611", + "title": "Untargeted Attack against Federated Recommendation Systems via Poisonous Item Embeddings and the Defense", + "track": "main", + "status": 
"Technical", + "abstract": "Federated recommendation (FedRec) can train personalized recommenders without collecting user data, but the decentralized nature makes it susceptible to poisoning attacks. Most previous studies focus on the targeted attack to promote certain items, while the untargeted attack that aims to degrade the overall performance of the FedRec system remains less explored. In fact, untargeted attacks can disrupt the user experience and bring severe \ufb01nancial loss to the service provider. However, existing untargeted attack methods are either inapplicable or ineffective against FedRec systems. In this paper, we delve into the untargeted attack and its defense for FedRec systems. (i) We propose ClusterAttack, a novel untargeted attack method. It uploads poisonous gradients that converge the item embeddings into several dense clusters, which make the recommender generate similar scores for these items in the same cluster and perturb the ranking order. (ii) We propose a uniformity-based defense mechanism (UNION) to protect FedRec systems from such attacks. We design a contrastive learning task that regularizes the item embeddings toward a uniform distribution. Then the server \ufb01lters out these malicious gradients by estimating the uniformity of updated item embeddings. 
Experiments on two public datasets show that ClusterAttack can effectively degrade the performance of FedRec systems while circumventing many defense methods, and UNION can improve the resistance of the system against various untargeted attacks, including our ClusterAttack.", + "primary_area": "data mining and knowledge management", + "author": "Yang Yu; Qi Liu; Likang Wu; Runlong Yu; Sanshi Lei Yu; Zaixi Zhang", + "authorids": "", + "aff": "Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Anhui Province Key Laboratory of Big Data Analysis and Application, School of Computer Science and Technology, University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence", + "bibtex": "@article{Yu_Liu_Wu_Yu_Yu_Zhang_2023, title={Untargeted Attack against Federated Recommendation Systems via Poisonous Item Embeddings and the Defense}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25611}, DOI={10.1609/aaai.v37i4.25611}, abstractNote={Federated recommendation (FedRec) can 
train personalized recommenders without collecting user data, but the decentralized nature makes it susceptible to poisoning attacks. Most previous studies focus on the targeted attack to promote certain items, while the untargeted attack that aims to degrade the overall performance of the FedRec system remains less explored. In fact, untargeted attacks can disrupt the user experience and bring severe \ufb01nancial loss to the service provider. However, existing untargeted attack methods are either inapplicable or ineffective against FedRec systems. In this paper, we delve into the untargeted attack and its defense for FedRec systems. (i) We propose ClusterAttack, a novel untargeted attack method. It uploads poisonous gradients that converge the item embeddings into several dense clusters, which make the recommender generate similar scores for these items in the same cluster and perturb the ranking order. (ii) We propose a uniformity-based defense mechanism (UNION) to protect FedRec systems from such attacks. We design a contrastive learning task that regularizes the item embeddings toward a uniform distribution. Then the server \ufb01lters out these malicious gradients by estimating the uniformity of updated item embeddings. 
Experiments on two public datasets show that ClusterAttack can effectively degrade the performance of FedRec systems while circumventing many defense methods, and UNION can improve the resistance of the system against various untargeted attacks, including our ClusterAttack.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yu, Yang and Liu, Qi and Wu, Likang and Yu, Runlong and Yu, Sanshi Lei and Zhang, Zaixi}, year={2023}, month={Jun.}, pages={4854-4863} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25611/25383", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25611", + "pdf_size": 354109, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8115917470282969854&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;gmail.com;mail.ustc.edu.cn", + "email": "mail.ustc.edu.cn;ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;gmail.com;mail.ustc.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "University of Science and Technology of China;State Key Laboratory of Cognitive Intelligence", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "http://www.ustc.edu.cn;", + "aff_unique_abbr": "USTC;", + "aff_campus_unique_index": ";;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26572", + "title": "Unveiling the Black Box of PLMs with Semantic Anchors: Towards Interpretable Neural Semantic Parsing", + "track": "main", + "status": "Technical", + "abstract": "The recent prevalence of pretrained language models (PLMs) has dramatically shifted the paradigm of semantic parsing, where the mapping from natural language utterances to structured logical forms is now formulated as a Seq2Seq 
task. Despite the promising performance, previous PLM-based approaches often suffer from hallucination problems due to their negligence of the structural information contained in the sentence, which essentially constitutes the key semantics of the logical forms. Furthermore, most works treat PLM as a black box in which the generation process of the target logical form is hidden beneath the decoder modules, which greatly hinders the model's intrinsic interpretability. To address these two issues, we propose to incorporate the current PLMs with a hierarchical decoder network. By taking the first-principle structures as the semantic anchors, we propose two novel intermediate supervision tasks, namely Semantic Anchor Extraction and Semantic Anchor Alignment, for training the hierarchical decoders and probing the model intermediate representations in a self-adaptive manner alongside the fine-tuning process. We conduct intensive experiments on several semantic parsing benchmarks and demonstrate that our approach can consistently outperform the baselines. 
More importantly, by analyzing the intermediate representations of the hierarchical decoders, our approach also makes a huge step toward the interpretability of PLMs in the domain of semantic parsing.", + "primary_area": "speech natural language processing", + "author": "Lunyiu Nie; Jiuding Sun; Yanlin Wang; Lun Du; Shi Han; Dongmei Zhang; Lei Hou; Juanzi Li; Jidong Zhai", + "authorids": "", + "aff": "Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; School of Software Engineering, Sun Yat-sen University + Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University", + "bibtex": "@article{Nie_Sun_Wang_Du_Han_Zhang_Hou_Li_Zhai_2023, title={Unveiling the Black Box of PLMs with Semantic Anchors: Towards Interpretable Neural Semantic Parsing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26572}, DOI={10.1609/aaai.v37i11.26572}, abstractNote={The recent prevalence of pretrained language models (PLMs) has dramatically shifted the paradigm of semantic parsing, where the mapping from natural language utterances to structured logical forms is now formulated as a Seq2Seq task. Despite the promising performance, previous PLM-based approaches often suffer from hallucination problems due to their negligence of the structural information contained in the sentence, which essentially constitutes the key semantics of the logical forms. Furthermore, most works treat PLM as a black box in which the generation process of the target logical form is hidden beneath the decoder modules, which greatly hinders the model\u2019s intrinsic interpretability. 
To address these two issues, we propose to incorporate the current PLMs with a hierarchical decoder network. By taking the first-principle structures as the semantic anchors, we propose two novel intermediate supervision tasks, namely Semantic Anchor Extraction and Semantic Anchor Alignment, for training the hierarchical decoders and probing the model intermediate representations in a self-adaptive manner alongside the fine-tuning process. We conduct intensive experiments on several semantic parsing benchmarks and demonstrate that our approach can consistently outperform the baselines. More importantly, by analyzing the intermediate representations of the hierarchical decoders, our approach also makes a huge step toward the interpretability of PLMs in the domain of semantic parsing.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nie, Lunyiu and Sun, Jiuding and Wang, Yanlin and Du, Lun and Han, Shi and Zhang, Dongmei and Hou, Lei and Li, Juanzi and Zhai, Jidong}, year={2023}, month={Jun.}, pages={13400-13408} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26572/26344", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26572", + "pdf_size": 1002673, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11701094388444957258&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mail.sysu.edu.cn;microsoft.com;microsoft.com;microsoft.com;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mail.sysu.edu.cn;microsoft.com;microsoft.com;microsoft.com;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1+2;2;2;2;0;0;0", + "aff_unique_norm": "Tsinghua University;Sun Yat-sen University;Microsoft Research", + "aff_unique_dep": "Department of Computer Science and Technology;School of 
Software Engineering;Research", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.sysu.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "THU;SYSU;MSR Asia", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25117", + "title": "User-Controllable Arbitrary Style Transfer via Entropy Regularization", + "track": "main", + "status": "Technical", + "abstract": "Ensuring the overall end-user experience is a challenging task in arbitrary style transfer (AST) due to the subjective nature of style transfer quality. A good practice is to provide users many instead of one AST result. However, existing approaches require to run multiple AST models or inference a diversified AST (DAST) solution multiple times, and thus they are either slow in speed or limited in diversity. In this paper, we propose a novel solution ensuring both efficiency and diversity for generating multiple user-controllable AST results by systematically modulating AST behavior at run-time. We begin with reformulating three prominent AST methods into a unified assign-and-mix problem and discover that the entropies of their assignment matrices exhibit a large variance. We then solve the unified problem in an optimal transport framework using the Sinkhorn-Knopp algorithm with a user input \u03b5 to control the said entropy and thus modulate stylization. Empirical results demonstrate the superiority of the proposed solution, with speed and stylization quality comparable to or better than existing AST and significantly more diverse than previous DAST works. 
Code is available at https://github.com/cplusx/eps-Assign-and-Mix.", + "primary_area": "computer vision i", + "author": "Jiaxin Cheng; Yue Wu; Ayush Jaiswal; Xu Zhang; Pradeep Natarajan; Prem Natarajan", + "authorids": "", + "aff": "USC Information Sciences Institute; Amazon Alexa Natural Understanding; Amazon Alexa Natural Understanding; Amazon Alexa Natural Understanding; Amazon Alexa Natural Understanding; Amazon Alexa Natural Understanding", + "bibtex": "@article{Cheng_Wu_Jaiswal_Zhang_Natarajan_Natarajan_2023, title={User-Controllable Arbitrary Style Transfer via Entropy Regularization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25117}, DOI={10.1609/aaai.v37i1.25117}, abstractNote={Ensuring the overall end-user experience is a challenging task in arbitrary style transfer (AST) due to the subjective nature of style transfer quality. A good practice is to provide users many instead of one AST result. However, existing approaches require to run multiple AST models or inference a diversified AST (DAST) solution multiple times, and thus they are either slow in speed or limited in diversity. In this paper, we propose a novel solution ensuring both efficiency and diversity for generating multiple user-controllable AST results by systematically modulating AST behavior at run-time. We begin with reformulating three prominent AST methods into a unified assign-and-mix problem and discover that the entropies of their assignment matrices exhibit a large variance. We then solve the unified problem in an optimal transport framework using the Sinkhorn-Knopp algorithm with a user input \u03b5 to control the said entropy and thus modulate stylization. Empirical results demonstrate the superiority of the proposed solution, with speed and stylization quality comparable to or better than existing AST and significantly more diverse than previous DAST works. 
Code is available at https://github.com/cplusx/eps-Assign-and-Mix.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Jiaxin and Wu, Yue and Jaiswal, Ayush and Zhang, Xu and Natarajan, Pradeep and Natarajan, Prem}, year={2023}, month={Jun.}, pages={433-441} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25117/24889", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25117", + "pdf_size": 9403125, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16292233490879630536&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "isi.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "isi.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "https://github.com/cplusx/eps-Assign-and-Mix", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "University of Southern California;Amazon", + "aff_unique_dep": "Information Sciences Institute;Alexa Natural Understanding", + "aff_unique_url": "https://isi.usc.edu;https://www.amazon.com", + "aff_unique_abbr": "USC ISI;Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26781", + "title": "User-Oriented Robust Reinforcement Learning", + "track": "aaai special track", + "status": "Technical", + "abstract": "Recently, improving the robustness of policies across different environments attracts increasing attention in the reinforcement learning (RL) community. Existing robust RL methods mostly aim to achieve the max-min robustness by optimizing the policy\u2019s performance in the worst-case environment. However, in practice, a user that uses an RL policy may have different preferences over its performance across environments. 
Clearly, the aforementioned max-min robustness is oftentimes too conservative to satisfy user preference. Therefore, in this paper, we integrate user preference into policy learning in robust RL, and propose a novel User-Oriented Robust RL (UOR-RL) framework. Specifically, we define a new User-Oriented Robustness (UOR) metric for RL, which allocates different weights to the environments according to user preference and generalizes the max-min robustness metric. To optimize the UOR metric, we develop two different UOR-RL training algorithms for the scenarios with or without a priori known environment distribution, respectively. Theoretically, we prove that our UOR-RL training algorithms converge to near-optimal policies even with inaccurate or completely no knowledge about the environment distribution. Furthermore, we carry out extensive experimental evaluations in 6 MuJoCo tasks. The experimental results demonstrate that UOR-RL is comparable to the state-of-the-art baselines under the average-case and worst-case performance metrics, and more importantly establishes new state-of-the-art performance under the UOR metric.", + "primary_area": "safe and robust ai", + "author": "Haoyi You; Beichen Yu; Haiming Jin; Zhaoxing Yang; Jiahui Sun", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "bibtex": "@article{You_Yu_Jin_Yang_Sun_2023, title={User-Oriented Robust Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26781}, DOI={10.1609/aaai.v37i12.26781}, abstractNote={Recently, improving the robustness of policies across different environments attracts increasing attention in the reinforcement learning (RL) community. Existing robust RL methods mostly aim to achieve the max-min robustness by optimizing the policy\u2019s performance in the worst-case environment. 
However, in practice, a user that uses an RL policy may have different preferences over its performance across environments. Clearly, the aforementioned max-min robustness is oftentimes too conservative to satisfy user preference. Therefore, in this paper, we integrate user preference into policy learning in robust RL, and propose a novel User-Oriented Robust RL (UOR-RL) framework. Specifically, we define a new User-Oriented Robustness (UOR) metric for RL, which allocates different weights to the environments according to user preference and generalizes the max-min robustness metric. To optimize the UOR metric, we develop two different UOR-RL training algorithms for the scenarios with or without a priori known environment distribution, respectively. Theoretically, we prove that our UOR-RL training algorithms converge to near-optimal policies even with inaccurate or completely no knowledge about the environment distribution. Furthermore, we carry out extensive experimental evaluations in 6 MuJoCo tasks. 
The experimental results demonstrate that UOR-RL is comparable to the state-of-the-art baselines under the average-case and worst-case performance metrics, and more importantly establishes new state-of-the-art performance under the UOR metric.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={You, Haoyi and Yu, Beichen and Jin, Haiming and Yang, Zhaoxing and Sun, Jiahui}, year={2023}, month={Jun.}, pages={15269-15277} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26781/26553", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26781", + "pdf_size": 241919, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:gyri4OyNASoJ:scholar.google.com/&scioq=User-Oriented+Robust+Reinforcement+Learning&hl=en&as_sdt=0,5", + "gs_version_total": 6, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25718", + "title": "Utility Maximizer or Value Maximizer: Mechanism Design for Mixed Bidders in Online Advertising", + "track": "main", + "status": "Technical", + "abstract": "Digital advertising constitutes one of the main revenue sources for online platforms. In recent years, some advertisers tend to adopt auto-bidding tools to facilitate advertising performance optimization, making the classical utility maximizer model in auction theory not fit well. 
Some recent studies proposed a new model, called value maximizer, for auto-bidding advertisers with return-on-investment (ROI) constraints. However, the model of either utility maximizer or value maximizer could only characterize partial advertisers in real-world advertising platforms. In a mixed environment where utility maximizers and value maximizers coexist, the truthful ad auction design would be challenging since bidders could manipulate both their values and affiliated classes, leading to a multi-parameter mechanism design problem. In this work, we address this issue by proposing a payment rule which combines the corresponding ones in classical VCG and GSP mechanisms in a novel way. Based on this payment rule, we propose a truthful auction mechanism with an approximation ratio of 2 on social welfare, which is close to the lower bound of at least 5/4 that we also prove. The designed auction mechanism is a generalization of VCG for utility maximizers and GSP for value maximizers.", + "primary_area": "game theory and economic paradigms", + "author": "Hongtao Lv; Zhilin Zhang; Zhenzhe Zheng; Jinghan Liu; Chuan Yu; Lei Liu; Lizhen Cui; Fan Wu", + "authorids": "", + "aff": "School of Software & Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, China; Alibaba Group, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, China+Department of Computer Science and Engineering, Shanghai Jiao Tong University, China; SJTU-ParisTech Elite Institute of Technology, Shanghai Jiao Tong University, China; Alibaba Group, China; School of Software & Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, China; School of Software & Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR), Shandong University, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, China", + "bibtex": "@article{Lv_Zhang_Zheng_Liu_Yu_Liu_Cui_Wu_2023, 
title={Utility Maximizer or Value Maximizer: Mechanism Design for Mixed Bidders in Online Advertising}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25718}, DOI={10.1609/aaai.v37i5.25718}, abstractNote={Digital advertising constitutes one of the main revenue sources for online platforms. In recent years, some advertisers tend to adopt auto-bidding tools to facilitate advertising performance optimization, making the classical utility maximizer model in auction theory not fit well. Some recent studies proposed a new model, called value maximizer, for auto-bidding advertisers with return-on-investment (ROI) constraints. However, the model of either utility maximizer or value maximizer could only characterize partial advertisers in real-world advertising platforms. In a mixed environment where utility maximizers and value maximizers coexist, the truthful ad auction design would be challenging since bidders could manipulate both their values and affiliated classes, leading to a multi-parameter mechanism design problem. In this work, we address this issue by proposing a payment rule which combines the corresponding ones in classical VCG and GSP mechanisms in a novel way. Based on this payment rule, we propose a truthful auction mechanism with an approximation ratio of 2 on social welfare, which is close to the lower bound of at least 5/4 that we also prove. 
The designed auction mechanism is a generalization of VCG for utility maximizers and GSP for value maximizers.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lv, Hongtao and Zhang, Zhilin and Zheng, Zhenzhe and Liu, Jinghan and Yu, Chuan and Liu, Lei and Cui, Lizhen and Wu, Fan}, year={2023}, month={Jun.}, pages={5789-5796} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25718/25490", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25718", + "pdf_size": 188437, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11854749340724210270&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "sdu.edu.cn;alibaba-inc.com;sjtu.edu.cn;sjtu.edu.cn;alibaba-inc.com;sdu.edu.cn;sdu.edu.cn;sjtu.edu.cn", + "email": "sdu.edu.cn;alibaba-inc.com;sjtu.edu.cn;sjtu.edu.cn;alibaba-inc.com;sdu.edu.cn;sdu.edu.cn;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2+2;2;1;0;0;2", + "aff_unique_norm": "Shandong University;Alibaba Group;Shanghai Jiao Tong University", + "aff_unique_dep": "School of Software & Joint SDU-NTU Centre for Artificial Intelligence Research (C-FAIR);;Department of Computer Science and Engineering", + "aff_unique_url": "http://www.sdu.edu.cn;https://www.alibaba.com;https://www.sjtu.edu.cn", + "aff_unique_abbr": ";Alibaba;SJTU", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";ParisTech", + "aff_country_unique_index": "0;0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25817", + "title": "Utilizing Prior Solutions for Reward Shaping and Composition in Entropy-Regularized Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "In reinforcement learning (RL), the ability to utilize prior knowledge from previously solved tasks can allow agents to quickly solve new problems. 
In some cases, these new problems may be approximately solved by composing the solutions of previously solved primitive tasks (task composition). Otherwise, prior knowledge can be used to adjust the reward function for a new problem, in a way that leaves the optimal policy unchanged but enables quicker learning (reward shaping). In this work, we develop a general framework for reward shaping and task composition in entropy-regularized RL. To do so, we derive an exact relation connecting the optimal soft value functions for two entropy-regularized RL problems with different reward functions and dynamics. We show how the derived relation leads to a general result for reward shaping in entropy-regularized RL. We then generalize this approach to derive an exact relation connecting optimal value functions for the composition of multiple tasks in entropy-regularized RL. We validate these theoretical contributions with experiments showing that reward shaping and task composition lead to faster learning in various settings.", + "primary_area": "machine learning i", + "author": "Jacob Adamczyk; Argenis Arriojas; Stas Tiomkin; Rahul V. Kulkarni", + "authorids": "", + "aff": "Department of Physics, University of Massachusetts Boston; Department of Physics, University of Massachusetts Boston; Department of Computer Engineering, San Jos\u00e9 State University; Department of Physics, University of Massachusetts Boston", + "bibtex": "@article{Adamczyk_Arriojas_Tiomkin_Kulkarni_2023, title={Utilizing Prior Solutions for Reward Shaping and Composition in Entropy-Regularized Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25817}, DOI={10.1609/aaai.v37i6.25817}, abstractNote={In reinforcement learning (RL), the ability to utilize prior knowledge from previously solved tasks can allow agents to quickly solve new problems. 
In some cases, these new problems may be approximately solved by composing the solutions of previously solved primitive tasks (task composition). Otherwise, prior knowledge can be used to adjust the reward function for a new problem, in a way that leaves the optimal policy unchanged but enables quicker learning (reward shaping). In this work, we develop a general framework for reward shaping and task composition in entropy-regularized RL. To do so, we derive an exact relation connecting the optimal soft value functions for two entropy-regularized RL problems with different reward functions and dynamics. We show how the derived relation leads to a general result for reward shaping in entropy-regularized RL. We then generalize this approach to derive an exact relation connecting optimal value functions for the composition of multiple tasks in entropy-regularized RL. We validate these theoretical contributions with experiments showing that reward shaping and task composition lead to faster learning in various settings.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Adamczyk, Jacob and Arriojas, Argenis and Tiomkin, Stas and Kulkarni, Rahul V.}, year={2023}, month={Jun.}, pages={6658-6665} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25817/25589", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25817", + "pdf_size": 929562, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7880509934245190295&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "umb.edu;umb.edu;sjsu.edu;umb.edu", + "email": "umb.edu;umb.edu;sjsu.edu;umb.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Massachusetts Boston;San Jos\u00e9 State University", + "aff_unique_dep": "Department of Physics;Department of Computer Engineering", + "aff_unique_url": "https://www.umb.edu;https://www.sjsu.edu", 
+ "aff_unique_abbr": "UMass Boston;SJSU", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Boston;San Jos\u00e9", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25096", + "title": "VASR: Visual Analogies of Situation Recognition", + "track": "main", + "status": "Technical", + "abstract": "A core process in human cognition is analogical mapping: the ability to identify a similar relational structure between different situations.\nWe introduce a novel task, Visual Analogies of Situation Recognition, adapting the classical word-analogy task into the visual domain. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. \n\nWe leverage situation recognition annotations and the CLIP model to generate a large set of 500k candidate analogies. Crowdsourced annotations for a sample of the data indicate that humans agree with the dataset label ~80% of the time (chance level 25%). Furthermore, we use human annotations to create a gold-standard dataset of 3,820 validated analogies.\nOur experiments demonstrate that state-of-the-art models do well when distractors are chosen randomly (~86%), but struggle with carefully chosen distractors (~53%, compared to 90% human accuracy). We hope our dataset will encourage the development of new analogy-making models. 
Website: https://vasr-dataset.github.io/", + "primary_area": "computer vision i", + "author": "Yonatan Bitton; Ron Yosef; Eliyahu Strugo; Dafna Shahaf; Roy Schwartz; Gabriel Stanovsky", + "authorids": "", + "aff": "The Hebrew University of Jerusalem; The Hebrew University of Jerusalem; The Hebrew University of Jerusalem; The Hebrew University of Jerusalem; The Hebrew University of Jerusalem; The Hebrew University of Jerusalem", + "bibtex": "@article{Bitton_Yosef_Strugo_Shahaf_Schwartz_Stanovsky_2023, title={VASR: Visual Analogies of Situation Recognition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25096}, DOI={10.1609/aaai.v37i1.25096}, abstractNote={A core process in human cognition is analogical mapping: the ability to identify a similar relational structure between different situations.\nWe introduce a novel task, Visual Analogies of Situation Recognition, adapting the classical word-analogy task into the visual domain. Given a triplet of images, the task is to select an image candidate B\u2019 that completes the analogy (A to A\u2019 is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. We leverage situation recognition annotations and the CLIP model to generate a large set of 500k candidate analogies. Crowdsourced annotations for a sample of the data indicate that humans agree with the dataset label ~80% of the time (chance level 25%). Furthermore, we use human annotations to create a gold-standard dataset of 3,820 validated analogies.\nOur experiments demonstrate that state-of-the-art models do well when distractors are chosen randomly (~86%), but struggle with carefully chosen distractors (~53%, compared to 90% human accuracy). We hope our dataset will encourage the development of new analogy-making models. 
Website: https://vasr-dataset.github.io/}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Bitton, Yonatan and Yosef, Ron and Strugo, Eliyahu and Shahaf, Dafna and Schwartz, Roy and Stanovsky, Gabriel}, year={2023}, month={Jun.}, pages={241-249} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25096/24868", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25096", + "pdf_size": 7221315, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2428311112775512934&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il", + "email": "mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il", + "github": "", + "project": "https://vasr-dataset.github.io/", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "The Hebrew University of Jerusalem", + "aff_unique_dep": "", + "aff_unique_url": "https://www.huji.ac.il", + "aff_unique_abbr": "HUJI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "article-26036", + "title": "VBLC: Visibility Boosting and Logit-Constraint Learning for Domain Adaptive Semantic Segmentation under Adverse Conditions", + "track": "main", + "status": "Technical", + "abstract": "Generalizing models trained on normal visual conditions to target domains under adverse conditions is demanding in the practical systems. One prevalent solution is to bridge the domain gap between clear- and adverse-condition images to make satisfactory prediction on the target. However, previous methods often reckon on additional reference images of the same scenes taken from normal conditions, which are quite tough to collect in reality. 
Furthermore, most of them mainly focus on individual adverse condition such as nighttime or foggy, weakening the model versatility when encountering other adverse weathers. To overcome the above limitations, we propose a novel framework, Visibility Boosting and Logit-Constraint learning (VBLC), tailored for superior normal-to-adverse adaptation. VBLC explores the potential of getting rid of reference images and resolving the mixture of adverse conditions simultaneously. In detail, we first propose the visibility boost module to dynamically improve target images via certain priors in the image level. Then, we figure out the overconfident drawback in the conventional cross-entropy loss for self-training method and devise the logit-constraint learning, which enforces a constraint on logit outputs during training to mitigate this pain point. To the best of our knowledge, this is a new perspective for tackling such a challenging task. Extensive experiments on two normal-to-adverse domain adaptation benchmarks, i.e., Cityscapes to ACDC and Cityscapes to FoggyCityscapes + RainCityscapes, verify the effectiveness of VBLC, where it establishes the new state of the art. 
Code is available at https://github.com/BIT-DA/VBLC.", + "primary_area": "machine learning ii", + "author": "Mingjia Li; Binhui Xie; Shuang Li; Chi Harold Liu; Xinjing Cheng", + "authorids": "", + "aff": "School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China; School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China; School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China; School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China; School of Software, BNRist, Tsinghua University, Beijing, China+Inceptio Technology, Shanghai, China", + "bibtex": "@article{Li_Xie_Li_Liu_Cheng_2023, title={VBLC: Visibility Boosting and Logit-Constraint Learning for Domain Adaptive Semantic Segmentation under Adverse Conditions}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26036}, DOI={10.1609/aaai.v37i7.26036}, abstractNote={Generalizing models trained on normal visual conditions to target domains under adverse conditions is demanding in the practical systems. One prevalent solution is to bridge the domain gap between clear- and adverse-condition images to make satisfactory prediction on the target. However, previous methods often reckon on additional reference images of the same scenes taken from normal conditions, which are quite tough to collect in reality. Furthermore, most of them mainly focus on individual adverse condition such as nighttime or foggy, weakening the model versatility when encountering other adverse weathers. To overcome the above limitations, we propose a novel framework, Visibility Boosting and Logit-Constraint learning (VBLC), tailored for superior normal-toadverse adaptation. VBLC explores the potential of getting rid of reference images and resolving the mixture of adverse conditions simultaneously. 
In detail, we first propose the visibility boost module to dynamically improve target images via certain priors in the image level. Then, we figure out the overconfident drawback in the conventional cross-entropy loss for self-training method and devise the logit-constraint learning, which enforces a constraint on logit outputs during training to mitigate this pain point. To the best of our knowledge, this is a new perspective for tackling such a challenging task. Extensive experiments on two normal-to-adverse domain adaptation benchmarks, i.e., Cityscapes to ACDC and Cityscapes to FoggyCityscapes + RainCityscapes, verify the effectiveness of VBLC, where it establishes the new state of the art. Code is available at https://github.com/BIT-DA/VBLC.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Mingjia and Xie, Binhui and Li, Shuang and Liu, Chi Harold and Cheng, Xinjing}, year={2023}, month={Jun.}, pages={8605-8613} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26036/25808", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26036", + "pdf_size": 3910263, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13357063075812500905&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 8, + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn;gmail.com", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn;gmail.com", + "github": "https://github.com/BIT-DA/VBLC", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1+2", + "aff_unique_norm": "Beijing Institute of Technology;Tsinghua University;Inceptio Technology", + "aff_unique_dep": "School of Computer Science and Technology;School of Software;", + "aff_unique_url": "http://www.bit.edu.cn;https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "BIT;THU;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0+0", + 
"aff_country_unique": "China" + }, + { + "id": "article-26094", + "title": "VIDM: Video Implicit Diffusion Models", + "track": "main", + "status": "Technical", + "abstract": "Diffusion models have emerged as a powerful generative method for synthesizing high-quality and diverse set of images. In this paper, we propose a video generation method based on diffusion models, where the effects of motion are modeled in an implicit condition manner, i.e. one can sample plausible video motions according to the latent feature of frames. We improve the quality of the generated videos by proposing multiple strategies such as sampling space truncation, robustness penalty, and positional group normalization. Various experiments are conducted on datasets consisting of videos with different resolutions and different number of frames. Results show that the proposed method outperforms the state-of-the-art generative adversarial network-based methods by a significant margin in terms of FVD scores as well as perceptible visual quality.", + "primary_area": "machine learning iii", + "author": "Kangfu Mei; Vishal Patel", + "authorids": "", + "aff": "Johns Hopkins University; Johns Hopkins University", + "bibtex": "@article{Mei_Patel_2023, title={VIDM: Video Implicit Diffusion Models}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26094}, DOI={10.1609/aaai.v37i8.26094}, abstractNote={Diffusion models have emerged as a powerful generative method for synthesizing high-quality and diverse set of images. In this paper, we propose a video generation method based on diffusion models, where the effects of motion are modeled in an implicit condition manner, i.e. one can sample plausible video motions according to the latent feature of frames. We improve the quality of the generated videos by proposing multiple strategies such as sampling space truncation, robustness penalty, and positional group normalization. 
Various experiments are conducted on datasets consisting of videos with different resolutions and different number of frames. Results show that the proposed method outperforms the state-of-the-art generative adversarial network-based methods by a significant margin in terms of FVD scores as well as perceptible visual quality.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mei, Kangfu and Patel, Vishal}, year={2023}, month={Jun.}, pages={9117-9125} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26094/25866", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26094", + "pdf_size": 2008345, + "gs_citation": 96, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=789831927393914851&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff_domain": ";", + "email": ";", + "github": "", + "project": "https://kfmei.page/vidm/", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25412", + "title": "VLTinT: Visual-Linguistic Transformer-in-Transformer for Coherent Video Paragraph Captioning", + "track": "main", + "status": "Technical", + "abstract": "Video Paragraph Captioning aims to generate a multi-sentence description of an untrimmed video with multiple temporal event locations in a coherent storytelling. \nFollowing the human perception process, where the scene is effectively understood by decomposing it into visual (e.g. human, animal) and non-visual components (e.g. action, relations) under the mutual influence of vision and language, we first propose a visual-linguistic (VL) feature. 
In the proposed VL feature, the scene is modeled by three modalities including (i) a global visual environment; (ii) local visual main agents; (iii) linguistic scene elements. We then introduce an autoregressive Transformer-in-Transformer (TinT) to simultaneously capture the semantic coherence of intra- and inter-event contents within a video. Finally, we present a new VL contrastive loss function to guarantee the learnt embedding features are consistent with the captions semantics. Comprehensive experiments and extensive ablation studies on the ActivityNet Captions and YouCookII datasets show that the proposed Visual-Linguistic Transformer-in-Transform (VLTinT) outperforms previous state-of-the-art methods in terms of accuracy and diversity. The source code is made publicly available at: https://github.com/UARK-AICV/VLTinT.", + "primary_area": "computer vision iii", + "author": "Kashu Yamazaki; Khoa Vo; Quang Sang Truong; Bhiksha Raj; Ngan Le", + "authorids": "", + "aff": "AICV Lab, University of Arkansas, Fayetteville, Arkansas, USA; AICV Lab, University of Arkansas, Fayetteville, Arkansas, USA; AICV Lab, University of Arkansas, Fayetteville, Arkansas, USA; Carnegie Mellon University, Pittsburgh, Pennsylvania, USA + Mohammed bin Zayed University of AI; AICV Lab, University of Arkansas, Fayetteville, Arkansas, USA", + "bibtex": "@article{Yamazaki_Vo_Truong_Raj_Le_2023, title={VLTinT: Visual-Linguistic Transformer-in-Transformer for Coherent Video Paragraph Captioning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25412}, DOI={10.1609/aaai.v37i3.25412}, abstractNote={Video Paragraph Captioning aims to generate a multi-sentence description of an untrimmed video with multiple temporal event locations in a coherent storytelling. Following the human perception process, where the scene is effectively understood by decomposing it into visual (e.g. human, animal) and non-visual components (e.g. 
action, relations) under the mutual influence of vision and language, we first propose a visual-linguistic (VL) feature. In the proposed VL feature, the scene is modeled by three modalities including (i) a global visual environment; (ii) local visual main agents; (iii) linguistic scene elements. We then introduce an autoregressive Transformer-in-Transformer (TinT) to simultaneously capture the semantic coherence of intra- and inter-event contents within a video. Finally, we present a new VL contrastive loss function to guarantee the learnt embedding features are consistent with the captions semantics. Comprehensive experiments and extensive ablation studies on the ActivityNet Captions and YouCookII datasets show that the proposed Visual-Linguistic Transformer-in-Transform (VLTinT) outperforms previous state-of-the-art methods in terms of accuracy and diversity. The source code is made publicly available at: https://github.com/UARK-AICV/VLTinT.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yamazaki, Kashu and Vo, Khoa and Truong, Quang Sang and Raj, Bhiksha and Le, Ngan}, year={2023}, month={Jun.}, pages={3081-3090} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25412/25184", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25412", + "pdf_size": 3884458, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10720316826422217400&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "uark.edu;uark.edu;uark.edu;cs.cmu.edu;uark.edu", + "email": "uark.edu;uark.edu;uark.edu;cs.cmu.edu;uark.edu", + "github": "https://github.com/UARK-AICV/VLTinT", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+2;0", + "aff_unique_norm": "University of Arkansas;Carnegie Mellon University;Mohammed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": "AICV Lab;;", + "aff_unique_url": 
"https://www.uark.edu;https://www.cmu.edu;https://mbzuai.ac.ae", + "aff_unique_abbr": "UARK;CMU;MBZUAI", + "aff_campus_unique_index": "0;0;0;1;0", + "aff_campus_unique": "Fayetteville;Pittsburgh;", + "aff_country_unique_index": "0;0;0;0+1;0", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "article-26311", + "title": "Value-Consistent Representation Learning for Data-Efficient Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Deep reinforcement learning (RL) algorithms suffer severe performance degradation when the interaction data is scarce, which limits their real-world application. Recently, visual representation learning has been shown to be effective and promising for boosting sample efficiency in RL. These methods usually rely on contrastive learning and data augmentation to train a transition model, which is different from how the model is used in RL---performing value-based planning. Accordingly, the learned representation by these visual methods may be good for recognition but not optimal for estimating state value and solving the decision problem. To address this issue, we propose a novel method, called value-consistent representation learning (VCR), to learn representations that are directly related to decision-making. More specifically, VCR trains a model to predict the future state (also referred to as the \"imagined state'') based on the current one and a sequence of actions. Instead of aligning this imagined state with a real state returned by the environment, VCR applies a Q value head on both of the states and obtains two distributions of action values. Then a distance is computed and minimized to force the imagined state to produce a similar action value prediction as that by the real state. We develop two implementations of the above idea for the discrete and continuous action spaces respectively. 
We conduct experiments on Atari 100k and DeepMind Control Suite benchmarks to validate their effectiveness for improving sample efficiency. It has been demonstrated that our methods achieve new state-of-the-art performance for search-free RL algorithms.", + "primary_area": "machine learning iv", + "author": "Yang Yue; Bingyi Kang; Zhongwen Xu; Gao Huang; Shuicheng Yan", + "authorids": "", + "aff": "Department of Automation, BNRist, Tsinghua University + Sea AI Lab; Sea AI Lab; Sea AI Lab; Department of Automation, BNRist, Tsinghua University; Sea AI Lab", + "bibtex": "@article{Yue_Kang_Xu_Huang_Yan_2023, title={Value-Consistent Representation Learning for Data-Efficient Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26311}, DOI={10.1609/aaai.v37i9.26311}, abstractNote={Deep reinforcement learning (RL) algorithms suffer severe performance degradation when the interaction data is scarce, which limits their real-world application. Recently, visual representation learning has been shown to be effective and promising for boosting sample efficiency in RL. These methods usually rely on contrastive learning and data augmentation to train a transition model, which is different from how the model is used in RL---performing value-based planning. Accordingly, the learned representation by these visual methods may be good for recognition but not optimal for estimating state value and solving the decision problem. To address this issue, we propose a novel method, called value-consistent representation learning (VCR), to learn representations that are directly related to decision-making. More specifically, VCR trains a model to predict the future state (also referred to as the "imagined state\u2019\u2019) based on the current one and a sequence of actions. 
Instead of aligning this imagined state with a real state returned by the environment, VCR applies a Q value head on both of the states and obtains two distributions of action values. Then a distance is computed and minimized to force the imagined state to produce a similar action value prediction as that by the real state. We develop two implementations of the above idea for the discrete and continuous action spaces respectively. We conduct experiments on Atari 100k and DeepMind Control Suite benchmarks to validate their effectiveness for improving sample efficiency. It has been demonstrated that our methods achieve new state-of-the-art performance for search-free RL algorithms.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yue, Yang and Kang, Bingyi and Xu, Zhongwen and Huang, Gao and Yan, Shuicheng}, year={2023}, month={Jun.}, pages={11069-11077} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26311/26083", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26311", + "pdf_size": 408283, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5576975996385289893&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;sea.com;sea.com;tsinghua.edu.cn;sea.com", + "email": "gmail.com;sea.com;sea.com;tsinghua.edu.cn;sea.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;0;1", + "aff_unique_norm": "Tsinghua University;Sea AI Lab", + "aff_unique_dep": "Department of Automation;", + "aff_unique_url": "https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "Tsinghua;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China;" + }, + { + "id": "article-25991", + "title": "Variable-Based Calibration for Machine Learning Classifiers", + "track": "main", + "status": "Technical", + "abstract": "The deployment of machine learning classifiers in 
high-stakes domains requires well-calibrated confidence scores for model predictions. In this paper we introduce the notion of variable-based calibration to characterize calibration properties of a model with respect to a variable of interest, generalizing traditional score-based metrics such as expected calibration error (ECE). In particular, we find that models with near-perfect ECE can exhibit significant miscalibration as a function of features of the data. We demonstrate this phenomenon both theoretically and in practice on multiple well-known datasets, and show that it can persist after the application of existing calibration methods. To mitigate this issue, we propose strategies for detection, visualization, and quantification of variable-based calibration error. We then examine the limitations of current score-based calibration methods and explore potential modifications. Finally, we discuss the implications of these findings, emphasizing that an understanding of calibration beyond simple aggregate measures is crucial for endeavors such as fairness and model interpretability.", + "primary_area": "machine learning ii", + "author": "Markelle Kelly; Padhraic Smyth", + "authorids": "", + "aff": "University of California, Irvine; University of California, Irvine", + "bibtex": "@article{Kelly_Smyth_2023, title={Variable-Based Calibration for Machine Learning Classifiers}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25991}, DOI={10.1609/aaai.v37i7.25991}, abstractNote={The deployment of machine learning classifiers in high-stakes domains requires well-calibrated confidence scores for model predictions. In this paper we introduce the notion of variable-based calibration to characterize calibration properties of a model with respect to a variable of interest, generalizing traditional score-based metrics such as expected calibration error (ECE). 
In particular, we find that models with near-perfect ECE can exhibit significant miscalibration as a function of features of the data. We demonstrate this phenomenon both theoretically and in practice on multiple well-known datasets, and show that it can persist after the application of existing calibration methods. To mitigate this issue, we propose strategies for detection, visualization, and quantification of variable-based calibration error. We then examine the limitations of current score-based calibration methods and explore potential modifications. Finally, we discuss the implications of these findings, emphasizing that an understanding of calibration beyond simple aggregate measures is crucial for endeavors such as fairness and model interpretability.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kelly, Markelle and Smyth, Padhraic}, year={2023}, month={Jun.}, pages={8211-8219} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25991/25763", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25991", + "pdf_size": 908936, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9942315485162736508&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "uci.edu;ics.uci.edu", + "email": "uci.edu;ics.uci.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, Irvine", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uci.edu", + "aff_unique_abbr": "UCI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Irvine", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25873", + "title": "Variational Wasserstein Barycenters with C-cyclical Monotonicity Regularization", + "track": "main", + "status": "Technical", + "abstract": "Wasserstein barycenter, built on the theory of Optimal Transport (OT), provides a 
powerful framework to aggregate probability distributions, and it has increasingly attracted great attention within the machine learning community. However, it is often intractable to precisely compute, especially for high dimensional and continuous settings. To alleviate this problem, we develop a novel regularization by using the fact that c-cyclical monotonicity is often necessary and sufficient conditions for optimality in OT problems, and incorporate it into the dual formulation of Wasserstein barycenters. For efficient computations, we adopt a variational distribution as the approximation of the true continuous barycenter, so as to frame the Wasserstein barycenters problem as an optimization problem with respect to variational parameters. Upon those ideas, we propose a novel end-to-end continuous approximation method, namely Variational Wasserstein Barycenters with c-Cyclical Monotonicity Regularization (VWB-CMR), given sample access to the input distributions. We show theoretical convergence analysis and demonstrate the superior performance of VWB-CMR on synthetic data and real applications of subset posterior aggregation.", + "primary_area": "machine learning i", + "author": "Jinjin Chi; Zhiyao Yang; Ximing Li; Jihong Ouyang; Renchu Guan", + "authorids": "", + "aff": "College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, China; College of Computer Science and Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, China; College of Computer Science and 
Technology, Jilin University, China+Key Laboratory of Symbolic Computation and Knowledge Engineering of Ministry of Education, China", + "bibtex": "@article{Chi_Yang_Li_Ouyang_Guan_2023, title={Variational Wasserstein Barycenters with C-cyclical Monotonicity Regularization}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25873}, DOI={10.1609/aaai.v37i6.25873}, abstractNote={Wasserstein barycenter, built on the theory of Optimal Transport (OT), provides a powerful framework to aggregate probability distributions, and it has increasingly attracted great attention within the machine learning community. However, it is often intractable to precisely compute, especially for high dimensional and continuous settings. To alleviate this problem, we develop a novel regularization by using the fact that c-cyclical monotonicity is often necessary and sufficient conditions for optimality in OT problems, and incorporate it into the dual formulation of Wasserstein barycenters. For efficient computations, we adopt a variational distribution as the approximation of the true continuous barycenter, so as to frame the Wasserstein barycenters problem as an optimization problem with respect to variational parameters. Upon those ideas, we propose a novel end-to-end continuous approximation method, namely Variational Wasserstein Barycenters with c-Cyclical Monotonicity Regularization (VWB-CMR), given sample access to the input distributions. 
We show theoretical convergence analysis and demonstrate the superior performance of VWB-CMR on synthetic data and real applications of subset posterior aggregation.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chi, Jinjin and Yang, Zhiyao and Li, Ximing and Ouyang, Jihong and Guan, Renchu}, year={2023}, month={Jun.}, pages={7157-7165} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25873/25645", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25873", + "pdf_size": 666139, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11783603072349303771&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com;gmail.com;jlu.edu.cn;jlu.edu.cn", + "email": "gmail.com;gmail.com;gmail.com;jlu.edu.cn;jlu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Jilin University;Key Laboratory of Symbolic Computation and Knowledge Engineering", + "aff_unique_dep": "College of Computer Science and Technology;Ministry of Education", + "aff_unique_url": "http://www.jlu.edu.cn;", + "aff_unique_abbr": "JLU;", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26450", + "title": "Vector Causal Inference between Two Groups of Variables", + "track": "main", + "status": "Technical", + "abstract": "Methods to identify cause-effect relationships currently mostly assume the variables to be scalar random variables. However, in many fields the objects of interest are vectors or groups of scalar variables.\nWe present a new constraint-based non-parametric approach for inferring the causal relationship between two vector-valued random variables from observational data. 
Our method employs sparsity estimates of directed and undirected graphs and is based on two new principles for groupwise causal reasoning that we justify theoretically in Pearl's graphical model-based causality framework. Our theoretical considerations are complemented by two new causal discovery algorithms for causal interactions between two random vectors which find the correct causal direction reliably in simulations even if interactions are nonlinear. We evaluate our methods empirically and compare them to other state-of-the-art techniques.", + "primary_area": "reasoning under uncertainty", + "author": "Jonas Wahl; Urmi Ninad; Jakob Runge", + "authorids": "", + "aff": "Technische Universit\u00e4t Berlin+DLR Institut f\u00fcr Datenwissenschaften Jena; Technische Universit\u00e4t Berlin+DLR Institut f\u00fcr Datenwissenschaften Jena; Technische Universit\u00e4t Berlin+DLR Institut f\u00fcr Datenwissenschaften Jena", + "bibtex": "@article{Wahl_Ninad_Runge_2023, title={Vector Causal Inference between Two Groups of Variables}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26450}, DOI={10.1609/aaai.v37i10.26450}, abstractNote={Methods to identify cause-effect relationships currently mostly assume the variables to be scalar random variables. However, in many fields the objects of interest are vectors or groups of scalar variables.\nWe present a new constraint-based non-parametric approach for inferring the causal relationship between two vector-valued random variables from observational data. Our method employs sparsity estimates of directed and undirected graphs and is based on two new principles for groupwise causal reasoning that we justify theoretically in Pearl\u2019s graphical model-based causality framework. 
Our theoretical considerations are complemented by two new causal discovery algorithms for causal interactions between two random vectors which find the correct causal direction reliably in simulations even if interactions are nonlinear. We evaluate our methods empirically and compare them to other state-of-the-art techniques.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wahl, Jonas and Ninad, Urmi and Runge, Jakob}, year={2023}, month={Jun.}, pages={12305-12312} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26450/26222", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26450", + "pdf_size": 418252, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5439300575622940097&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "tu-berlin.de;tu-berlin.de;tu-berlin.de", + "email": "tu-berlin.de;tu-berlin.de;tu-berlin.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;Deutsches Zentrum f\u00fcr Luft- und Raumfahrt", + "aff_unique_dep": ";Institut f\u00fcr Datenwissenschaften", + "aff_unique_url": "https://www.tu-berlin.de;https://www.dlr.de", + "aff_unique_abbr": "TU Berlin;DLR", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Jena", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25848", + "title": "Very Fast, Approximate Counterfactual Explanations for Decision Forests", + "track": "main", + "status": "Technical", + "abstract": "We consider finding a counterfactual explanation for a classification or regression forest, such as a random forest. This requires solving an optimization problem to find the closest input instance to a given instance for which the forest outputs a desired value. 
Finding an exact solution has a cost that is exponential on the number of leaves in the forest. We propose a simple but very effective approach: we constrain the optimization to input space regions populated by actual data points. The problem reduces to a form of nearest-neighbor search using a certain distance on a certain dataset. This has two advantages: first, the solution can be found very quickly, scaling to large forests and high-dimensional data, and enabling interactive use. Second, the solution found is more likely to be realistic in that it is guided towards high-density areas of input space.", + "primary_area": "machine learning i", + "author": "Miguel \u00c1. Carreira-Perpinan; Suryabhan Singh Hada", + "authorids": "", + "aff": "Dept. Computer Science & Engineering, University of California, Merced; Dept. Computer Science & Engineering, University of California, Merced", + "bibtex": "@article{Carreira-Perpinan_Hada_2023, title={Very Fast, Approximate Counterfactual Explanations for Decision Forests}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25848}, DOI={10.1609/aaai.v37i6.25848}, abstractNote={We consider finding a counterfactual explanation for a classification or regression forest, such as a random forest. This requires solving an optimization problem to find the closest input instance to a given instance for which the forest outputs a desired value. Finding an exact solution has a cost that is exponential on the number of leaves in the forest. We propose a simple but very effective approach: we constrain the optimization to input space regions populated by actual data points. The problem reduces to a form of nearest-neighbor search using a certain distance on a certain dataset. This has two advantages: first, the solution can be found very quickly, scaling to large forests and high-dimensional data, and enabling interactive use. 
Second, the solution found is more likely to be realistic in that it is guided towards high-density areas of input space.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Carreira-Perpinan, Miguel \u00c1. and Hada, Suryabhan Singh}, year={2023}, month={Jun.}, pages={6935-6943} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25848/25620", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25848", + "pdf_size": 257539, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14529937519095595717&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "ucmerced.edu;ucmerced.edu", + "email": "ucmerced.edu;ucmerced.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, Merced", + "aff_unique_dep": "Department of Computer Science & Engineering", + "aff_unique_url": "https://www.ucmerced.edu", + "aff_unique_abbr": "UC Merced", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Merced", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26860", + "title": "Vessel-to-Vessel Motion Compensation with Reinforcement Learning", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Actuation delay poses a challenge for robotic arms and cranes. This is especially the case in dynamic environments where the robot arm or the objects it is trying to manipulate are moved by exogenous forces. In this paper, we consider the task of using a robotic arm to compensate for relative motion between two vessels at sea. We construct a hybrid controller that combines an Inverse Kinematic (IK) solver with a Reinforcement Learning (RL) agent that issues small corrections to the IK input. The solution is empirically evaluated in a simulated environment under several sea states and actuation delays. 
We observe that more intense waves and larger actuation delays have an adverse effect on the IK controller's ability to compensate for vessel motion. The RL agent is shown to be effective at mitigating large parts of these errors, both in the average case and in the worst case. Its modest requirement for sensory information, combined with the inherent safety in only making small adjustments, also makes it a promising approach for real-world deployment.", + "primary_area": "emerging applications of ai", + "author": "Sverre Herland; Kerstin Bach", + "authorids": "", + "aff": "Norwegian University of Science and Technology; Norwegian University of Science and Technology", + "bibtex": "@article{Herland_Bach_2024, title={Vessel-to-Vessel Motion Compensation with Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26860}, DOI={10.1609/aaai.v37i13.26860}, abstractNote={Actuation delay poses a challenge for robotic arms and cranes. This is especially the case in dynamic environments where the robot arm or the objects it is trying to manipulate are moved by exogenous forces. In this paper, we consider the task of using a robotic arm to compensate for relative motion between two vessels at sea. We construct a hybrid controller that combines an Inverse Kinematic (IK) solver with a Reinforcement Learning (RL) agent that issues small corrections to the IK input. The solution is empirically evaluated in a simulated environment under several sea states and actuation delays. We observe that more intense waves and larger actuation delays have an adverse effect on the IK controller\u2019s ability to compensate for vessel motion. The RL agent is shown to be effective at mitigating large parts of these errors, both in the average case and in the worst case. 
Its modest requirement for sensory information, combined with the inherent safety in only making small adjustments, also makes it a promising approach for real-world deployment.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Herland, Sverre and Bach, Kerstin}, year={2024}, month={Jul.}, pages={15682-15688} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26860/26632", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26860", + "pdf_size": 4631790, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10186875390079266507&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "ntnu.no;ntnu.no", + "email": "ntnu.no;ntnu.no", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Norwegian University of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ntnu.no", + "aff_unique_abbr": "NTNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Norway" + }, + { + "id": "article-25458", + "title": "Video Compression Artifact Reduction by Fusing Motion Compensation and Global Context in a Swin-CNN Based Parallel Architecture", + "track": "main", + "status": "Technical", + "abstract": "Video Compression Artifact Reduction aims to reduce the artifacts caused by video compression algorithms and improve the quality of compressed video frames. The critical challenge in this task is to make use of the redundant high-quality information in compressed frames for compensation as much as possible. Two important possible compensations: Motion compensation and global context, are not comprehensively considered in previous works, leading to inferior results. 
The key idea of this paper is to fuse the motion compensation and global context together to gain more compensation information to improve the quality of compressed videos. Here, we propose a novel Spatio-Temporal Compensation Fusion (STCF) framework with the Parallel Swin-CNN Fusion (PSCF) block, which can simultaneously learn and merge the motion compensation and global context to reduce the video compression artifacts. Specifically, a temporal self-attention strategy based on shifted windows is developed to capture the global context in an efficient way, for which we use the Swin transformer layer in the PSCF block. Moreover, an additional Ada-CNN layer is applied in the PSCF block to extract the motion compensation. Experimental results demonstrate that our proposed STCF framework outperforms the state-of-the-art methods up to 0.23dB (27% improvement) on the MFQEv2 dataset.", + "primary_area": "computer vision iii", + "author": "Xinjian Zhang; Su Yang; Wuyang Luo; Longwen Gao; Weishan Zhang", + "authorids": "", + "aff": "School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University + Shanghai Key Laboratory of Intelligent Information Processing; Bilibili; Department of Software Engineering, China University of Petroleum (East China)", + "bibtex": "@article{Zhang_Yang_Luo_Gao_Zhang_2023, title={Video Compression Artifact Reduction by Fusing Motion Compensation and Global Context in a Swin-CNN Based Parallel Architecture}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25458}, DOI={10.1609/aaai.v37i3.25458}, abstractNote={Video Compression Artifact Reduction aims to reduce the artifacts caused by video compression algorithms and improve the quality of compressed video frames. 
The critical challenge in this task is to make use of the redundant high-quality information in compressed frames for compensation as much as possible. Two important possible compensations: Motion compensation and global context, are not comprehensively considered in previous works, leading to inferior results. The key idea of this paper is to fuse the motion compensation and global context together to gain more compensation information to improve the quality of compressed videos. Here, we propose a novel Spatio-Temporal Compensation Fusion (STCF) framework with the Parallel Swin-CNN Fusion (PSCF) block, which can simultaneously learn and merge the motion compensation and global context to reduce the video compression artifacts. Specifically, a temporal self-attention strategy based on shifted windows is developed to capture the global context in an efficient way, for which we use the Swin transformer layer in the PSCF block. Moreover, an additional Ada-CNN layer is applied in the PSCF block to extract the motion compensation. 
Experimental results demonstrate that our proposed STCF framework outperforms the state-of-the-art methods up to 0.23dB (27% improvement) on the MFQEv2 dataset.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Xinjian and Yang, Su and Luo, Wuyang and Gao, Longwen and Zhang, Weishan}, year={2023}, month={Jun.}, pages={3489-3497} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25458/25230", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25458", + "pdf_size": 8908279, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17715876689222338860&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;bilibili.com;upc.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;bilibili.com;upc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;2;3", + "aff_unique_norm": "Fudan University;Shanghai Key Laboratory of Intelligent Information Processing;Bilibili Inc.;China University of Petroleum (East China)", + "aff_unique_dep": "School of Computer Science;Intelligent Information Processing;;Department of Software Engineering", + "aff_unique_url": "https://www.fudan.edu.cn;;https://www.bilibili.com;http://www.cup.edu.cn", + "aff_unique_abbr": "Fudan;;Bilibili;CUP", + "aff_campus_unique_index": ";;;1", + "aff_campus_unique": ";East China", + "aff_country_unique_index": "0+0;0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25418", + "title": "Video Event Extraction via Tracking Visual States of Arguments", + "track": "main", + "status": "Technical", + "abstract": "Video event extraction aims to detect salient events from a video and identify the arguments for each event as well as their semantic roles. Existing methods focus on capturing the overall visual scene of each frame, ignoring fine-grained argument-level information. 
Inspired by the definition of events as changes of states, we propose a novel framework to detect video events by tracking the changes in the visual states of all involved arguments, which are expected to provide the most informative evidence for the extraction of video events. In order to capture the visual state changes of arguments, we decompose them into changes in pixels within objects, displacements of objects, and interactions among multiple arguments. We further propose Object State Embedding, Object Motion-aware Embedding and Argument Interaction Embedding to encode and track these changes respectively. Experiments on various video event extraction tasks demonstrate significant improvements compared to state-of-the-art models. In particular, on verb classification, we achieve 3.49% absolute gains (19.53% relative gains) in F1@5 on Video Situation Recognition. Our Code is publicly available at https://github.com/Shinetism/VStates for research purposes.", + "primary_area": "computer vision iii", + "author": "Guang Yang; Manling Li; Jiajie Zhang; Xudong Lin; Heng Ji; Shih-Fu Chang", + "authorids": "", + "aff": "Tsinghua University; University of Illinois at Urbana-Champaign; Tsinghua University; Columbia University; University of Illinois at Urbana-Champaign; Columbia University", + "bibtex": "@article{Yang_Li_Zhang_Lin_Ji_Chang_2023, title={Video Event Extraction via Tracking Visual States of Arguments}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25418}, DOI={10.1609/aaai.v37i3.25418}, abstractNote={Video event extraction aims to detect salient events from a video and identify the arguments for each event as well as their semantic roles. Existing methods focus on capturing the overall visual scene of each frame, ignoring fine-grained argument-level information. 
Inspired by the definition of events as changes of states, we propose a novel framework to detect video events by tracking the changes in the visual states of all involved arguments, which are expected to provide the most informative evidence for the extraction of video events. In order to capture the visual state changes of arguments, we decompose them into changes in pixels within objects, displacements of objects, and interactions among multiple arguments. We further propose Object State Embedding, Object Motion-aware Embedding and Argument Interaction Embedding to encode and track these changes respectively. Experiments on various video event extraction tasks demonstrate significant improvements compared to state-of-the-art models. In particular, on verb classification, we achieve 3.49% absolute gains (19.53% relative gains) in F1@5 on Video Situation Recognition. Our Code is publicly available at https://github.com/Shinetism/VStates for research purposes.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Guang and Li, Manling and Zhang, Jiajie and Lin, Xudong and Ji, Heng and Chang, Shih-Fu}, year={2023}, month={Jun.}, pages={3136-3144} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25418/25190", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25418", + "pdf_size": 3527743, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2881681778440576693&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mails.tsinghua.edu.cn;illinois.edu; ; ; ; ", + "email": "mails.tsinghua.edu.cn;illinois.edu; ; ; ; ", + "github": "https://github.com/Shinetism/VStates", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2;1;2", + "aff_unique_norm": "Tsinghua University;University of Illinois at Urbana-Champaign;Columbia University", + "aff_unique_dep": ";;", + "aff_unique_url": 
"https://www.tsinghua.edu.cn;https://illinois.edu;https://www.columbia.edu", + "aff_unique_abbr": "THU;UIUC;Columbia", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;1;0;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25493", + "title": "Video Object of Interest Segmentation", + "track": "main", + "status": "Technical", + "abstract": "In this work, we present a new computer vision task named video object of interest segmentation (VOIS). Given a video and a target image of interest, our objective is to simultaneously segment and track all objects in the video that are relevant to the target image. This problem combines the traditional video object segmentation task with an additional image indicating the content that users are concerned with. Since no existing dataset is perfectly suitable for this new task, we specifically construct a large-scale dataset called LiveVideos, which contains 2418 pairs of target images and live videos with instance-level annotations. In addition, we propose a transformer-based method for this task. We revisit Swin Transformer and design a dual-path structure to fuse video and image features. Then, a transformer decoder is employed to generate object proposals for segmentation and tracking from the fused features. 
Extensive experiments on LiveVideos dataset show the superiority of our proposed method.", + "primary_area": "computer vision iii", + "author": "Siyuan Zhou; Chunru Zhan; Biao Wang; Tiezheng Ge; Yuning Jiang; Li Niu", + "authorids": "", + "aff": "MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; MoE Key Lab of Artificial Intelligence, Shanghai Jiao Tong University", + "bibtex": "@article{Zhou_Zhan_Wang_Ge_Jiang_Niu_2023, title={Video Object of Interest Segmentation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25493}, DOI={10.1609/aaai.v37i3.25493}, abstractNote={In this work, we present a new computer vision task named video object of interest segmentation (VOIS). Given a video and a target image of interest, our objective is to simultaneously segment and track all objects in the video that are relevant to the target image. This problem combines the traditional video object segmentation task with an additional image indicating the content that users are concerned with. Since no existing dataset is perfectly suitable for this new task, we specifically construct a large-scale dataset called LiveVideos, which contains 2418 pairs of target images and live videos with instance-level annotations. In addition, we propose a transformer-based method for this task. We revisit Swin Transformer and design a dual-path structure to fuse video and image features. Then, a transformer decoder is employed to generate object proposals for segmentation and tracking from the fused features. 
Extensive experiments on LiveVideos dataset show the superiority of our proposed method.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhou, Siyuan and Zhan, Chunru and Wang, Biao and Ge, Tiezheng and Jiang, Yuning and Niu, Li}, year={2023}, month={Jun.}, pages={3805-3813} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25493/25265", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25493", + "pdf_size": 6342742, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:ugbg--L7RlAJ:scholar.google.com/&scioq=Video+Object+of+Interest+Segmentation&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff_domain": "sjtu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;gmail.com", + "email": "sjtu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Alibaba Group", + "aff_unique_dep": "MoE Key Lab of Artificial Intelligence;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "SJTU;Alibaba", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26787", + "title": "Video-Audio Domain Generalization via Confounder Disentanglement", + "track": "aaai special track", + "status": "Technical", + "abstract": "Existing video-audio understanding models are trained and evaluated in an intra-domain setting, facing performance degeneration in real-world applications where multiple domains and distribution shifts naturally exist. The key to video-audio domain generalization (VADG) lies in alleviating spurious correlations over multi-modal features. 
To achieve this goal, we resort to causal theory and attribute such correlation to confounders affecting both video-audio features and labels. We propose a DeVADG framework that conducts uni-modal and cross-modal deconfounding through back-door adjustment. DeVADG performs cross-modal disentanglement and obtains fine-grained confounders at both class-level and domain-level using half-sibling regression and unpaired domain transformation, which essentially identifies domain-variant factors and class-shared factors that cause spurious correlations between features and false labels. To promote VADG research, we collect a VADG-Action dataset for video-audio action recognition with over 5,000 video clips across four domains (e.g., cartoon and game) and ten action classes (e.g., cooking and riding). We conduct extensive experiments, i.e., multi-source DG, single-source DG, and qualitative analysis, validating the rationality of our causal analysis and the effectiveness of the DeVADG framework.", + "primary_area": "safe and robust ai", + "author": "Shengyu Zhang; Xusheng Feng; Wenyan Fan; Wenjing Fang; Fuli Feng; Wei Ji; Shuo Li; Li Wang; Shanshan Zhao; Zhou Zhao; Tat-Seng Chua; Fei Wu", + "authorids": "", + "aff": "Zhejiang University; University of Electronic Science and Technology of China; Zhejiang University; Ant Group; University of Science and Technology of China; National University of Singapore; National University of Singapore; Ant Group; The University of Sydney; Zhejiang University; National University of Singapore; Zhejiang University+Shanghai AI Laboratory", + "bibtex": "@article{Zhang_Feng_Fan_Fang_Feng_Ji_Li_Wang_Zhao_Zhao_Chua_Wu_2023, title={Video-Audio Domain Generalization via Confounder Disentanglement}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26787}, DOI={10.1609/aaai.v37i12.26787}, abstractNote={Existing video-audio understanding models are trained and evaluated in an intra-domain setting, facing performance degeneration in 
real-world applications where multiple domains and distribution shifts naturally exist. The key to video-audio domain generalization (VADG) lies in alleviating spurious correlations over multi-modal features. To achieve this goal, we resort to causal theory and attribute such correlation to confounders affecting both video-audio features and labels. We propose a DeVADG framework that conducts uni-modal and cross-modal deconfounding through back-door adjustment. DeVADG performs cross-modal disentanglement and obtains fine-grained confounders at both class-level and domain-level using half-sibling regression and unpaired domain transformation, which essentially identifies domain-variant factors and class-shared factors that cause spurious correlations between features and false labels. To promote VADG research, we collect a VADG-Action dataset for video-audio action recognition with over 5,000 video clips across four domains (e.g., cartoon and game) and ten action classes (e.g., cooking and riding). 
We conduct extensive experiments, i.e., multi-source DG, single-source DG, and qualitative analysis, validating the rationality of our causal analysis and the effectiveness of the DeVADG framework.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Shengyu and Feng, Xusheng and Fan, Wenyan and Fang, Wenjing and Feng, Fuli and Ji, Wei and Li, Shuo and Wang, Li and Zhao, Shanshan and Zhao, Zhou and Chua, Tat-Seng and Wu, Fei}, year={2023}, month={Jun.}, pages={15322-15330} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26787/26559", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26787", + "pdf_size": 5382319, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=484282372839065939&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "zju.edu.cn;outlook.com;outlook.com;antgroup.com;gmail.com;gmail.com;nus.edu.sg;antgroup.com;gmail.com;zju.edu.cn;nus.edu.sg;zju.edu.cn", + "email": "zju.edu.cn;outlook.com;outlook.com;antgroup.com;gmail.com;gmail.com;nus.edu.sg;antgroup.com;gmail.com;zju.edu.cn;nus.edu.sg;zju.edu.cn", + "github": "", + "project": "", + "author_num": 12, + "aff_unique_index": "0;1;0;2;3;4;4;2;5;0;4;0+6", + "aff_unique_norm": "Zhejiang University;University of Electronic Science and Technology of China;Ant Group;University of Science and Technology of China;National University of Singapore;University of Sydney;Shanghai AI Laboratory", + "aff_unique_dep": ";;;;;;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.uestc.edu.cn;https://www.antgroup.com;http://www.ustc.edu.cn;https://www.nus.edu.sg;https://www.sydney.edu.au;https://www.shanghai-ai-lab.com", + "aff_unique_abbr": "ZJU;UESTC;Ant Group;USTC;NUS;USYD;SAIL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1;1;0;2;0;1;0+0", + "aff_country_unique": "China;Singapore;Australia" + }, + { + "id": "article-25414", + 
"title": "Video-Text Pre-training with Learned Regions for Retrieval", + "track": "main", + "status": "Technical", + "abstract": "Video-Text pre-training aims at learning transferable representations from large-scale video-text pairs via aligning the semantics between visual and textual information. State-of-the-art approaches extract visual features from raw pixels in an end-to-end fashion. However, these methods operate at frame-level directly and thus overlook the spatio-temporal structure of objects in video, which yet has a strong synergy with nouns in textual descriptions. In this work, we propose a simple yet effective module for video-text representation learning, namely RegionLearner, which can take into account the structure of objects during pre-training on large-scale video-text pairs. Given a video, our module (1) first quantizes continuous visual features via clustering patch-features into the same cluster according to content similarity, then (2) generates learnable masks to aggregate fragmentary features into regions with complete semantics, and finally (3) models the spatio-temporal dependencies between different semantic regions. In contrast to using off-the-shelf object detectors, our proposed module does not require explicit supervision and is much more computationally efficient. We pre-train the proposed approach on the public WebVid2M and CC3M datasets. 
Extensive evaluations on four downstream video-text retrieval benchmarks clearly demonstrate the effectiveness of our RegionLearner.", + "primary_area": "computer vision iii", + "author": "Rui Yan; Mike Zheng Shou; Yixiao Ge; Jinpeng Wang; Xudong Lin; Guanyu Cai; Jinhui Tang", + "authorids": "", + "aff": "Nanjing University of Science and Technology; Show Lab, National University of Singapore; Tencent PCG; Show Lab, National University of Singapore; Columbia University; Tongji University; Nanjing University of Science and Technology", + "bibtex": "@article{Yan_Shou_Ge_Wang_Lin_Cai_Tang_2023, title={Video-Text Pre-training with Learned Regions for Retrieval}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25414}, DOI={10.1609/aaai.v37i3.25414}, abstractNote={Video-Text pre-training aims at learning transferable representations from large-scale video-text pairs via aligning the semantics between visual and textual information. State-of-the-art approaches extract visual features from raw pixels in an end-to-end fashion. However, these methods operate at frame-level directly and thus overlook the spatio-temporal structure of objects in video, which yet has a strong synergy with nouns in textual descriptions. In this work, we propose a simple yet effective module for video-text representation learning, namely RegionLearner, which can take into account the structure of objects during pre-training on large-scale video-text pairs. Given a video, our module (1) first quantizes continuous visual features via clustering patch-features into the same cluster according to content similarity, then (2) generates learnable masks to aggregate fragmentary features into regions with complete semantics, and finally (3) models the spatio-temporal dependencies between different semantic regions. In contrast to using off-the-shelf object detectors, our proposed module does not require explicit supervision and is much more computationally efficient. 
We pre-train the proposed approach on the public WebVid2M and CC3M datasets. Extensive evaluations on four downstream video-text retrieval benchmarks clearly demonstrate the effectiveness of our RegionLearner.}, number={3}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yan, Rui and Shou, Mike Zheng and Ge, Yixiao and Wang, Jinpeng and Lin, Xudong and Cai, Guanyu and Tang, Jinhui}, year={2023}, month={Jun.}, pages={3100-3108} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25414/25186", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25414", + "pdf_size": 929612, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8267029741916042483&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "njust.edu.cn;gmail.com;gmail.com;mail2.sysu.edu.cn;columbia.edu;tongji.edu.cn;njust.edu.cn", + "email": "njust.edu.cn;gmail.com;gmail.com;mail2.sysu.edu.cn;columbia.edu;tongji.edu.cn;njust.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;1;3;4;0", + "aff_unique_norm": "Nanjing University of Science and Technology;National University of Singapore;Tencent;Columbia University;Tongji University", + "aff_unique_dep": ";Show Lab;PCG (Platform and Content Group);;", + "aff_unique_url": "http://www.nust.edu.cn/;https://www.nus.edu.sg;https://www.tencent.com;https://www.columbia.edu;https://www.tongji.edu.cn", + "aff_unique_abbr": "NUST;NUS;Tencent PCG;Columbia;Tongji", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Singapore", + "aff_country_unique_index": "0;1;0;1;2;0;0", + "aff_country_unique": "China;Singapore;United States" + }, + { + "id": "article-26613", + "title": "VideoDubber: Machine Translation with Speech-Aware Length Control for Video Dubbing", + "track": "main", + "status": "Technical", + "abstract": "Video dubbing aims to translate the original speech in a film or television program into the speech in a target 
language, which can be achieved with a cascaded system consisting of speech recognition, machine translation and speech synthesis. To ensure the translated speech to be well aligned with the corresponding video, the length/duration of the translated speech should be as close as possible to that of the original speech, which requires strict length control. Previous works usually control the number of words or characters generated by the machine translation model to be similar to the source sentence, without considering the isochronicity of speech as the speech duration of words/characters in different languages varies. In this paper, we propose VideoDubber, a machine translation system tailored for the task of video dubbing, which directly considers the speech duration of each token in translation, to match the length of source and target speech. Specifically, we control the speech length of generated sentence by guiding the prediction of each word with the duration information, including the speech duration of itself as well as how much duration is left for the remaining words. We design experiments on four language directions (German -> English, Spanish -> English, Chinese <-> English), and the results show that VideoDubber achieves better length control ability on the generated speech than baseline methods. 
To make up the lack of real-world datasets, we also construct a real-world test set collected from films to provide comprehensive evaluations on the video dubbing task.", + "primary_area": "speech natural language processing", + "author": "Yihan Wu; Junliang Guo; Xu Tan; Chen Zhang; Bohan Li; Ruihua Song; Lei He; Sheng Zhao; Arul Menezes; Jiang Bian", + "authorids": "", + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Microsoft Research Asia; Microsoft Azure Speech; Microsoft Azure Speech; Microsoft Azure Speech; Gaoling School of Artificial Intelligence, Renmin University of China; Microsoft Azure Speech; Microsoft Azure Speech; Microsoft Azure Translation; Microsoft Research Asia", + "bibtex": "@article{Wu_Guo_Tan_Zhang_Li_Song_He_Zhao_Menezes_Bian_2023, title={VideoDubber: Machine Translation with Speech-Aware Length Control for Video Dubbing}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26613}, DOI={10.1609/aaai.v37i11.26613}, abstractNote={Video dubbing aims to translate the original speech in a film or television program into the speech in a target language, which can be achieved with a cascaded system consisting of speech recognition, machine translation and speech synthesis. To ensure the translated speech to be well aligned with the corresponding video, the length/duration of the translated speech should be as close as possible to that of the original speech, which requires strict length control. Previous works usually control the number of words or characters generated by the machine translation model to be similar to the source sentence, without considering the isochronicity of speech as the speech duration of words/characters in different languages varies. In this paper, we propose VideoDubber, a machine translation system tailored for the task of video dubbing, which directly considers the speech duration of each token in translation, to match the length of source and target speech. 
Specifically, we control the speech length of generated sentence by guiding the prediction of each word with the duration information, including the speech duration of itself as well as how much duration is left for the remaining words. We design experiments on four language directions (German -> English, Spanish -> English, Chinese <-> English), and the results show that VideoDubber achieves better length control ability on the generated speech than baseline methods. To make up the lack of real-world datasets, we also construct a real-world test set collected from films to provide comprehensive evaluations on the video dubbing task.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wu, Yihan and Guo, Junliang and Tan, Xu and Zhang, Chen and Li, Bohan and Song, Ruihua and He, Lei and Zhao, Sheng and Menezes, Arul and Bian, Jiang}, year={2023}, month={Jun.}, pages={13772-13779} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26613/26385", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26613", + "pdf_size": 498928, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14699700387403646338&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "ruc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;ruc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "ruc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;ruc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;1;2;2;2;0;2;2;3;1", + "aff_unique_norm": "Renmin University of China;Microsoft Research;Microsoft Corporation;Microsoft", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;Research;Azure Speech;Azure Translation", + "aff_unique_url": 
"http://www.ruc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.microsoft.com;https://azure.microsoft.com", + "aff_unique_abbr": "RUC;MSR Asia;Microsoft;Microsoft", + "aff_campus_unique_index": "0;1;0;1", + "aff_campus_unique": "Beijing;Asia;", + "aff_country_unique_index": "0;0;1;1;1;0;1;1;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25809", + "title": "Visually Grounded Commonsense Knowledge Acquisition", + "track": "main", + "status": "Technical", + "abstract": "Large-scale commonsense knowledge bases empower a broad range of AI applications, where the automatic extraction of commonsense knowledge (CKE) is a fundamental and challenging problem. CKE from text is known for suffering from the inherent sparsity and reporting bias of commonsense in text. Visual perception, on the other hand, contains rich commonsense knowledge about real-world entities, e.g., (person, can_hold, bottle), which can serve as promising sources for acquiring grounded commonsense knowledge. In this work, we present CLEVER, which formulates CKE as a distantly supervised multi-instance learning problem, where models learn to summarize commonsense relations from a bag of images about an entity pair without any human annotation on image instances. To address the problem, CLEVER leverages vision-language pre-training models for deep understanding of each image in the bag, and selects informative instances from the bag to summarize commonsense entity relations via a novel contrastive attention mechanism. Comprehensive experimental results in held-out and human evaluation show that CLEVER can extract commonsense knowledge in promising quality, outperforming pre-trained language model-based methods by 3.9 AUC and 6.4 mAUC points. The predicted commonsense scores show strong correlation with human judgment with a 0.78 Spearman coefficient. Moreover, the extracted commonsense can also be grounded into images with reasonable interpretability. 
The data and codes can be obtained at https://github.com/thunlp/CLEVER.", + "primary_area": "knowledge representation and reasoning", + "author": "Yuan Yao; Tianyu Yu; Ao Zhang; Mengdi Li; Ruobing Xie; Cornelius Weber; Zhiyuan Liu; Hai-Tao Zheng; Stefan Wermter; Tat-Seng Chua; Maosong Sun", + "authorids": "", + "aff": "Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China; Shenzhen International Graduate School, Tsinghua University; School of Computing, National University of Singapore, Singapore; Department of Informatics, University of Hamburg, Hamburg, Germany; WeChat AI, Tencent; School of Computing, National University of Singapore, Singapore; Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China; Shenzhen International Graduate School, Tsinghua University+Peng Cheng Laboratory; Department of Informatics, University of Hamburg, Hamburg, Germany; School of Computing, National University of Singapore, Singapore; Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China", + "bibtex": "@article{Yao_Yu_Zhang_Li_Xie_Weber_Liu_Zheng_Wermter_Chua_Sun_2023, title={Visually Grounded Commonsense Knowledge Acquisition}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25809}, DOI={10.1609/aaai.v37i5.25809}, abstractNote={Large-scale commonsense knowledge bases empower a broad range of AI applications, where the automatic extraction of commonsense knowledge (CKE) is a fundamental and challenging problem. CKE from text is known for suffering from the inherent sparsity and reporting bias of commonsense in text. Visual perception, on the other hand, contains rich commonsense knowledge about real-world entities, e.g., (person, can_hold, bottle), which can serve as promising sources for acquiring grounded commonsense knowledge. 
In this work, we present CLEVER, which formulates CKE as a distantly supervised multi-instance learning problem, where models learn to summarize commonsense relations from a bag of images about an entity pair without any human annotation on image instances. To address the problem, CLEVER leverages vision-language pre-training models for deep understanding of each image in the bag, and selects informative instances from the bag to summarize commonsense entity relations via a novel contrastive attention mechanism. Comprehensive experimental results in held-out and human evaluation show that CLEVER can extract commonsense knowledge in promising quality, outperforming pre-trained language model-based methods by 3.9 AUC and 6.4 mAUC points. The predicted commonsense scores show strong correlation with human judgment with a 0.78 Spearman coefficient. Moreover, the extracted commonsense can also be grounded into images with reasonable interpretability. The data and codes can be obtained at https://github.com/thunlp/CLEVER.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yao, Yuan and Yu, Tianyu and Zhang, Ao and Li, Mengdi and Xie, Ruobing and Weber, Cornelius and Liu, Zhiyuan and Zheng, Hai-Tao and Wermter, Stefan and Chua, Tat-Seng and Sun, Maosong}, year={2023}, month={Jun.}, pages={6583-6592} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25809/25581", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25809", + "pdf_size": 2232279, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2078486519568292623&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "163.com; ; ; ; ; ;tsinghua.edu.cn;sz.tsinghua.edu.cn; ; ; ", + "email": "163.com; ; ; ; ; ;tsinghua.edu.cn;sz.tsinghua.edu.cn; ; ; ", + "github": "https://github.com/thunlp/CLEVER", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;1;2;3;1;0;0+4;2;1;0", + "aff_unique_norm": 
"Tsinghua University;National University of Singapore;University of Hamburg;Tencent;Peng Cheng Laboratory", + "aff_unique_dep": "Dept. of Comp. Sci. & Tech.;School of Computing;Department of Informatics;WeChat AI;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.nus.edu.sg;https://www.uni-hamburg.de;https://www.tencent.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "THU;NUS;;Tencent;PCL", + "aff_campus_unique_index": "0;1;3;0;1;3;0", + "aff_campus_unique": "Beijing;Shenzhen;;Hamburg", + "aff_country_unique_index": "0;0;1;2;0;1;0;0+0;2;1;0", + "aff_country_unique": "China;Singapore;Germany" + }, + { + "id": "article-25707", + "title": "Voting with Preference Intensities", + "track": "main", + "status": "Technical", + "abstract": "When an agent votes, she typically ranks the set of available alternatives. Occasionally, she may also wish to report the intensity of her preferences by indicating adjacent pairs of alternatives in her ranking between which her preference is acutely decisive; for instance, she may suggest that she likes alternative a more than b, but b much more than c. We design near-optimal voting rules which aggregate such preference rankings with intensities using the recently-popular distortion framework. We also show that traditional voting rules, which aggregate preference rankings while ignoring (or not eliciting) intensities, can incur significant welfare loss.", + "primary_area": "game theory and economic paradigms", + "author": "Anson Kahng; Mohamad Latifian; Nisarg Shah", + "authorids": "", + "aff": "University of Rochester; University of Toronto; University of Toronto", + "bibtex": "@article{Kahng_Latifian_Shah_2023, title={Voting with Preference Intensities}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25707}, DOI={10.1609/aaai.v37i5.25707}, abstractNote={When an agent votes, she typically ranks the set of available alternatives. 
Occasionally, she may also wish to report the intensity of her preferences by indicating adjacent pairs of alternatives in her ranking between which her preference is acutely decisive; for instance, she may suggest that she likes alternative a more than b, but b much more than c. We design near-optimal voting rules which aggregate such preference rankings with intensities using the recently-popular distortion framework. We also show that traditional voting rules, which aggregate preference rankings while ignoring (or not eliciting) intensities, can incur significant welfare loss.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kahng, Anson and Latifian, Mohamad and Shah, Nisarg}, year={2023}, month={Jun.}, pages={5697-5704} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25707/25479", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25707", + "pdf_size": 204381, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14401297868440660574&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "cs.rochester.edu;cs.toronto.edu;cs.toronto.edu", + "email": "cs.rochester.edu;cs.toronto.edu;cs.toronto.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "University of Rochester;University of Toronto", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.rochester.edu;https://www.utoronto.ca", + "aff_unique_abbr": "U of R;U of T", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "United States;Canada" + }, + { + "id": "article-26749", + "title": "WAT: Improve the Worst-Class Robustness in Adversarial Training", + "track": "aaai special track", + "status": "Technical", + "abstract": "Deep Neural Networks (DNN) have been shown to be vulnerable to adversarial examples. 
Adversarial training (AT) is a popular and effective strategy to defend against adversarial attacks. Recent works have shown that a robust model well-trained by AT exhibits a remarkable robustness disparity among classes, and propose various methods to obtain consistent robust accuracy across classes. Unfortunately, these methods sacrifice a good deal of the average robust accuracy. Accordingly, this paper proposes a novel framework of worst-class adversarial training and leverages no-regret dynamics to solve this problem. Our goal is to obtain a classifier with great performance on worst-class and sacrifice just a little average robust accuracy at the same time. We then rigorously analyze the theoretical properties of our proposed algorithm, and the generalization error bound in terms of the worst-class robust risk. Furthermore, we propose a measurement to evaluate the proposed method in terms of both the average and worst-class accuracies. Experiments on various datasets and networks show that our proposed method outperforms the state-of-the-art approaches.", + "primary_area": "safe and robust ai", + "author": "Boqi Li; Weiwei Liu", + "authorids": "", + "aff": "School of Computer Science, Wuhan University, China; School of Computer Science, Wuhan University, China", + "bibtex": "@article{Li_Liu_2023, title={WAT: Improve the Worst-Class Robustness in Adversarial Training}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26749}, DOI={10.1609/aaai.v37i12.26749}, abstractNote={Deep Neural Networks (DNN) have been shown to be vulnerable to adversarial examples. Adversarial training (AT) is a popular and effective strategy to defend against adversarial attacks. Recent works have shown that a robust model well-trained by AT exhibits a remarkable robustness disparity among classes, and propose various methods to obtain consistent robust accuracy across classes. Unfortunately, these methods sacrifice a good deal of the average robust accuracy. 
Accordingly, this paper proposes a novel framework of worst-class adversarial training and leverages no-regret dynamics to solve this problem. Our goal is to obtain a classifier with great performance on worst-class and sacrifice just a little average robust accuracy at the same time. We then rigorously analyze the theoretical properties of our proposed algorithm, and the generalization error bound in terms of the worst-class robust risk. Furthermore, we propose a measurement to evaluate the proposed method in terms of both the average and worst-class accuracies. Experiments on various datasets and networks show that our proposed method outperforms the state-of-the-art approaches.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Boqi and Liu, Weiwei}, year={2023}, month={Jun.}, pages={14982-14990} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26749/26521", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26749", + "pdf_size": 400107, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14079298860308517277&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Wuhan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.whu.edu.cn", + "aff_unique_abbr": "WHU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26546", + "title": "WIERT: Web Information Extraction via Render Tree", + "track": "main", + "status": "Technical", + "abstract": "Web information extraction (WIE) is a fundamental problem in web document understanding, with a significant impact on various applications. 
Visual information plays a crucial role in WIE tasks as the nodes containing relevant information are often visually distinct, such as being in a larger font size or having a brighter color, from the other nodes. However, rendering visual information of a web page can be computationally expensive. Previous works have mainly focused on the Document Object Model (DOM) tree, which lacks visual information. To efficiently exploit visual information, we propose leveraging the render tree, which combines the DOM tree and Cascading Style Sheets Object Model (CSSOM) tree, and contains not only content and layout information but also rich visual information at a little additional acquisition cost compared to the DOM tree. In this paper, we present WIERT, a method that effectively utilizes the render tree of a web page based on a pretrained language model. We evaluate WIERT on the Klarna product page dataset, a manually labeled dataset of renderable e-commerce web pages, demonstrating its effectiveness and robustness.", + "primary_area": "speech natural language processing", + "author": "Zimeng Li; Bo Shao; Linjun Shou; Ming Gong; Gen Li; Daxin Jiang", + "authorids": "", + "aff": "School of Computer Science and Engineering, Beihang University, Beijing, China+Microsoft STCA; Microsoft STCA; Microsoft STCA; Microsoft STCA; Microsoft STCA; Microsoft STCA", + "bibtex": "@article{Li_Shao_Shou_Gong_Li_Jiang_2023, title={WIERT: Web Information Extraction via Render Tree}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26546}, DOI={10.1609/aaai.v37i11.26546}, abstractNote={Web information extraction (WIE) is a fundamental problem in web document understanding, with a significant impact on various applications. Visual information plays a crucial role in WIE tasks as the nodes containing relevant information are often visually distinct, such as being in a larger font size or having a brighter color, from the other nodes. 
However, rendering visual information of a web page can be computationally expensive. Previous works have mainly focused on the Document Object Model (DOM) tree, which lacks visual information. To efficiently exploit visual information, we propose leveraging the render tree, which combines the DOM tree and Cascading Style Sheets Object Model (CSSOM) tree, and contains not only content and layout information but also rich visual information at a little additional acquisition cost compared to the DOM tree. In this paper, we present WIERT, a method that effectively utilizes the render tree of a web page based on a pretrained language model. We evaluate WIERT on the Klarna product page dataset, a manually labeled dataset of renderable e-commerce web pages, demonstrating its effectiveness and robustness.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Zimeng and Shao, Bo and Shou, Linjun and Gong, Ming and Li, Gen and Jiang, Daxin}, year={2023}, month={Jun.}, pages={13166-13173} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26546/26318", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26546", + "pdf_size": 768878, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15338092327635612942&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff_domain": "buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;1;1;1;1", + "aff_unique_norm": "Beihang University;Microsoft", + "aff_unique_dep": "School of Computer Science and Engineering;STCA", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.microsoft.com", + "aff_unique_abbr": "BUAA;Microsoft", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": 
"0+1;1;1;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-26015", + "title": "WLD-Reg: A Data-Dependent Within-Layer Diversity Regularizer", + "track": "main", + "status": "Technical", + "abstract": "Neural networks are composed of multiple layers arranged in a hierarchical structure jointly trained with a gradient-based optimization, where the errors are back-propagated from the last layer back to the first one. At each optimization step, neurons at a given layer receive feedback from neurons belonging to higher layers of the hierarchy. In this paper, we propose to complement this traditional 'between-layer' feedback with additional 'within-layer' feedback to encourage the diversity of the activations within the same layer. To this end, we measure the pairwise similarity between the outputs of the neurons and use it to model the layer's overall diversity. We present an extensive empirical study confirming that the proposed approach enhances the performance of several state-of-the-art neural network models in multiple tasks. 
The code is publically available at https://github.com/firasl/AAAI-23-WLD-Reg.", + "primary_area": "machine learning ii", + "author": "Firas Laakom; Jenni Raitoharju; Alexandros Iosifidis; Moncef Gabbouj", + "authorids": "", + "aff": "Faculty of Information Technology and Communication Sciences, Tampere University, Finland; Faculty of Information Technology, University of Jyv\u00e4skyl\u00e4, Finland; DIGIT, Department of Electrical and Computer Engineering, Aarhus University, Denmark; Faculty of Information Technology and Communication Sciences, Tampere University, Finland", + "bibtex": "@article{Laakom_Raitoharju_Iosifidis_Gabbouj_2023, title={WLD-Reg: A Data-Dependent Within-Layer Diversity Regularizer}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26015}, DOI={10.1609/aaai.v37i7.26015}, abstractNote={Neural networks are composed of multiple layers arranged in a hierarchical structure jointly trained with a gradient-based optimization, where the errors are back-propagated from the last layer back to the first one. At each optimization step, neurons at a given layer receive feedback from neurons belonging to higher layers of the hierarchy. In this paper, we propose to complement this traditional \u2019between-layer\u2019 feedback with additional \u2019within-layer\u2019 feedback to encourage the diversity of the activations within the same layer. To this end, we measure the pairwise similarity between the outputs of the neurons and use it to model the layer\u2019s overall diversity. We present an extensive empirical study confirming that the proposed approach enhances the performance of several state-of-the-art neural network models in multiple tasks. 
The code is publically available at https://github.com/firasl/AAAI-23-WLD-Reg.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Laakom, Firas and Raitoharju, Jenni and Iosifidis, Alexandros and Gabbouj, Moncef}, year={2023}, month={Jun.}, pages={8421-8429} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26015/25787", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26015", + "pdf_size": 364101, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4982695534020310668&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "tuni.fi;jyu.fi;ece.au.dk;tuni.fi", + "email": "tuni.fi;jyu.fi;ece.au.dk;tuni.fi", + "github": "https://github.com/firasl/AAAI-23-WLD-Reg", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Tampere University;University of Jyvaskyla;Aarhus University", + "aff_unique_dep": "Faculty of Information Technology and Communication Sciences;Faculty of Information Technology;Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.tuni.fi;https://www.jyu.fi;https://www.au.dk", + "aff_unique_abbr": "Tuni;;AU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "Finland;Denmark" + }, + { + "id": "article-25592", + "title": "WSiP: Wave Superposition Inspired Pooling for Dynamic Interactions-Aware Trajectory Prediction", + "track": "main", + "status": "Technical", + "abstract": "Predicting motions of surrounding vehicles is critically important to help autonomous driving systems plan a safe path and avoid collisions. 
Although recent social pooling based LSTM models have achieved significant performance gains by considering the motion interactions between vehicles close to each other, vehicle trajectory prediction still remains as a challenging research issue due to the dynamic and high-order interactions in the real complex driving scenarios. To this end, we propose a wave superposition inspired social pooling (Wave-pooling for short) method for dynamically aggregating the high-order interactions from both local and global neighbor vehicles. Through modeling each vehicle as a wave with the amplitude and phase, Wave-pooling can more effectively represent the dynamic motion states of vehicles and capture their high-order dynamic interactions by wave superposition. By integrating Wave-pooling, an encoder-decoder based learning framework named WSiP is also proposed. Extensive experiments conducted on two public highway datasets NGSIM and highD verify the effectiveness of WSiP by comparison with current state-of-the-art baselines. More importantly, the result of WSiP is more interpretable as the interaction strength between vehicles can be intuitively reflected by their phase difference. The code of the work is publicly available at https://github.com/Chopin0123/WSiP.", + "primary_area": "data mining and knowledge management", + "author": "Renzhi Wang; Senzhang Wang; Hao Yan; Xiang Wang", + "authorids": "", + "aff": "Central South University; Central South University; Central South University; National University of Defense Technology", + "bibtex": "@article{Wang_Wang_Yan_Wang_2023, title={WSiP: Wave Superposition Inspired Pooling for Dynamic Interactions-Aware Trajectory Prediction}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25592}, DOI={10.1609/aaai.v37i4.25592}, abstractNote={Predicting motions of surrounding vehicles is critically important to help autonomous driving systems plan a safe path and avoid collisions. 
Although recent social pooling based LSTM models have achieved significant performance gains by considering the motion interactions between vehicles close to each other, vehicle trajectory prediction still remains as a challenging research issue due to the dynamic and high-order interactions in the real complex driving scenarios. To this end, we propose a wave superposition inspired social pooling (Wave-pooling for short) method for dynamically aggregating the high-order interactions from both local and global neighbor vehicles. Through modeling each vehicle as a wave with the amplitude and phase, Wave-pooling can more effectively represent the dynamic motion states of vehicles and capture their high-order dynamic interactions by wave superposition. By integrating Wave-pooling, an encoder-decoder based learning framework named WSiP is also proposed. Extensive experiments conducted on two public highway datasets NGSIM and highD verify the effectiveness of WSiP by comparison with current state-of-the-art baselines. More importantly, the result of WSiP is more interpretable as the interaction strength between vehicles can be intuitively reflected by their phase difference. 
The code of the work is publicly available at https://github.com/Chopin0123/WSiP.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Renzhi and Wang, Senzhang and Yan, Hao and Wang, Xiang}, year={2023}, month={Jun.}, pages={4685-4692} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25592/25364", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25592", + "pdf_size": 1578486, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17059003596332920865&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "gmail.com;csu.edu.cn;csu.edu.cn;nudt.edu.cn", + "email": "gmail.com;csu.edu.cn;csu.edu.cn;nudt.edu.cn", + "github": "https://github.com/Chopin0123/WSiP", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Central South University;National University of Defense Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.csu.edu.cn;http://www.nudt.edu.cn/", + "aff_unique_abbr": "CSU;NUDT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26667", + "title": "Walkability Optimization: Formulations, Algorithms, and a Case Study of Toronto", + "track": "aaai special track", + "status": "Technical", + "abstract": "The concept of walkable urban development has gained increased\nattention due to its public health, economic, and environmental\nsustainability benefits. Unfortunately, land zoning\nand historic under-investment have resulted in spatial inequality\nin walkability and social inequality among residents.\nWe tackle the problem of Walkability Optimization through\nthe lens of combinatorial optimization. 
The task is to select\nlocations in which additional amenities (e.g., grocery stores,\nschools, restaurants) can be allocated to improve resident access\nvia walking while taking into account existing amenities\nand providing multiple options (e.g., for restaurants).\nTo this end, we derive Mixed-Integer Linear Programming\n(MILP) and Constraint Programming (CP) models. Moreover,\nwe show that the problem\u2019s objective function is submodular\nin special cases, which motivates an efficient greedy\nheuristic. We conduct a case study on 31 underserved neighborhoods\nin the City of Toronto, Canada. MILP finds the\nbest solutions in most scenarios but does not scale well with\nnetwork size. The greedy algorithm scales well and finds\nhigh-quality solutions. Our empirical evaluation shows that\nneighbourhoods with low walkability have a great potential\nfor transformation into pedestrian-friendly neighbourhoods\nby strategically placing new amenities. Allocating 3 additional\ngrocery stores, schools, and restaurants can improve the\n\u201cWalkScore\u201d by more than 50 points (on a scale of 100) for 4\nneighbourhoods and reduce the walking distances to amenities\nfor 75% of all residential locations to 10 minutes for all\namenity types. Our code and paper appendix are available at\nhttps://github.com/khalil-research/walkability.", + "primary_area": "ai for social impact", + "author": "Weimin Huang; Elias B. 
Khalil", + "authorids": "", + "aff": "Department of Mechanical & Industrial Engineering, University of Toronto; Department of Mechanical & Industrial Engineering, University of Toronto", + "bibtex": "@article{Huang_Khalil_2023, title={Walkability Optimization: Formulations, Algorithms, and a Case Study of Toronto}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26667}, DOI={10.1609/aaai.v37i12.26667}, abstractNote={The concept of walkable urban development has gained increased\nattention due to its public health, economic, and environmental\nsustainability benefits. Unfortunately, land zoning\nand historic under-investment have resulted in spatial inequality\nin walkability and social inequality among residents.\nWe tackle the problem of Walkability Optimization through\nthe lens of combinatorial optimization. The task is to select\nlocations in which additional amenities (e.g., grocery stores,\nschools, restaurants) can be allocated to improve resident access\nvia walking while taking into account existing amenities\nand providing multiple options (e.g., for restaurants).\nTo this end, we derive Mixed-Integer Linear Programming\n(MILP) and Constraint Programming (CP) models. Moreover,\nwe show that the problem\u2019s objective function is submodular\nin special cases, which motivates an efficient greedy\nheuristic. We conduct a case study on 31 underserved neighborhoods\nin the City of Toronto, Canada. MILP finds the\nbest solutions in most scenarios but does not scale well with\nnetwork size. The greedy algorithm scales well and finds\nhigh-quality solutions. Our empirical evaluation shows that\nneighbourhoods with low walkability have a great potential\nfor transformation into pedestrian-friendly neighbourhoods\nby strategically placing new amenities. 
Allocating 3 additional\ngrocery stores, schools, and restaurants can improve the\n\u201cWalkScore\u201d by more than 50 points (on a scale of 100) for 4\nneighbourhoods and reduce the walking distances to amenities\nfor 75% of all residential locations to 10 minutes for all\namenity types. Our code and paper appendix are available at\nhttps://github.com/khalil-research/walkability.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Huang, Weimin and Khalil, Elias B.}, year={2023}, month={Jun.}, pages={14249-14258} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26667/26439", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26667", + "pdf_size": 326928, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9330416981286163232&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "mail.utoronto.ca;mie.utoronto.ca", + "email": "mail.utoronto.ca;mie.utoronto.ca", + "github": "https://github.com/khalil-research/walkability", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Toronto", + "aff_unique_dep": "Department of Mechanical & Industrial Engineering", + "aff_unique_url": "https://www.utoronto.ca", + "aff_unique_abbr": "U of T", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Toronto", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Canada" + }, + { + "id": "article-26459", + "title": "Warm-Starting Nested Rollout Policy Adaptation with Optimal Stopping", + "track": "main", + "status": "Technical", + "abstract": "Nested Rollout Policy Adaptation (NRPA) is an approach using online learning policies in a nested structure. It has achieved a great result in a variety of difficult combinatorial optimization problems. In this paper, we propose Meta-NRPA, which combines optimal stopping theory with NRPA for warm-starting and significantly improves the performance of NRPA. 
We also present several exploratory techniques for NRPA which enable it to perform better exploration. We establish this for three notoriously difficult problems ranging from telecommunication, transportation and coding theory namely Minimum Congestion Shortest Path Routing, Traveling Salesman Problem with Time Windows and Snake-in-the-Box.\nWe also improve the lower bounds of the Snake-in-the-Box problem for multiple dimensions.", + "primary_area": "search and optimization", + "author": "Chen Dang; Cristina Bazgan; Tristan Cazenave; Morgan Chopin; Pierre-Henri Wuillemin", + "authorids": "", + "aff": "Orange Labs, Ch\u00e2tillon, France + Universit\u00e9 Paris-Dauphine, PSL Research University, CNRS, UMR 7243, LAMSADE, F-75016 Paris, France; Universit\u00e9 Paris-Dauphine, PSL Research University, CNRS, UMR 7243, LAMSADE, F-75016 Paris, France; Universit\u00e9 Paris-Dauphine, PSL Research University, CNRS, UMR 7243, LAMSADE, F-75016 Paris, France; Orange Labs, Ch\u00e2tillon, France; Sorbonne Universit\u00e9, CNRS, UMR 7606, LIP6, F-75005 Paris, France", + "bibtex": "@article{Dang_Bazgan_Cazenave_Chopin_Wuillemin_2023, title={Warm-Starting Nested Rollout Policy Adaptation with Optimal Stopping}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26459}, DOI={10.1609/aaai.v37i10.26459}, abstractNote={Nested Rollout Policy Adaptation (NRPA) is an approach using online learning policies in a nested structure. It has achieved a great result in a variety of difficult combinatorial optimization problems. In this paper, we propose Meta-NRPA, which combines optimal stopping theory with NRPA for warm-starting and significantly improves the performance of NRPA. We also present several exploratory techniques for NRPA which enable it to perform better exploration. 
We establish this for three notoriously difficult problems ranging from telecommunication, transportation and coding theory namely Minimum Congestion Shortest Path Routing, Traveling Salesman Problem with Time Windows and Snake-in-the-Box.\nWe also improve the lower bounds of the Snake-in-the-Box problem for multiple dimensions.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Dang, Chen and Bazgan, Cristina and Cazenave, Tristan and Chopin, Morgan and Wuillemin, Pierre-Henri}, year={2023}, month={Jun.}, pages={12381-12389} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26459/26231", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26459", + "pdf_size": 403058, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11271681445000689936&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "orange.com;dauphine.psl.eu;dauphine.psl.eu;orange.com;lip6.fr", + "email": "orange.com;dauphine.psl.eu;dauphine.psl.eu;orange.com;lip6.fr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;0;2", + "aff_unique_norm": "Orange Labs;Universit\u00e9 Paris-Dauphine;Sorbonne Universit\u00e9", + "aff_unique_dep": ";LAMSADE;LIP6", + "aff_unique_url": "https://www.orangelabs.com;https://www.univ-paris-dauphine.fr;https://www.sorbonne-universite.fr", + "aff_unique_abbr": ";UPD;Sorbonne U", + "aff_campus_unique_index": "0+1;1;1;0;1", + "aff_campus_unique": "Ch\u00e2tillon;Paris", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "article-26419", + "title": "Was Fixing This Really That Hard? On the Complexity of Correcting HTN Domains", + "track": "main", + "status": "Technical", + "abstract": "Automated modeling assistance is indispensable to the AI planning being deployed in practice, notably in industry and other non-academic contexts. 
Yet, little progress has been made that goes beyond smart interfaces like programming environments. They focus on autocompletion, but lack intelligent support for guiding the modeler. As a theoretical foundation of a first step towards this direction, we study the computational complexity of correcting a flawed Hierarchical Task Network (HTN) planning domain. Specifically, a modeler provides a (white) list of plans that are supposed to be solutions, and likewise a (black) list of plans that shall not be solutions. We investigate the complexity of finding a set of (optimal or suboptimal) model corrections so that those plans are (resp. not) solutions to the corrected model. More specifically, we factor out each hardness source that contributes towards NP-hardness, including one that we deem important for many other complexity investigations that go beyond our specific context of application. All complexities range between NP and Sigma-2-p, rising the hope for efficient practical tools in the future.", + "primary_area": "planning routing and scheduling", + "author": "Songtuan Lin; Pascal Bercher", + "authorids": "", + "aff": "School of Computing, The Australian National University; School of Computing, The Australian National University", + "bibtex": "@article{Lin_Bercher_2023, title={Was Fixing This Really That Hard? On the Complexity of Correcting HTN Domains}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26419}, DOI={10.1609/aaai.v37i10.26419}, abstractNote={Automated modeling assistance is indispensable to the AI planning being deployed in practice, notably in industry and other non-academic contexts. Yet, little progress has been made that goes beyond smart interfaces like programming environments. They focus on autocompletion, but lack intelligent support for guiding the modeler. 
As a theoretical foundation of a first step towards this direction, we study the computational complexity of correcting a flawed Hierarchical Task Network (HTN) planning domain. Specifically, a modeler provides a (white) list of plans that are supposed to be solutions, and likewise a (black) list of plans that shall not be solutions. We investigate the complexity of finding a set of (optimal or suboptimal) model corrections so that those plans are (resp. not) solutions to the corrected model. More specifically, we factor out each hardness source that contributes towards NP-hardness, including one that we deem important for many other complexity investigations that go beyond our specific context of application. All complexities range between NP and Sigma-2-p, rising the hope for efficient practical tools in the future.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Songtuan and Bercher, Pascal}, year={2023}, month={Jun.}, pages={12032-12040} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26419/26191", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26419", + "pdf_size": 184382, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13107636129578021400&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "anu.edu.au;anu.edu.au", + "email": "anu.edu.au;anu.edu.au", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Australian National University", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.anu.edu.au", + "aff_unique_abbr": "ANU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26056", + "title": "Wasserstein Actor-Critic: Directed Exploration via Optimism for Continuous-Actions Control", + "track": "main", + "status": "Technical", + 
"abstract": "Uncertainty quantification has been extensively used as a means to achieve efficient directed exploration in Reinforcement Learning (RL). However, state-of-the-art methods for continuous actions still suffer from high sample complexity requirements. Indeed, they either completely lack strategies for propagating the epistemic uncertainty throughout the updates, or they mix it with aleatoric uncertainty while learning the full return distribution (e.g., distributional RL). In this paper, we propose Wasserstein Actor-Critic (WAC), an actor-critic architecture inspired by the recent Wasserstein Q-Learning (WQL), that employs approximate Q-posteriors to represent the epistemic uncertainty and Wasserstein barycenters for uncertainty propagation across the state-action space. WAC enforces exploration in a principled way by guiding the policy learning process with the optimization of an upper bound of the Q-value estimates. Furthermore, we study some peculiar issues that arise when using function approximation, coupled with the uncertainty estimation, and propose a regularized loss for the uncertainty estimation. Finally, we evaluate our algorithm on standard MujoCo tasks as well as suite of continuous-actions domains, where exploration is crucial, in comparison with state-of-the-art baselines. 
Additional details and results can be found in the supplementary material with our Arxiv preprint.", + "primary_area": "machine learning ii", + "author": "Amarildo Likmeta; Matteo Sacco; Alberto Maria Metelli; Marcello Restelli", + "authorids": "", + "aff": "University of Bologna + Politecnico di Milano; Politecnico di Milano; Politecnico di Milano; Politecnico di Milano", + "bibtex": "@article{Likmeta_Sacco_Metelli_Restelli_2023, title={Wasserstein Actor-Critic: Directed Exploration via Optimism for Continuous-Actions Control}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26056}, DOI={10.1609/aaai.v37i7.26056}, abstractNote={Uncertainty quantification has been extensively used as a means to achieve efficient directed exploration in Reinforcement Learning (RL). However, state-of-the-art methods for continuous actions still suffer from high sample complexity requirements. Indeed, they either completely lack strategies for propagating the epistemic uncertainty throughout the updates, or they mix it with aleatoric uncertainty while learning the full return distribution (e.g., distributional RL). In this paper, we propose Wasserstein Actor-Critic (WAC), an actor-critic architecture inspired by the recent Wasserstein Q-Learning (WQL), that employs approximate Q-posteriors to represent the epistemic uncertainty and Wasserstein barycenters for uncertainty propagation across the state-action space. WAC enforces exploration in a principled way by guiding the policy learning process with the optimization of an upper bound of the Q-value estimates. Furthermore, we study some peculiar issues that arise when using function approximation, coupled with the uncertainty estimation, and propose a regularized loss for the uncertainty estimation. Finally, we evaluate our algorithm on standard MujoCo tasks as well as suite of continuous-actions domains, where exploration is crucial, in comparison with state-of-the-art baselines. 
Additional details and results can be found in the supplementary material with our Arxiv preprint.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Likmeta, Amarildo and Sacco, Matteo and Metelli, Alberto Maria and Restelli, Marcello}, year={2023}, month={Jun.}, pages={8782-8790} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26056/25828", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26056", + "pdf_size": 5850217, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4692453057734834166&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff_domain": "unibo.it;mail.polimi.it;polimi.it;polimi.it", + "email": "unibo.it;mail.polimi.it;polimi.it;polimi.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;1", + "aff_unique_norm": "University of Bologna;Politecnico di Milano", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unibo.it;https://www.polimi.it", + "aff_unique_abbr": "Unibo;Polimi", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "article-25916", + "title": "Wasserstein Graph Distance Based on L1\u2013Approximated Tree Edit Distance between Weisfeiler\u2013Lehman Subtrees", + "track": "main", + "status": "Technical", + "abstract": "The Weisfeiler-Lehman (WL) test is a widely used algorithm in graph machine learning, including graph kernels, graph metrics, and graph neural networks. However, it focuses only on the consistency of the graph, which means that it is unable to detect slight structural differences. Consequently, this limits its ability to capture structural information, which also limits the performance of existing models that rely on the WL test. 
This limitation is particularly severe for traditional metrics defined by the WL test, which cannot precisely capture slight structural differences. In this paper, we propose a novel graph metric called the Wasserstein WL Subtree (WWLS) distance to address this problem. Our approach leverages the WL subtree as structural information for node neighborhoods and defines node metrics using the L1-approximated tree edit distance (L1-TED) between WL subtrees of nodes. Subsequently, we combine the Wasserstein distance and the L1-TED to define the WWLS distance, which can capture slight structural differences that may be difficult to detect using conventional metrics. We demonstrate that the proposed WWLS distance outperforms baselines in both metric validation and graph classification experiments.", + "primary_area": "machine learning i", + "author": "Zhongxi Fang; Jianming Huang; Xun Su; Hiroyuki Kasai", + "authorids": "", + "aff": "Department of Computer Science and Communications Engineering, FSE Graduate School, Waseda University; Department of Computer Science and Communications Engineering, FSE Graduate School, Waseda University; Department of Computer Science and Communications Engineering, FSE Graduate School, Waseda University; Department of Computer Science and Communications Engineering, FSE Graduate School, Waseda University + Department of Communications and Computer Engineering, FSE School, Waseda University", + "bibtex": "@article{Fang_Huang_Su_Kasai_2023, title={Wasserstein Graph Distance Based on L1\u2013Approximated Tree Edit Distance between Weisfeiler\u2013Lehman Subtrees}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25916}, DOI={10.1609/aaai.v37i6.25916}, abstractNote={The Weisfeiler-Lehman (WL) test is a widely used algorithm in graph machine learning, including graph kernels, graph metrics, and graph neural networks. 
However, it focuses only on the consistency of the graph, which means that it is unable to detect slight structural differences. Consequently, this limits its ability to capture structural information, which also limits the performance of existing models that rely on the WL test. This limitation is particularly severe for traditional metrics defined by the WL test, which cannot precisely capture slight structural differences. In this paper, we propose a novel graph metric called the Wasserstein WL Subtree (WWLS) distance to address this problem. Our approach leverages the WL subtree as structural information for node neighborhoods and defines node metrics using the L1-approximated tree edit distance (L1-TED) between WL subtrees of nodes. Subsequently, we combine the Wasserstein distance and the L1-TED to define the WWLS distance, which can capture slight structural differences that may be difficult to detect using conventional metrics. We demonstrate that the proposed WWLS distance outperforms baselines in both metric validation and graph classification experiments.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Zhongxi and Huang, Jianming and Su, Xun and Kasai, Hiroyuki}, year={2023}, month={Jun.}, pages={7539-7549} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25916/25688", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25916", + "pdf_size": 324892, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11084414213766747138&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 10, + "aff_domain": "akane.waseda.jp;toki.waseda.jp;asagi.waseda.jp;waseda.jp", + "email": "akane.waseda.jp;toki.waseda.jp;asagi.waseda.jp;waseda.jp", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0+0", + "aff_unique_norm": "Waseda University", + "aff_unique_dep": "Department of Computer Science and Communications Engineering", + 
"aff_unique_url": "https://www.waseda.jp/top", + "aff_unique_abbr": "Waseda", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26276", + "title": "WaveForM: Graph Enhanced Wavelet Learning for Long Sequence Forecasting of Multivariate Time Series", + "track": "main", + "status": "Technical", + "abstract": "Multivariate time series (MTS) analysis and forecasting are crucial in many real-world applications, such as smart traffic management and weather forecasting. However, most existing work either focuses on short sequence forecasting or makes predictions predominantly with time domain features, which is not effective at removing noises with irregular frequencies in MTS. Therefore, we propose WaveForM, an end-to-end graph enhanced Wavelet learning framework for long sequence FORecasting of MTS. WaveForM first utilizes Discrete Wavelet Transform (DWT) to represent MTS in the wavelet domain, which captures both frequency and time domain features with a sound theoretical basis. To enable the effective learning in the wavelet domain, we further propose a graph constructor, which learns a global graph to represent the relationships between MTS variables, and graph-enhanced prediction modules, which utilize dilated convolution and graph convolution to capture the correlations between time series and predict the wavelet coefficients at different levels. 
Extensive experiments on five real-world forecasting datasets show that our model can achieve considerable performance improvement over different prediction lengths against the most competitive baseline of each dataset.", + "primary_area": "machine learning iv", + "author": "Fuhao Yang; Xin Li; Min Wang; Hongyu Zang; Wei Pang; Mingzhong Wang", + "authorids": "", + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; Beijing Institute of Technology; Beijing Institute of Technology; Heriot-Watt University; The University of the Sunshine Coast", + "bibtex": "@article{Yang_Li_Wang_Zang_Pang_Wang_2023, title={WaveForM: Graph Enhanced Wavelet Learning for Long Sequence Forecasting of Multivariate Time Series}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26276}, DOI={10.1609/aaai.v37i9.26276}, abstractNote={Multivariate time series (MTS) analysis and forecasting are crucial in many real-world applications, such as smart traffic management and weather forecasting. However, most existing work either focuses on short sequence forecasting or makes predictions predominantly with time domain features, which is not effective at removing noises with irregular frequencies in MTS. Therefore, we propose WaveForM, an end-to-end graph enhanced Wavelet learning framework for long sequence FORecasting of MTS. WaveForM first utilizes Discrete Wavelet Transform (DWT) to represent MTS in the wavelet domain, which captures both frequency and time domain features with a sound theoretical basis. To enable the effective learning in the wavelet domain, we further propose a graph constructor, which learns a global graph to represent the relationships between MTS variables, and graph-enhanced prediction modules, which utilize dilated convolution and graph convolution to capture the correlations between time series and predict the wavelet coefficients at different levels. 
Extensive experiments on five real-world forecasting datasets show that our model can achieve considerable performance improvement over different prediction lengths against the most competitive baseline of each dataset.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Fuhao and Li, Xin and Wang, Min and Zang, Hongyu and Pang, Wei and Wang, Mingzhong}, year={2023}, month={Jun.}, pages={10754-10761} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26276/26048", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26276", + "pdf_size": 563051, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11221709152598939623&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn;hw.ac.uk;usc.edu.au", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn;bit.edu.cn;hw.ac.uk;usc.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;2", + "aff_unique_norm": "Beijing Institute of Technology;Heriot-Watt University;University of the Sunshine Coast", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.hw.ac.uk;https://www.usc.edu.au", + "aff_unique_abbr": "BIT;HWU;USC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;2", + "aff_country_unique": "China;United Kingdom;Australia" + }, + { + "id": "article-25120", + "title": "Weakly Supervised 3D Multi-Person Pose Estimation for Large-Scale Scenes Based on Monocular Camera and Single LiDAR", + "track": "main", + "status": "Technical", + "abstract": "Depth estimation is usually ill-posed and ambiguous for monocular camera-based 3D multi-person pose estimation. Since LiDAR can capture accurate depth information in long-range scenes, it can benefit both the global localization of individuals and the 3D pose estimation by providing rich geometry features. 
Motivated by this, we propose a monocular camera and single LiDAR-based method for 3D multi-person pose estimation in large-scale scenes, which is easy to deploy and insensitive to light. Specifically, we design an effective fusion strategy to take advantage of multi-modal input data, including images and point cloud, and make full use of temporal information to guide the network to learn natural and coherent human motions. Without relying on any 3D pose annotations, our method exploits the inherent geometry constraints of point cloud for self-supervision and utilizes 2D keypoints on images for weak supervision. Extensive experiments on public datasets and our newly collected dataset demonstrate the superiority and generalization capability of our proposed method. Project homepage is at \\url{https://github.com/4DVLab/FusionPose.git}.", + "primary_area": "computer vision i", + "author": "Peishan Cong; Yiteng Xu; Yiming Ren; Juze Zhang; Lan Xu; Jingya Wang; Jingyi Yu; Yuexin Ma", + "authorids": "", + "aff": "ShanghaiTech University; ShanghaiTech University; ShanghaiTech University; ShanghaiTech University; ShanghaiTech University+Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University+Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University+Shanghai Engineering Research Center of Intelligent Vision and Imaging; ShanghaiTech University+Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "bibtex": "@article{Cong_Xu_Ren_Zhang_Xu_Wang_Yu_Ma_2023, title={Weakly Supervised 3D Multi-Person Pose Estimation for Large-Scale Scenes Based on Monocular Camera and Single LiDAR}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25120}, DOI={10.1609/aaai.v37i1.25120}, abstractNote={Depth estimation is usually ill-posed and ambiguous for monocular camera-based 3D multi-person pose estimation. 
Since LiDAR can capture accurate depth information in long-range scenes, it can benefit both the global localization of individuals and the 3D pose estimation by providing rich geometry features. Motivated by this, we propose a monocular camera and single LiDAR-based method for 3D multi-person pose estimation in large-scale scenes, which is easy to deploy and insensitive to light. Specifically, we design an effective fusion strategy to take advantage of multi-modal input data, including images and point cloud, and make full use of temporal information to guide the network to learn natural and coherent human motions. Without relying on any 3D pose annotations, our method exploits the inherent geometry constraints of point cloud for self-supervision and utilizes 2D keypoints on images for weak supervision. Extensive experiments on public datasets and our newly collected dataset demonstrate the superiority and generalization capability of our proposed method. Project homepage is at \\url{https://github.com/4DVLab/FusionPose.git}.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cong, Peishan and Xu, Yiteng and Ren, Yiming and Zhang, Juze and Xu, Lan and Wang, Jingya and Yu, Jingyi and Ma, Yuexin}, year={2023}, month={Jun.}, pages={461-469} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25120/24892", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25120", + "pdf_size": 8519344, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15244438110275765902&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/4DVLab/FusionPose.git", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0+1;0+1;0+1;0+1", + "aff_unique_norm": "ShanghaiTech University;Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.shanghaitech.edu.cn;", + "aff_unique_abbr": "ShanghaiTech;", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25205", + "title": "Weakly Supervised 3D Segmentation via Receptive-Driven Pseudo Label Consistency and Structural Consistency", + "track": "main", + "status": "Technical", + "abstract": "As manual point-wise label is time and labor-intensive for fully supervised large-scale point cloud semantic segmentation, weakly supervised method is increasingly active. However, existing methods fail to generate high-quality pseudo labels effectively, leading to unsatisfactory results. In this paper, we propose a weakly supervised point cloud semantic segmentation framework via receptive-driven pseudo label consistency and structural consistency to mine potential knowledge. Specifically, we propose three consistency contrains: pseudo label consistency among different scales, semantic structure consistency between intra-class features and class-level relation structure consistency between pair-wise categories. Three consistency constraints are jointly used to effectively prepares and utilizes pseudo labels simultaneously for stable training. 
Finally, extensive experimental results on three challenging datasets demonstrate that our method significantly outperforms state-of-the-art weakly supervised methods and even achieves comparable performance to the fully supervised methods.", + "primary_area": "computer vision i", + "author": "Yuxiang Lan; Yachao Zhang; Yanyun Qu; Cong Wang; Chengyang Li; Jia Cai; Yuan Xie; Zongze Wu", + "authorids": "", + "aff": "School of Informatics, Xiamen University, Fujian, China; School of Informatics, Xiamen University, Fujian, China; School of Informatics, Xiamen University, Fujian, China; Huawei Technologies, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Mechatronics and Control Engineering, Shenzhen University, Guangdong, China", + "bibtex": "@article{Lan_Zhang_Qu_Wang_Li_Cai_Xie_Wu_2023, title={Weakly Supervised 3D Segmentation via Receptive-Driven Pseudo Label Consistency and Structural Consistency}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25205}, DOI={10.1609/aaai.v37i1.25205}, abstractNote={As manual point-wise label is time and labor-intensive for fully supervised large-scale point cloud semantic segmentation, weakly supervised method is increasingly active. However, existing methods fail to generate high-quality pseudo labels effectively, leading to unsatisfactory results. In this paper, we propose a weakly supervised point cloud semantic segmentation framework via receptive-driven pseudo label consistency and structural consistency to mine potential knowledge. 
Specifically, we propose three consistency contrains: pseudo label consistency among different scales, semantic structure consistency between intra-class features and class-level relation structure consistency between pair-wise categories. Three consistency constraints are jointly used to effectively prepares and utilizes pseudo labels simultaneously for stable training. Finally, extensive experimental results on three challenging datasets demonstrate that our method significantly outperforms state-of-the-art weakly supervised methods and even achieves comparable performance to the fully supervised methods.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lan, Yuxiang and Zhang, Yachao and Qu, Yanyun and Wang, Cong and Li, Chengyang and Cai, Jia and Xie, Yuan and Wu, Zongze}, year={2023}, month={Jun.}, pages={1222-1230} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25205/24977", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25205", + "pdf_size": 2494631, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12069796713139968932&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn; ;cs.ecnu.edu.cn; ; ; ", + "email": "stu.xmu.edu.cn;stu.xmu.edu.cn;xmu.edu.cn; ;cs.ecnu.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;2;2;2;3", + "aff_unique_norm": "Xiamen University;Huawei Technologies;East China Normal University;Shenzhen University", + "aff_unique_dep": "School of Informatics;;School of Computer Science and Technology;School of Mechatronics and Control Engineering", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.huawei.com;http://www.ecnu.edu.cn;https://www.szu.edu.cn", + "aff_unique_abbr": "XMU;Huawei;ECNU;SZU", + "aff_campus_unique_index": "0;0;0;1;1;1;1;2", + "aff_campus_unique": "Xiamen;Shanghai;Shenzhen", + "aff_country_unique_index": 
"0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25189", + "title": "Weakly-Guided Self-Supervised Pretraining for Temporal Activity Detection", + "track": "main", + "status": "Technical", + "abstract": "Temporal Activity Detection aims to predict activity classes per frame, in contrast to video-level predictions in Activity Classification (i.e., Activity Recognition). Due to the expensive frame-level annotations required for detection, the scale of detection datasets is limited. Thus, commonly, previous work on temporal activity detection resorts to fine-tuning a classification model pretrained on large-scale classification datasets (e.g., Kinetics-400). However, such pretrained models are not ideal for downstream detection, due to the disparity between the pretraining and the downstream fine-tuning tasks. In this work, we propose a novel weakly-guided self-supervised pretraining method for detection. We leverage weak labels (classification) to introduce a self-supervised pretext task (detection) by generating frame-level pseudo labels, multi-action frames, and action segments. Simply put, we design a detection task similar to downstream, on large-scale classification data, without extra annotations. We show that the models pretrained with the proposed weakly-guided self-supervised detection task outperform prior work on multiple challenging activity detection benchmarks, including Charades and MultiTHUMOS. Our extensive ablations further provide insights on when and how to use the proposed models for activity detection. Code is available at github.com/kkahatapitiya/SSDet.", + "primary_area": "computer vision i", + "author": "Kumara Kahatapitiya; Zhou Ren; Haoxiang Li; Zhenyu Wu; Michael S. 
Ryoo; Gang Hua", + "authorids": "", + "aff": "Stony Brook University; Wormpex AI Research; Wormpex AI Research; Wormpex AI Research; Stony Brook University; Wormpex AI Research", + "bibtex": "@article{Kahatapitiya_Ren_Li_Wu_Ryoo_Hua_2023, title={Weakly-Guided Self-Supervised Pretraining for Temporal Activity Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25189}, DOI={10.1609/aaai.v37i1.25189}, abstractNote={Temporal Activity Detection aims to predict activity classes per frame, in contrast to video-level predictions in Activity Classification (i.e., Activity Recognition). Due to the expensive frame-level annotations required for detection, the scale of detection datasets is limited. Thus, commonly, previous work on temporal activity detection resorts to fine-tuning a classification model pretrained on large-scale classification datasets (e.g., Kinetics-400). However, such pretrained models are not ideal for downstream detection, due to the disparity between the pretraining and the downstream fine-tuning tasks. In this work, we propose a novel weakly-guided self-supervised pretraining method for detection. We leverage weak labels (classification) to introduce a self-supervised pretext task (detection) by generating frame-level pseudo labels, multi-action frames, and action segments. Simply put, we design a detection task similar to downstream, on large-scale classification data, without extra annotations. We show that the models pretrained with the proposed weakly-guided self-supervised detection task outperform prior work on multiple challenging activity detection benchmarks, including Charades and MultiTHUMOS. Our extensive ablations further provide insights on when and how to use the proposed models for activity detection. 
Code is available at github.com/kkahatapitiya/SSDet.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Kahatapitiya, Kumara and Ren, Zhou and Li, Haoxiang and Wu, Zhenyu and Ryoo, Michael S. and Hua, Gang}, year={2023}, month={Jun.}, pages={1078-1086} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25189/24961", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25189", + "pdf_size": 769654, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1386321260970226504&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "; ; ; ; ; ", + "email": "; ; ; ; ; ", + "github": "github.com/kkahatapitiya/SSDet", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;0;1", + "aff_unique_norm": "Stony Brook University;Wormpex AI Research", + "aff_unique_dep": ";AI Research", + "aff_unique_url": "https://www.stonybrook.edu;", + "aff_unique_abbr": "SBU;Wormpex AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25156", + "title": "Weakly-Supervised Camouflaged Object Detection with Scribble Annotations", + "track": "main", + "status": "Technical", + "abstract": "Existing camouflaged object detection (COD) methods rely heavily on large-scale datasets with pixel-wise annotations. However, due to the ambiguous boundary, annotating camouflage objects pixel-wisely is very time-consuming and labor-intensive, taking ~60mins to label one image. In this paper, we propose the first weakly-supervised COD method, using scribble annotations as supervision. To achieve this, we first relabel 4,040 images in existing camouflaged object datasets with scribbles, which takes ~10s to label one image. 
As scribble annotations only describe the primary structure of objects without details, for the network to learn to localize the boundaries of camouflaged objects, we propose a novel consistency loss composed of two parts: a cross-view loss to attain reliable consistency over different images, and an inside-view loss to maintain consistency inside a single prediction map. Besides, we observe that humans use semantic information to segment regions near the boundaries of camouflaged objects. Hence, we further propose a feature-guided loss, which includes visual features directly extracted from images and semantically significant features captured by the model. Finally, we propose a novel network for COD via scribble learning on structural information and semantic relations. Our network has two novel modules: the local-context contrasted (LCC) module, which mimics visual inhibition to enhance image contrast/sharpness and expand the scribbles into potential camouflaged regions, and the logical semantic relation (LSR) module, which analyzes the semantic relation to determine the regions representing the camouflaged object. Experimental results show that our model outperforms relevant SOTA methods on three COD benchmarks with an average improvement of 11.0% on MAE, 3.2% on S-measure, 2.5% on E-measure, and 4.4% on weighted F-measure.", + "primary_area": "computer vision i", + "author": "Ruozhen He; Qihua Dong; Jiaying Lin; Rynson W.H. Lau", + "authorids": "", + "aff": "Department of Computer Science, City University of Hong Kong; Department of Computer Science, City University of Hong Kong; Department of Computer Science, City University of Hong Kong; Department of Computer Science, City University of Hong Kong", + "bibtex": "@article{He_Dong_Lin_W.H. 
Lau_2023, title={Weakly-Supervised Camouflaged Object Detection with Scribble Annotations}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25156}, DOI={10.1609/aaai.v37i1.25156}, abstractNote={Existing camouflaged object detection (COD) methods rely heavily on large-scale datasets with pixel-wise annotations. However, due to the ambiguous boundary, annotating camouflage objects pixel-wisely is very time-consuming and labor-intensive, taking ~60mins to label one image. In this paper, we propose the first weakly-supervised COD method, using scribble annotations as supervision. To achieve this, we first relabel 4,040 images in existing camouflaged object datasets with scribbles, which takes ~10s to label one image. As scribble annotations only describe the primary structure of objects without details, for the network to learn to localize the boundaries of camouflaged objects, we propose a novel consistency loss composed of two parts: a cross-view loss to attain reliable consistency over different images, and an inside-view loss to maintain consistency inside a single prediction map. Besides, we observe that humans use semantic information to segment regions near the boundaries of camouflaged objects. Hence, we further propose a feature-guided loss, which includes visual features directly extracted from images and semantically significant features captured by the model. Finally, we propose a novel network for COD via scribble learning on structural information and semantic relations. Our network has two novel modules: the local-context contrasted (LCC) module, which mimics visual inhibition to enhance image contrast/sharpness and expand the scribbles into potential camouflaged regions, and the logical semantic relation (LSR) module, which analyzes the semantic relation to determine the regions representing the camouflaged object. 
Experimental results show that our model outperforms relevant SOTA methods on three COD benchmarks with an average improvement of 11.0% on MAE, 3.2% on S-measure, 2.5% on E-measure, and 4.4% on weighted F-measure.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Ruozhen and Dong, Qihua and Lin, Jiaying and W.H. Lau, Rynson}, year={2023}, month={Jun.}, pages={781-789} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25156/24928", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25156", + "pdf_size": 4992227, + "gs_citation": 90, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13451998347472584162&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "my.cityu.edu.hk;my.cityu.edu.hk;my.cityu.edu.hk;cityu.edu.hk", + "email": "my.cityu.edu.hk;my.cityu.edu.hk;my.cityu.edu.hk;cityu.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "City University of Hong Kong", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.cityu.edu.hk", + "aff_unique_abbr": "CityU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25136", + "title": "Weakly-Supervised Semantic Segmentation for Histopathology Images Based on Dataset Synthesis and Feature Consistency Constraint", + "track": "main", + "status": "Technical", + "abstract": "Tissue segmentation is a critical task in computational pathology due to its desirable ability to indicate the prognosis of cancer patients. Currently, numerous studies attempt to use image-level labels to achieve pixel-level segmentation to reduce the need for fine annotations. However, most of these methods are based on class activation map, which suffers from inaccurate segmentation boundaries. 
To address this problem, we propose a novel weakly-supervised tissue segmentation framework named PistoSeg, which is implemented under a fully-supervised manner by transferring tissue category labels to pixel-level masks. Firstly, a dataset synthesis method is proposed based on Mosaic transformation to generate synthesized images with pixel-level masks. Next, considering the difference between synthesized and real images, this paper devises an attention-based feature consistency, which directs the training process of a proposed pseudo-mask refining module. Finally, the refined pseudo-masks are used to train a precise segmentation model for testing. Experiments based on WSSS4LUAD and BCSS-WSSS validate that PistoSeg outperforms the state-of-the-art methods. The code is released at https://github.com/Vison307/PistoSeg.", + "primary_area": "computer vision i", + "author": "Zijie Fang; Yang Chen; Yifeng Wang; Zhi Wang; Xiangyang Ji; Yongbing Zhang", + "authorids": "", + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Harbin Institute of Technology (Shenzhen); Tsinghua Shenzhen International Graduate School, Tsinghua University; Department of Automation, Tsinghua University; Harbin Institute of Technology (Shenzhen)", + "bibtex": "@article{Fang_Chen_Wang_Wang_Ji_Zhang_2023, title={Weakly-Supervised Semantic Segmentation for Histopathology Images Based on Dataset Synthesis and Feature Consistency Constraint}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25136}, DOI={10.1609/aaai.v37i1.25136}, abstractNote={Tissue segmentation is a critical task in computational pathology due to its desirable ability to indicate the prognosis of cancer patients. Currently, numerous studies attempt to use image-level labels to achieve pixel-level segmentation to reduce the need for fine annotations. 
However, most of these methods are based on class activation map, which suffers from inaccurate segmentation boundaries. To address this problem, we propose a novel weakly-supervised tissue segmentation framework named PistoSeg, which is implemented under a fully-supervised manner by transferring tissue category labels to pixel-level masks. Firstly, a dataset synthesis method is proposed based on Mosaic transformation to generate synthesized images with pixel-level masks. Next, considering the difference between synthesized and real images, this paper devises an attention-based feature consistency, which directs the training process of a proposed pseudo-mask refining module. Finally, the refined pseudo-masks are used to train a precise segmentation model for testing. Experiments based on WSSS4LUAD and BCSS-WSSS validate that PistoSeg outperforms the state-of-the-art methods. The code is released at https://github.com/Vison307/PistoSeg.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Fang, Zijie and Chen, Yang and Wang, Yifeng and Wang, Zhi and Ji, Xiangyang and Zhang, Yongbing}, year={2023}, month={Jun.}, pages={606-613} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25136/24908", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25136", + "pdf_size": 3913076, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12440766251830651096&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;mails.tsinghua.edu.cn;stu.hit.edu.cn;sz.tsinghua.edu.cn; ;hit.edu.cn", + "email": "gmail.com;mails.tsinghua.edu.cn;stu.hit.edu.cn;sz.tsinghua.edu.cn; ;hit.edu.cn", + "github": "https://github.com/Vison307/PistoSeg", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;1", + "aff_unique_norm": "Tsinghua University;Harbin Institute of Technology", + "aff_unique_dep": "International Graduate School;", + "aff_unique_url": 
"https://www.tsinghua.edu.cn;http://en.hhit.edu.cn/", + "aff_unique_abbr": "THU;HIT", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26696", + "title": "Weather2vec: Representation Learning for Causal Inference with Non-local Confounding in Air Pollution and Climate Studies", + "track": "aaai special track", + "status": "Technical", + "abstract": "Estimating the causal effects of a spatially-varying intervention on a spatially-varying outcome may be subject to non-local confounding (NLC), a phenomenon that can bias estimates when the treatments and outcomes of a given unit are dictated in part by the covariates of other nearby units. In particular, NLC is a challenge for evaluating the effects of environmental policies and climate events on health-related outcomes such as air pollution exposure. This paper first formalizes NLC using the potential outcomes framework, providing a comparison with the related phenomenon of causal interference. Then, it proposes a broadly applicable framework, termed weather2vec, that uses the theory of balancing scores to learn representations of non-local information into a scalar or vector defined for each observational unit, which is subsequently used to adjust for confounding in conjunction with causal inference methods. The framework is evaluated in a simulation study and two case studies on air pollution where the weather is an (inherently regional) known confounder.", + "primary_area": "ai for social impact", + "author": "Mauricio Tec; James G. Scott; Corwin M. 
Zigler", + "authorids": "", + "aff": "Department of Biostatistics, Harvard University; Department of Statistics and Data Sciences, The University of Texas at Austin+Department of Information, Risk, and Operations Management, The University of Texas at Austin; Department of Statistics and Data Sciences, The University of Texas at Austin", + "bibtex": "@article{Tec_Scott_Zigler_2023, title={Weather2vec: Representation Learning for Causal Inference with Non-local Confounding in Air Pollution and Climate Studies}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26696}, DOI={10.1609/aaai.v37i12.26696}, abstractNote={Estimating the causal effects of a spatially-varying intervention on a spatially-varying outcome may be subject to non-local confounding (NLC), a phenomenon that can bias estimates when the treatments and outcomes of a given unit are dictated in part by the covariates of other nearby units. In particular, NLC is a challenge for evaluating the effects of environmental policies and climate events on health-related outcomes such as air pollution exposure. This paper first formalizes NLC using the potential outcomes framework, providing a comparison with the related phenomenon of causal interference. Then, it proposes a broadly applicable framework, termed weather2vec, that uses the theory of balancing scores to learn representations of non-local information into a scalar or vector defined for each observational unit, which is subsequently used to adjust for confounding in conjunction with causal inference methods. The framework is evaluated in a simulation study and two case studies on air pollution where the weather is an (inherently regional) known confounder.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tec, Mauricio and Scott, James G. 
and Zigler, Corwin M.}, year={2023}, month={Jun.}, pages={14504-14513} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26696/26468", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26696", + "pdf_size": 616181, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6706534854040859134&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff_domain": "hsph.harvard.edu;mccombs.utexas.edu;austin.utexas.edu", + "email": "hsph.harvard.edu;mccombs.utexas.edu;austin.utexas.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+1;1", + "aff_unique_norm": "Harvard University;The University of Texas at Austin", + "aff_unique_dep": "Department of Biostatistics;Department of Statistics and Data Sciences", + "aff_unique_url": "https://www.harvard.edu;https://www.utexas.edu", + "aff_unique_abbr": "Harvard;UT Austin", + "aff_campus_unique_index": "0;1+1;1", + "aff_campus_unique": "Cambridge;Austin", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26090", + "title": "Weight Predictor Network with Feature Selection for Small Sample Tabular Biomedical Data", + "track": "main", + "status": "Technical", + "abstract": "Tabular biomedical data is often high-dimensional but with a very small number of samples. Although recent work showed that well-regularised simple neural networks could outperform more sophisticated architectures on tabular data, they are still prone to overfitting on tiny datasets with many potentially irrelevant features. To combat these issues, we propose Weight Predictor Network with Feature Selection (WPFS) for learning neural networks from high-dimensional and small sample data by reducing the number of learnable parameters and simultaneously performing feature selection. 
In addition to the classification network, WPFS uses two small auxiliary networks that together output the weights of the first layer of the classification model. We evaluate on nine real-world biomedical datasets and demonstrate that WPFS outperforms other standard as well as more recent methods typically applied to tabular data. Furthermore, we investigate the proposed feature selection mechanism and show that it improves performance while providing useful insights into the learning task.", + "primary_area": "machine learning iii", + "author": "Andrei Margeloiu; Nikola Simidjievski; Pietro Li\u00f2; Mateja Jamnik", + "authorids": "", + "aff": "Department of Computer Science and Technology, University of Cambridge, UK; Department of Computer Science and Technology, University of Cambridge, UK; Department of Computer Science and Technology, University of Cambridge, UK; Department of Computer Science and Technology, University of Cambridge, UK", + "bibtex": "@article{Margeloiu_Simidjievski_Li\u00f2_Jamnik_2023, title={Weight Predictor Network with Feature Selection for Small Sample Tabular Biomedical Data}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26090}, DOI={10.1609/aaai.v37i8.26090}, abstractNote={Tabular biomedical data is often high-dimensional but with a very small number of samples. Although recent work showed that well-regularised simple neural networks could outperform more sophisticated architectures on tabular data, they are still prone to overfitting on tiny datasets with many potentially irrelevant features. To combat these issues, we propose Weight Predictor Network with Feature Selection (WPFS) for learning neural networks from high-dimensional and small sample data by reducing the number of learnable parameters and simultaneously performing feature selection. In addition to the classification network, WPFS uses two small auxiliary networks that together output the weights of the first layer of the classification model. 
We evaluate on nine real-world biomedical datasets and demonstrate that WPFS outperforms other standard as well as more recent methods typically applied to tabular data. Furthermore, we investigate the proposed feature selection mechanism and show that it improves performance while providing useful insights into the learning task.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Margeloiu, Andrei and Simidjievski, Nikola and Li\u00f2, Pietro and Jamnik, Mateja}, year={2023}, month={Jun.}, pages={9081-9089} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26090/25862", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26090", + "pdf_size": 582610, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14725003980711557573&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk", + "email": "cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk", + "github": "https://github.com/andreimargeloiu/WPFS", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "Department of Computer Science and Technology", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "article-26130", + "title": "Weighted Policy Constraints for Offline Reinforcement Learning", + "track": "main", + "status": "Technical", + "abstract": "Offline reinforcement learning (RL) aims to learn policy from the passively collected offline dataset. Applying existing RL methods on the static dataset straightforwardly will raise distribution shift, causing these unconstrained RL methods to fail. 
To cope with the distribution shift problem, a common practice in offline RL is to constrain the policy explicitly or implicitly close to behavioral policy. However, the available dataset usually contains sub-optimal or inferior actions, constraining the policy near all these actions will make the policy inevitably learn inferior behaviors, limiting the performance of the algorithm. Based on this observation, we propose a weighted policy constraints (wPC) method that only constrains the learned policy to desirable behaviors, making room for policy improvement on other parts. Our algorithm outperforms existing state-of-the-art offline RL algorithms on the D4RL offline gym datasets. Moreover, the proposed algorithm is simple to implement with few hyper-parameters, making the proposed wPC algorithm a robust offline RL method with low computational complexity.", + "primary_area": "machine learning iii", + "author": "Zhiyong Peng; Changlin Han; Yadong Liu; Zongtan Zhou", + "authorids": "", + "aff": "College of Intelligence Science and Technology, National University of Defense Technology, Changsha, China; College of Intelligence Science and Technology, National University of Defense Technology, Changsha, China; College of Intelligence Science and Technology, National University of Defense Technology, Changsha, China; College of Intelligence Science and Technology, National University of Defense Technology, Changsha, China", + "bibtex": "@article{Peng_Han_Liu_Zhou_2023, title={Weighted Policy Constraints for Offline Reinforcement Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26130}, DOI={10.1609/aaai.v37i8.26130}, abstractNote={Offline reinforcement learning (RL) aims to learn policy from the passively collected offline dataset. Applying existing RL methods on the static dataset straightforwardly will raise distribution shift, causing these unconstrained RL methods to fail. 
To cope with the distribution shift problem, a common practice in offline RL is to constrain the policy explicitly or implicitly close to behavioral policy. However, the available dataset usually contains sub-optimal or inferior actions, constraining the policy near all these actions will make the policy inevitably learn inferior behaviors, limiting the performance of the algorithm. Based on this observation, we propose a weighted policy constraints (wPC) method that only constrains the learned policy to desirable behaviors, making room for policy improvement on other parts. Our algorithm outperforms existing state-of-the-art offline RL algorithms on the D4RL offline gym datasets. Moreover, the proposed algorithm is simple to implement with few hyper-parameters, making the proposed wPC algorithm a robust offline RL method with low computational complexity.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Peng, Zhiyong and Han, Changlin and Liu, Yadong and Zhou, Zongtan}, year={2023}, month={Jun.}, pages={9435-9443} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26130/25902", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26130", + "pdf_size": 2242674, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15842028291224457048&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff_domain": "qq.com;foxmail.com;nudt.edu.cn;163.com", + "email": "qq.com;foxmail.com;nudt.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "National University of Defense Technology", + "aff_unique_dep": "College of Intelligence Science and Technology", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Changsha", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26166", + "title": "What Do You 
MEME? Generating Explanations for Visual Semantic Role Labelling in Memes", + "track": "main", + "status": "Technical", + "abstract": "Memes are powerful means for effective communication on social media. Their effortless amalgamation of viral visuals and compelling messages can have far-reaching implications with proper marketing. Previous research on memes has primarily focused on characterizing their affective spectrum and detecting whether the meme's message insinuates any intended harm, such as hate, offense, racism, etc. However, memes often use abstraction, which can be elusive. Here, we introduce a novel task - EXCLAIM, generating explanations for visual semantic role labeling in memes. To this end, we curate ExHVV, a novel dataset that offers natural language explanations of connotative roles for three types of entities - heroes, villains, and victims, encompassing 4,680 entities present in 3K memes. We also benchmark ExHVV with several strong unimodal and multimodal baselines. Moreover, we posit LUMEN, a novel multimodal, multi-task learning framework that endeavors to address EXCLAIM optimally by jointly learning to predict the correct semantic roles and correspondingly to generate suitable natural language explanations. LUMEN distinctly outperforms the best baseline across 18 standard natural language generation evaluation metrics. Our systematic evaluation and analyses demonstrate that characteristic multimodal cues required for adjudicating semantic roles are also helpful for generating suitable explanations.", + "primary_area": "machine learning iii", + "author": "Shivam Sharma; Siddhant Agarwal; Tharun Suresh; Preslav Nakov; Md. 
Shad Akhtar; Tanmoy Chakraborty", + "authorids": "", + "aff": "Indraprastha Institute of Information Technology Delhi, India+Wipro AI Labs (Lab45), India; Indraprastha Institute of Information Technology Delhi, India; Indraprastha Institute of Information Technology Delhi, India; Mohamed bin Zayed University of Artificial Intelligence, UAE; Indraprastha Institute of Information Technology Delhi, India; Indian Institute of Technology Delhi, India", + "bibtex": "@article{Sharma_Agarwal_Suresh_Nakov_Akhtar_Chakraborty_2023, title={What Do You MEME? Generating Explanations for Visual Semantic Role Labelling in Memes}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26166}, DOI={10.1609/aaai.v37i8.26166}, abstractNote={Memes are powerful means for effective communication on social media. Their effortless amalgamation of viral visuals and compelling messages can have far-reaching implications with proper marketing. Previous research on memes has primarily focused on characterizing their affective spectrum and detecting whether the meme\u2019s message insinuates any intended harm, such as hate, offense, racism, etc. However, memes often use abstraction, which can be elusive. Here, we introduce a novel task - EXCLAIM, generating explanations for visual semantic role labeling in memes. To this end, we curate ExHVV, a novel dataset that offers natural language explanations of connotative roles for three types of entities - heroes, villains, and victims, encompassing 4,680 entities present in 3K memes. We also benchmark ExHVV with several strong unimodal and multimodal baselines. Moreover, we posit LUMEN, a novel multimodal, multi-task learning framework that endeavors to address EXCLAIM optimally by jointly learning to predict the correct semantic roles and correspondingly to generate suitable natural language explanations. LUMEN distinctly outperforms the best baseline across 18 standard natural language generation evaluation metrics. 
Our systematic evaluation and analyses demonstrate that characteristic multimodal cues required for adjudicating semantic roles are also helpful for generating suitable explanations.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Sharma, Shivam and Agarwal, Siddhant and Suresh, Tharun and Nakov, Preslav and Akhtar, Md. Shad and Chakraborty, Tanmoy}, year={2023}, month={Jun.}, pages={9763-9771} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26166/25938", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26166", + "pdf_size": 1698147, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7095886564453789013&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff_domain": "iiitd.ac.in;iiitd.ac.in;iiitd.ac.in;mbzuai.ac.ae;iiitd.ac.in;iitd.ac.in", + "email": "iiitd.ac.in;iiitd.ac.in;iiitd.ac.in;mbzuai.ac.ae;iiitd.ac.in;iitd.ac.in", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;2;0;3", + "aff_unique_norm": "Indraprastha Institute of Information Technology;Wipro;Mohamed bin Zayed University of Artificial Intelligence;Indian Institute of Technology Delhi", + "aff_unique_dep": ";Wipro AI Labs;;", + "aff_unique_url": "https://www.iiitd.ac.in;https://www.wipro.com/;https://mbzuai.ac.ae;https://www.iitdelhi.ac.in", + "aff_unique_abbr": "IIIT Delhi;Wipro;MBZUAI;IIT Delhi", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Delhi;", + "aff_country_unique_index": "0+0;0;0;1;0;0", + "aff_country_unique": "India;United Arab Emirates" + }, + { + "id": "article-26628", + "title": "What Does Your Face Sound Like? 3D Face Shape towards Voice", + "track": "main", + "status": "Technical", + "abstract": "Face-based speech synthesis provides a practical solution to generate voices from human faces. However, directly using 2D face images leads to the problems of uninterpretability and entanglement. 
In this paper, to address the issues, we introduce 3D face shape which (1) has an anatomical relationship between voice characteristics, partaking in the \"bone conduction\" of human timbre production, and (2) is naturally independent of irrelevant factors by excluding the blending process. We devise a three-stage framework to generate speech from 3D face shapes. Fully considering timbre production in anatomical and acquired terms, our framework incorporates three additional relevant attributes including face texture, facial features, and demographics. Experiments and subjective tests demonstrate our method can generate utterances matching faces well, with good audio quality and voice diversity. We also explore and visualize how the voice changes with the face. Case studies show that our method upgrades the face-voice inference to personalized custom-made voice creating, revealing a promising prospect in virtual human and dubbing applications.", + "primary_area": "speech natural language processing", + "author": "Zhihan Yang; Zhiyong Wu; Ying Shan; Jia Jia", + "authorids": "", + "aff": "Shenzhen International Graduate School, Tsinghua University; Shenzhen International Graduate School, Tsinghua University; Applied Research Center (ARC), Tencent PCG; Department of Computer Science and Technology, Tsinghua University", + "bibtex": "@article{Yang_Wu_Shan_Jia_2023, title={What Does Your Face Sound Like? 3D Face Shape towards Voice}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26628}, DOI={10.1609/aaai.v37i11.26628}, abstractNote={Face-based speech synthesis provides a practical solution to generate voices from human faces. However, directly using 2D face images leads to the problems of uninterpretability and entanglement. 
In this paper, to address the issues, we introduce 3D face shape which (1) has an anatomical relationship between voice characteristics, partaking in the "bone conduction" of human timbre production, and (2) is naturally independent of irrelevant factors by excluding the blending process. We devise a three-stage framework to generate speech from 3D face shapes. Fully considering timbre production in anatomical and acquired terms, our framework incorporates three additional relevant attributes including face texture, facial features, and demographics. Experiments and subjective tests demonstrate our method can generate utterances matching faces well, with good audio quality and voice diversity. We also explore and visualize how the voice changes with the face. Case studies show that our method upgrades the face-voice inference to personalized custom-made voice creating, revealing a promising prospect in virtual human and dubbing applications.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Zhihan and Wu, Zhiyong and Shan, Ying and Jia, Jia}, year={2023}, month={Jun.}, pages={13905-13913} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26628/26400", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26628", + "pdf_size": 2213264, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4435112683875324131&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "mails.tsinghua.edu.cn;sz.tsinghua.edu.cn; ;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;sz.tsinghua.edu.cn; ;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Tsinghua University;Tencent", + "aff_unique_dep": "Shenzhen International Graduate School;Applied Research Center (ARC)", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "THU;Tencent ARC", + 
"aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-25712", + "title": "When Congestion Games Meet Mobile Crowdsourcing: Selective Information Disclosure", + "track": "main", + "status": "Technical", + "abstract": "In congestion games, users make myopic routing decisions to jam each other, and the social planner with the full information designs mechanisms on information or payment side to regulate. However, it is difficult to obtain time-varying traffic conditions, and emerging crowdsourcing platforms (e.g., Waze and Google Maps) provide a convenient way for mobile users travelling on the paths to learn and share the traffic conditions over time. When congestion games meet mobile crowdsourcing, it is critical to incentive selfish users to change their myopic routing policy and reach the best exploitation-exploration trade-off. By considering a simple but fundamental parallel routing network with one deterministic path and multiple stochastic paths for atomic users, we prove that the myopic routing policy's price of anarchy (PoA) can be arbitrarily large as the discount factor approaches 1. To remedy such huge efficiency loss, we propose a selective information disclosure (SID) mechanism: we only reveal the latest traffic information to users when they intend to over-explore the stochastic paths, while hiding such information when they want to under-explore. We prove that our mechanism reduces PoA to less than 2. 
Besides the worst-case performance, we further examine our mechanism's average-case performance by using extensive simulations.", + "primary_area": "game theory and economic paradigms", + "author": "Hongbo Li; Lingjie Duan", + "authorids": "", + "aff": "Singapore University of Technology and Design; Singapore University of Technology and Design", + "bibtex": "@article{Li_Duan_2023, title={When Congestion Games Meet Mobile Crowdsourcing: Selective Information Disclosure}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25712}, DOI={10.1609/aaai.v37i5.25712}, abstractNote={In congestion games, users make myopic routing decisions to jam each other, and the social planner with the full information designs mechanisms on information or payment side to regulate. However, it is difficult to obtain time-varying traffic conditions, and emerging crowdsourcing platforms (e.g., Waze and Google Maps) provide a convenient way for mobile users travelling on the paths to learn and share the traffic conditions over time. When congestion games meet mobile crowdsourcing, it is critical to incentive selfish users to change their myopic routing policy and reach the best exploitation-exploration trade-off. By considering a simple but fundamental parallel routing network with one deterministic path and multiple stochastic paths for atomic users, we prove that the myopic routing policy\u2019s price of anarchy (PoA) can be arbitrarily large as the discount factor approaches 1. To remedy such huge efficiency loss, we propose a selective information disclosure (SID) mechanism: we only reveal the latest traffic information to users when they intend to over-explore the stochastic paths, while hiding such information when they want to under-explore. We prove that our mechanism reduces PoA to less than 2. 
Besides the worst-case performance, we further examine our mechanism\u2019s average-case performance by using extensive simulations.}, number={5}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Hongbo and Duan, Lingjie}, year={2023}, month={Jun.}, pages={5739-5746} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25712/25484", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25712", + "pdf_size": 1610481, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11715217099540013277&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "mymail.sutd.edu.sg;sutd.edu.sg", + "email": "mymail.sutd.edu.sg;sutd.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Singapore University of Technology and Design", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sutd.edu.sg", + "aff_unique_abbr": "SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "article-26328", + "title": "When Neural Networks Fail to Generalize? A Model Sensitivity Perspective", + "track": "main", + "status": "Technical", + "abstract": "Domain generalization (DG) aims to train a model to perform well in unseen domains under different distributions. This paper considers a more realistic yet more challenging scenario, namely Single Domain Generalization (Single-DG), where only a single source domain is available for training. To tackle this challenge, we first try to understand when neural networks fail to generalize? We empirically ascertain a property of a model that correlates strongly with its generalization that we coin as \"model sensitivity\". Based on our analysis, we propose a novel strategy of Spectral Adversarial Data Augmentation (SADA) to generate augmented images targeted at the highly sensitive frequencies. 
Models trained with these hard-to-learn samples can effectively suppress the sensitivity in the frequency space, which leads to improved generalization performance. Extensive experiments on multiple public datasets demonstrate the superiority of our approach, which surpasses the state-of-the-art single-DG methods by up to 2.55%. The source code is available at https://github.com/DIAL-RPI/Spectral-Adversarial-Data-Augmentation.", + "primary_area": "machine learning iv", + "author": "Jiajin Zhang; Hanqing Chao; Amit Dhurandhar; Pin-Yu Chen; Ali Tajer; Yangyang Xu; Pingkun Yan", + "authorids": "", + "aff": "Department of Biomedical Engineering and Center for Biotechnology and Interdisciplinary Studies, Rensselaer Polytechnic Institute, Troy, NY , USA; Department of Biomedical Engineering and Center for Biotechnology and Interdisciplinary Studies, Rensselaer Polytechnic Institute, Troy, NY , USA; IBM Thomas J. Watson Research Center, Yorktown Heights, NY , USA; IBM Thomas J. Watson Research Center, Yorktown Heights, NY , USA; Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY , USA; Department of Mathematical Sciences, Rensselaer Polytechnic Institute, Troy, NY , USA; Department of Biomedical Engineering and Center for Biotechnology and Interdisciplinary Studies, Rensselaer Polytechnic Institute, Troy, NY , USA", + "bibtex": "@article{Zhang_Chao_Dhurandhar_Chen_Tajer_Xu_Yan_2023, title={When Neural Networks Fail to Generalize? A Model Sensitivity Perspective}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26328}, DOI={10.1609/aaai.v37i9.26328}, abstractNote={Domain generalization (DG) aims to train a model to perform well in unseen domains under different distributions. This paper considers a more realistic yet more challenging scenario, namely Single Domain Generalization (Single-DG), where only a single source domain is available for training. 
To tackle this challenge, we first try to understand when neural networks fail to generalize? We empirically ascertain a property of a model that correlates strongly with its generalization that we coin as "model sensitivity". Based on our analysis, we propose a novel strategy of Spectral Adversarial Data Augmentation (SADA) to generate augmented images targeted at the highly sensitive frequencies. Models trained with these hard-to-learn samples can effectively suppress the sensitivity in the frequency space, which leads to improved generalization performance. Extensive experiments on multiple public datasets demonstrate the superiority of our approach, which surpasses the state-of-the-art single-DG methods by up to 2.55%. The source code is available at https://github.com/DIAL-RPI/Spectral-Adversarial-Data-Augmentation.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Jiajin and Chao, Hanqing and Dhurandhar, Amit and Chen, Pin-Yu and Tajer, Ali and Xu, Yangyang and Yan, Pingkun}, year={2023}, month={Jun.}, pages={11219-11227} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26328/26100", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26328", + "pdf_size": 1326641, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6708452097114198231&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff_domain": "; ; ; ; ; ; ", + "email": "; ; ; ; ; ; ", + "github": "https://github.com/DIAL-RPI/Spectral-Adversarial-Data-Augmentation", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;1;0;0;0", + "aff_unique_norm": "Rensselaer Polytechnic Institute;IBM Thomas J. 
Watson Research Center", + "aff_unique_dep": "Department of Biomedical Engineering;", + "aff_unique_url": "https://www.rpi.edu;https://www.ibm.com/research/watson", + "aff_unique_abbr": "RPI;IBM Watson", + "aff_campus_unique_index": "0;0;1;1;0;0;0", + "aff_campus_unique": "Troy;Yorktown Heights", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26029", + "title": "When Online Learning Meets ODE: Learning without Forgetting on Variable Feature Space", + "track": "main", + "status": "Technical", + "abstract": "Machine learning systems that built upon varying feature space are ubiquitous across the world. When the set of practical or virtual features changes, the online learning approach can adjust the learned model accordingly rather than re-training from scratch and has been an attractive area of research. Despite its importance, most studies for algorithms that are capable of handling online features have no ensurance of stationarity point convergence, while the accuracy guaranteed methods are still limited to some simple cases such as L_1 or L_2 norms with square loss. To address this challenging problem, we develop an efficient Dynamic Feature Learning System (DFLS) to perform online learning on the unfixed feature set for more general statistical models and demonstrate how DFLS opens up many new applications. We are the first to achieve accurate & reliable feature-wise online learning for a broad class of models like logistic regression, spline interpolation, group Lasso and Poisson regression. By utilizing DFLS, the updated model is theoretically the same as the model trained from scratch using the entire new feature space. Specifically, we reparameterize the feature-varying procedure and devise the corresponding ordinary differential equation (ODE) system to compute the optimal solutions of the new model status. 
Simulation studies reveal that the proposed DFLS can substantially ease the computational cost without forgetting.", + "primary_area": "machine learning ii", + "author": "Diyang Li; Bin Gu", + "authorids": "", + "aff": "Nanjing University of Information Science & Technology, P.R.China; Nanjing University of Information Science & Technology, P.R.China + MBZUAI, United Arab Emirates", + "bibtex": "@article{Li_Gu_2023, title={When Online Learning Meets ODE: Learning without Forgetting on Variable Feature Space}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26029}, DOI={10.1609/aaai.v37i7.26029}, abstractNote={Machine learning systems that built upon varying feature space are ubiquitous across the world. When the set of practical or virtual features changes, the online learning approach can adjust the learned model accordingly rather than re-training from scratch and has been an attractive area of research. Despite its importance, most studies for algorithms that are capable of handling online features have no ensurance of stationarity point convergence, while the accuracy guaranteed methods are still limited to some simple cases such as L_1 or L_2 norms with square loss. To address this challenging problem, we develop an efficient Dynamic Feature Learning System (DFLS) to perform online learning on the unfixed feature set for more general statistical models and demonstrate how DFLS opens up many new applications. We are the first to achieve accurate & reliable feature-wise online learning for a broad class of models like logistic regression, spline interpolation, group Lasso and Poisson regression. By utilizing DFLS, the updated model is theoretically the same as the model trained from scratch using the entire new feature space. Specifically, we reparameterize the feature-varying procedure and devise the corresponding ordinary differential equation (ODE) system to compute the optimal solutions of the new model status. 
Simulation studies reveal that the proposed DFLS can substantially ease the computational cost without forgetting.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Li, Diyang and Gu, Bin}, year={2023}, month={Jun.}, pages={8545-8553} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26029/25801", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26029", + "pdf_size": 274358, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13931809886906920071&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "gmail.com;mbzuai.ac.ae", + "email": "gmail.com;mbzuai.ac.ae", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Nanjing University of Information Science & Technology;Mohamed Bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.nuist.edu.cn;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "NUIST;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": "China;United Arab Emirates" + }, + { + "id": "article-25855", + "title": "Where Will Players Move Next? Dynamic Graphs and Hierarchical Fusion for Movement Forecasting in Badminton", + "track": "main", + "status": "Technical", + "abstract": "Sports analytics has captured increasing attention since analysis of the various data enables insights for training strategies, player evaluation, etc. In this paper, we focus on predicting what types of returning strokes will be made, and where players will move to based on previous strokes. As this problem has not been addressed to date, movement forecasting can be tackled through sequence-based and graph-based models by formulating as a sequence prediction task. 
However, existing sequence-based models neglect the effects of interactions between players, and graph-based models still suffer from multifaceted perspectives on the next movement. Moreover, there is no existing work on representing strategic relations among players' shot types and movements. To address these challenges, we first introduce the procedure of the Player Movements (PM) graph to exploit the structural movements of players with strategic relations. Based on the PM graph, we propose a novel Dynamic Graphs and Hierarchical Fusion for Movement Forecasting model (DyMF) with interaction style extractors to capture the mutual interactions of players themselves and between both players within a rally, and dynamic players' tactics across time. In addition, hierarchical fusion modules are designed to incorporate the style influence of both players and rally interactions. Extensive experiments show that our model empirically outperforms both sequence- and graph-based methods and demonstrate the practical usage of movement forecasting. Code is available at https://github.com/wywyWang/CoachAI-Projects/tree/main/Movement%20Forecasting.", + "primary_area": "machine learning i", + "author": "Kai-Shiang Chang; Wei-Yao Wang; Wen-Chih Peng", + "authorids": "", + "aff": "National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan; National Yang Ming Chiao Tung University, Hsinchu, Taiwan", + "bibtex": "@article{Chang_Wang_Peng_2023, title={Where Will Players Move Next? Dynamic Graphs and Hierarchical Fusion for Movement Forecasting in Badminton}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25855}, DOI={10.1609/aaai.v37i6.25855}, abstractNote={Sports analytics has captured increasing attention since analysis of the various data enables insights for training strategies, player evaluation, etc. 
In this paper, we focus on predicting what types of returning strokes will be made, and where players will move to based on previous strokes. As this problem has not been addressed to date, movement forecasting can be tackled through sequence-based and graph-based models by formulating as a sequence prediction task. However, existing sequence-based models neglect the effects of interactions between players, and graph-based models still suffer from multifaceted perspectives on the next movement. Moreover, there is no existing work on representing strategic relations among players\u2019 shot types and movements. To address these challenges, we first introduce the procedure of the Player Movements (PM) graph to exploit the structural movements of players with strategic relations. Based on the PM graph, we propose a novel Dynamic Graphs and Hierarchical Fusion for Movement Forecasting model (DyMF) with interaction style extractors to capture the mutual interactions of players themselves and between both players within a rally, and dynamic players\u2019 tactics across time. In addition, hierarchical fusion modules are designed to incorporate the style influence of both players and rally interactions. Extensive experiments show that our model empirically outperforms both sequence- and graph-based methods and demonstrate the practical usage of movement forecasting. 
Code is available at https://github.com/wywyWang/CoachAI-Projects/tree/main/Movement%20Forecasting.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chang, Kai-Shiang and Wang, Wei-Yao and Peng, Wen-Chih}, year={2023}, month={Jun.}, pages={6998-7005} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25855/25627", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25855", + "pdf_size": 7061507, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13786616645112914002&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "nctu.edu.tw;nctu.edu.tw;cs.nycu.edu.tw", + "email": "nctu.edu.tw;nctu.edu.tw;cs.nycu.edu.tw", + "github": "https://github.com/wywyWang/CoachAI-Projects/tree/main/Movement%20Forecasting", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National Yang Ming Chiao Tung University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nycu.edu.tw", + "aff_unique_abbr": "NYCU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hsinchu", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-26590", + "title": "Which Shortcut Solution Do Question Answering Models Prefer to Learn?", + "track": "main", + "status": "Technical", + "abstract": "Question answering (QA) models for reading comprehension tend to exploit spurious correlations in training sets and thus learn shortcut solutions rather than the solutions intended by QA datasets.\nQA models that have learned shortcut solutions can achieve human-level performance in shortcut examples where shortcuts are valid, but these same behaviors degrade generalization potential on anti-shortcut examples where shortcuts are invalid.\nVarious methods have been proposed to mitigate this problem, but they do not fully take the characteristics of shortcuts themselves into account.\nWe assume that the 
learnability of shortcuts, i.e., how easy it is to learn a shortcut, is useful to mitigate the problem.\nThus, we first examine the learnability of the representative shortcuts on extractive and multiple-choice QA datasets.\nBehavioral tests using biased training sets reveal that shortcuts that exploit answer positions and word-label correlations are preferentially learned for extractive and multiple-choice QA, respectively.\nWe find that the more learnable a shortcut is, the flatter and deeper the loss landscape is around the shortcut solution in the parameter space.\nWe also find that the availability of the preferred shortcuts tends to make the task easier to perform from an information-theoretic viewpoint.\nLastly, we experimentally show that the learnability of shortcuts can be utilized to construct an effective QA training set; the more learnable a shortcut is, the smaller the proportion of anti-shortcut examples required to achieve comparable performance on shortcut and anti-shortcut examples.\nWe claim that the learnability of shortcuts should be considered when designing mitigation methods.", + "primary_area": "speech natural language processing", + "author": "Kazutoshi Shinoda; Saku Sugawara; Akiko Aizawa", + "authorids": "", + "aff": "The University of Tokyo + National Institute of Informatics; National Institute of Informatics; The University of Tokyo + National Institute of Informatics", + "bibtex": "@article{Shinoda_Sugawara_Aizawa_2023, title={Which Shortcut Solution Do Question Answering Models Prefer to Learn?}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26590}, DOI={10.1609/aaai.v37i11.26590}, abstractNote={Question answering (QA) models for reading comprehension tend to exploit spurious correlations in training sets and thus learn shortcut solutions rather than the solutions intended by QA datasets.\nQA models that have learned shortcut solutions can achieve human-level performance in shortcut examples where shortcuts are 
valid, but these same behaviors degrade generalization potential on anti-shortcut examples where shortcuts are invalid.\nVarious methods have been proposed to mitigate this problem, but they do not fully take the characteristics of shortcuts themselves into account.\nWe assume that the learnability of shortcuts, i.e., how easy it is to learn a shortcut, is useful to mitigate the problem.\nThus, we first examine the learnability of the representative shortcuts on extractive and multiple-choice QA datasets.\nBehavioral tests using biased training sets reveal that shortcuts that exploit answer positions and word-label correlations are preferentially learned for extractive and multiple-choice QA, respectively.\nWe find that the more learnable a shortcut is, the flatter and deeper the loss landscape is around the shortcut solution in the parameter space.\nWe also find that the availability of the preferred shortcuts tends to make the task easier to perform from an information-theoretic viewpoint.\nLastly, we experimentally show that the learnability of shortcuts can be utilized to construct an effective QA training set; the more learnable a shortcut is, the smaller the proportion of anti-shortcut examples required to achieve comparable performance on shortcut and anti-shortcut examples.\nWe claim that the learnability of shortcuts should be considered when designing mitigation methods.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shinoda, Kazutoshi and Sugawara, Saku and Aizawa, Akiko}, year={2023}, month={Jun.}, pages={13564-13572} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26590/26362", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26590", + "pdf_size": 10552573, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6734577210010894520&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "is.s.u-tokyo.ac.jp;nii.ac.jp;nii.ac.jp", + 
"email": "is.s.u-tokyo.ac.jp;nii.ac.jp;nii.ac.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0+1", + "aff_unique_norm": "University of Tokyo;National Institute of Informatics", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.nii.ac.jp/", + "aff_unique_abbr": "UTokyo;NII", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "article-26104", + "title": "Why Capsule Neural Networks Do Not Scale: Challenging the Dynamic Parse-Tree Assumption", + "track": "main", + "status": "Technical", + "abstract": "Capsule neural networks replace simple, scalar-valued neurons with vector-valued capsules. They are motivated by the pattern recognition system in the human brain, where complex objects are decomposed into a hierarchy of simpler object parts. Such a hierarchy is referred to as a parse-tree. Conceptually, capsule neural networks have been defined to mimic this behavior. The capsule neural network (CapsNet), by Sabour, Frosst, and Hinton, is the first actual implementation of the conceptual idea of capsule neural networks. CapsNets achieved state-of-the-art performance on simple image recognition tasks with fewer parameters and greater robustness to affine transformations than comparable approaches. This sparked extensive follow-up research. However, despite major efforts, no work was able to scale the CapsNet architecture to more reasonable-sized datasets. Here, we provide a reason for this failure and argue that it is most likely not possible to scale CapsNets beyond toy examples. In particular, we show that the concept of a parse-tree, the main idea behind capsule neuronal networks, is not present in CapsNets. 
We also show theoretically and experimentally that CapsNets suffer from a vanishing gradient problem that results in the starvation of many capsules during training.", + "primary_area": "machine learning iii", + "author": "Matthias Mitterreiter; Marcel Koch; Joachim Giesen; S\u00f6ren Laue", + "authorids": "", + "aff": "Friedrich-Schiller-University, Jena, Germany+Data Assessment Solutions GmbH, Hannover, Germany; Ernst Abbe University of Applied Sciences, Jena, Germany; Friedrich-Schiller-University, Jena, Germany; Technical University Kaiserslautern, Germany", + "bibtex": "@article{Mitterreiter_Koch_Giesen_Laue_2023, title={Why Capsule Neural Networks Do Not Scale: Challenging the Dynamic Parse-Tree Assumption}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26104}, DOI={10.1609/aaai.v37i8.26104}, abstractNote={Capsule neural networks replace simple, scalar-valued neurons with vector-valued capsules. They are motivated by the pattern recognition system in the human brain, where complex objects are decomposed into a hierarchy of simpler object parts. Such a hierarchy is referred to as a parse-tree. Conceptually, capsule neural networks have been defined to mimic this behavior. The capsule neural network (CapsNet), by Sabour, Frosst, and Hinton, is the first actual implementation of the conceptual idea of capsule neural networks. CapsNets achieved state-of-the-art performance on simple image recognition tasks with fewer parameters and greater robustness to affine transformations than comparable approaches. This sparked extensive follow-up research. However, despite major efforts, no work was able to scale the CapsNet architecture to more reasonable-sized datasets. Here, we provide a reason for this failure and argue that it is most likely not possible to scale CapsNets beyond toy examples. In particular, we show that the concept of a parse-tree, the main idea behind capsule neuronal networks, is not present in CapsNets. 
We also show theoretically and experimentally that CapsNets suffer from a vanishing gradient problem that results in the starvation of many capsules during training.}, number={8}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Mitterreiter, Matthias and Koch, Marcel and Giesen, Joachim and Laue, S\u00f6ren}, year={2023}, month={Jun.}, pages={9209-9216} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26104/25876", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26104", + "pdf_size": 564754, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5933462563721831535&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff_domain": "uni-jena.de;eah-jena.de;uni-jena.de;cs.uni-kl.de", + "email": "uni-jena.de;eah-jena.de;uni-jena.de;cs.uni-kl.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0;3", + "aff_unique_norm": "Friedrich-Schiller-University Jena;Data Assessment Solutions GmbH;Ernst Abbe University of Applied Sciences;Technical University of Kaiserslautern", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.uni-jena.de/;;https://www.eah-jena.de;https://www.tu-kl.de", + "aff_unique_abbr": "FSU Jena;;EAH Jena;TUK", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Jena;", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "article-25870", + "title": "Wiener Graph Deconvolutional Network Improves Graph Self-Supervised Learning", + "track": "main", + "status": "Technical", + "abstract": "Graph self-supervised learning (SSL) has been vastly employed to learn representations from unlabeled graphs. Existing methods can be roughly divided into predictive learning and contrastive learning, where the latter one attracts more research attention with better empirical performance. 
We argue that, however, predictive models weaponed with powerful decoder could achieve comparable or even better representation power than contrastive models. In this work, we propose a Wiener Graph Deconvolutional Network (WGDN), an augmentation-adaptive decoder empowered by graph wiener filter to perform information reconstruction. Theoretical analysis proves the superior reconstruction ability of graph wiener filter. Extensive experimental results on various datasets demonstrate the effectiveness of our approach.", + "primary_area": "machine learning i", + "author": "Jiashun Cheng; Man Li; Jia Li; Fugee Tsung", + "authorids": "", + "aff": "The Hong Kong University of Science and Technology (Guangzhou)+The Hong Kong University of Science and Technology; The Hong Kong University of Science and Technology (Guangzhou)+The Hong Kong University of Science and Technology; The Hong Kong University of Science and Technology; The Hong Kong University of Science and Technology (Guangzhou)+The Hong Kong University of Science and Technology", + "bibtex": "@article{Cheng_Li_Li_Tsung_2023, title={Wiener Graph Deconvolutional Network Improves Graph Self-Supervised Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25870}, DOI={10.1609/aaai.v37i6.25870}, abstractNote={Graph self-supervised learning (SSL) has been vastly employed to learn representations from unlabeled graphs. Existing methods can be roughly divided into predictive learning and contrastive learning, where the latter one attracts more research attention with better empirical performance. We argue that, however, predictive models weaponed with powerful decoder could achieve comparable or even better representation power than contrastive models. In this work, we propose a Wiener Graph Deconvolutional Network (WGDN), an augmentation-adaptive decoder empowered by graph wiener filter to perform information reconstruction. 
Theoretical analysis proves the superior reconstruction ability of graph wiener filter. Extensive experimental results on various datasets demonstrate the effectiveness of our approach.}, number={6}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cheng, Jiashun and Li, Man and Li, Jia and Tsung, Fugee}, year={2023}, month={Jun.}, pages={7131-7139} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25870/25642", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25870", + "pdf_size": 656847, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18268510377877337379&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "connect.ust.hk;connect.ust.hk;ust.hk;ust.hk", + "email": "connect.ust.hk;connect.ust.hk;ust.hk;ust.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;1;0+1", + "aff_unique_norm": "The Hong Kong University of Science and Technology;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ust.hk;https://www.ust.hk", + "aff_unique_abbr": "HKUST;HKUST", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-25531", + "title": "Win-Win: A Privacy-Preserving Federated Framework for Dual-Target Cross-Domain Recommendation", + "track": "main", + "status": "Technical", + "abstract": "Cross-domain recommendation (CDR) aims to alleviate the data sparsity by transferring knowledge from an informative source domain to the target domain, which inevitably proposes stern challenges to data privacy and transferability during the transfer process. 
A small amount of recent CDR works have investigated privacy protection, while they still suffer from satisfying practical requirements (e.g., limited privacy-preserving ability) and preventing the potential risk of negative transfer. To address the above challenging problems, we propose a novel and unified privacy-preserving federated framework for dual-target CDR, namely P2FCDR. We design P2FCDR as peer-to-peer federated network architecture to ensure the local data storage and privacy protection of business partners. Specifically, for the special knowledge transfer process in CDR under federated settings, we initialize an optimizable orthogonal mapping matrix to learn the embedding transformation across domains and adopt the local differential privacy technique on the transformed embedding before exchanging across domains, which provides more reliable privacy protection. Furthermore, we exploit the similarity between in-domain and cross-domain embedding, and develop a gated selecting vector to refine the information fusion for more accurate dual transfer. 
Extensive experiments on three real-world datasets demonstrate that P2FCDR significantly outperforms the state-of-the-art methods and effectively protects data privacy.", + "primary_area": "data mining and knowledge management", + "author": "Gaode Chen; Xinghua Zhang; Yijun Su; Yantong Lai; Ji Xiang; Junbo Zhang; Yu Zheng", + "authorids": "", + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China+JD iCity, JD Technology, Beijing, China+JD Intelligent Cities Research, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; JD iCity, JD Technology, Beijing, China+JD Intelligent Cities Research, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; JD iCity, JD Technology, Beijing, China+JD Intelligent Cities Research, Beijing, China; JD iCity, JD Technology, Beijing, China+JD Intelligent Cities Research, Beijing, China", + "bibtex": "@article{Chen_Zhang_Su_Lai_Xiang_Zhang_Zheng_2023, title={Win-Win: A Privacy-Preserving Federated Framework for Dual-Target Cross-Domain Recommendation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25531}, DOI={10.1609/aaai.v37i4.25531}, abstractNote={Cross-domain recommendation (CDR) aims to alleviate the data sparsity by transferring knowledge from an informative source domain to the target domain, which inevitably proposes stern challenges to data privacy and transferability during the transfer process. 
A small amount of recent CDR works have investigated privacy protection, while they still suffer from satisfying practical requirements (e.g., limited privacy-preserving ability) and preventing the potential risk of negative transfer. To address the above challenging problems, we propose a novel and unified privacy-preserving federated framework for dual-target CDR, namely P2FCDR. We design P2FCDR as peer-to-peer federated network architecture to ensure the local data storage and privacy protection of business partners. Specifically, for the special knowledge transfer process in CDR under federated settings, we initialize an optimizable orthogonal mapping matrix to learn the embedding transformation across domains and adopt the local differential privacy technique on the transformed embedding before exchanging across domains, which provides more reliable privacy protection. Furthermore, we exploit the similarity between in-domain and cross-domain embedding, and develop a gated selecting vector to refine the information fusion for more accurate dual transfer. 
Extensive experiments on three real-world datasets demonstrate that P2FCDR significantly outperforms the state-of-the-art methods and effectively protects data privacy.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Chen, Gaode and Zhang, Xinghua and Su, Yijun and Lai, Yantong and Xiang, Ji and Zhang, Junbo and Zheng, Yu}, year={2023}, month={Jun.}, pages={4149-4156} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25531/25303", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25531", + "pdf_size": 275220, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16990607099646833695&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn;iie.ac.cn;outlook.com;outlook.com", + "email": "iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn;iie.ac.cn;outlook.com;outlook.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2+3;0+1;2+3;0+1;0+1;2+3;2+3", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;JD Technology;JD Intelligent Cities Research", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;;", + "aff_unique_abbr": "CAS;UCAS;;", + "aff_campus_unique_index": "0+0;0+0;;0+0;0+0;;", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0+0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26671", + "title": "Winning the CityLearn Challenge: Adaptive Optimization with Evolutionary Search under Trajectory-Based Guidance", + "track": "aaai special track", + "status": "Technical", + "abstract": "Modern power systems will have to face difficult challenges in the years to come: frequent blackouts in urban areas caused by high peaks of electricity demand, grid instability exacerbated by the intermittency of 
renewable generation, and climate change on a global scale amplified by increasing carbon emissions. While current practices are growingly inadequate, the pathway of artificial intelligence (AI)-based methods to widespread adoption is hindered by missing aspects of trustworthiness. The CityLearn Challenge is an exemplary opportunity for researchers from multi-disciplinary fields to investigate the potential of AI to tackle these pressing issues within the energy domain, collectively modeled as a reinforcement learning (RL) task. Multiple real-world challenges faced by contemporary RL techniques are embodied in the problem formulation. In this paper, we present a novel method using the solution function of optimization as policies to compute the actions for sequential decision-making, while notably adapting the parameters of the optimization model from online observations. Algorithmically, this is achieved by an evolutionary algorithm under a novel trajectory-based guidance scheme. Formally, the global convergence property is established. 
Our agent ranked first in the latest 2021 CityLearn Challenge, being able to achieve superior performance in almost all metrics while maintaining some key aspects of interpretability.", + "primary_area": "ai for social impact", + "author": "Vanshaj Khattar; Ming Jin", + "authorids": "", + "aff": "Department of Electrical and Computer Engineering, Virginia Tech; Department of Electrical and Computer Engineering, Virginia Tech", + "bibtex": "@article{Khattar_Jin_2023, title={Winning the CityLearn Challenge: Adaptive Optimization with Evolutionary Search under Trajectory-Based Guidance}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26671}, DOI={10.1609/aaai.v37i12.26671}, abstractNote={Modern power systems will have to face difficult challenges in the years to come: frequent blackouts in urban areas caused by high peaks of electricity demand, grid instability exacerbated by the intermittency of renewable generation, and climate change on a global scale amplified by increasing carbon emissions. While current practices are growingly inadequate, the pathway of artificial intelligence (AI)-based methods to widespread adoption is hindered by missing aspects of trustworthiness. The CityLearn Challenge is an exemplary opportunity for researchers from multi-disciplinary fields to investigate the potential of AI to tackle these pressing issues within the energy domain, collectively modeled as a reinforcement learning (RL) task. Multiple real-world challenges faced by contemporary RL techniques are embodied in the problem formulation. In this paper, we present a novel method using the solution function of optimization as policies to compute the actions for sequential decision-making, while notably adapting the parameters of the optimization model from online observations. Algorithmically, this is achieved by an evolutionary algorithm under a novel trajectory-based guidance scheme. Formally, the global convergence property is established. 
Our agent ranked first in the latest 2021 CityLearn Challenge, being able to achieve superior performance in almost all metrics while maintaining some key aspects of interpretability.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Khattar, Vanshaj and Jin, Ming}, year={2023}, month={Jun.}, pages={14286-14294} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26671/26443", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26671", + "pdf_size": 539938, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15701161309784337663&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff_domain": "vt.edu;vt.edu", + "email": "vt.edu;vt.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Virginia Tech", + "aff_unique_dep": "Department of Electrical and Computer Engineering", + "aff_unique_url": "https://www.vt.edu", + "aff_unique_abbr": "VT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25963", + "title": "XClusters: Explainability-First Clustering", + "track": "main", + "status": "Technical", + "abstract": "We study the problem of explainability-first clustering where explainability becomes a first-class citizen for clustering. Previous clustering approaches use decision trees for explanation, but only after the clustering is completed. In contrast, our approach is to perform clustering and decision tree training holistically where the decision tree's performance and size also influence the clustering results. We assume the attributes for clustering and explaining are distinct, although this is not necessary. We observe that our problem is a monotonic optimization where the objective function is a difference of monotonic functions. 
We then propose an efficient branch-and-bound algorithm for finding the best parameters that lead to a balance of clustering accuracy and decision tree explainability. Our experiments show that our method can improve the explainability of any clustering that fits in our framework.", + "primary_area": "machine learning ii", + "author": "Hyunseung Hwang; Steven Euijong Whang", + "authorids": "", + "aff": "KAIST; KAIST", + "bibtex": "@article{Hwang_Whang_2023, title={XClusters: Explainability-First Clustering}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25963}, DOI={10.1609/aaai.v37i7.25963}, abstractNote={We study the problem of explainability-first clustering where explainability becomes a first-class citizen for clustering. Previous clustering approaches use decision trees for explanation, but only after the clustering is completed. In contrast, our approach is to perform clustering and decision tree training holistically where the decision tree\u2019s performance and size also influence the clustering results. We assume the attributes for clustering and explaining are distinct, although this is not necessary. We observe that our problem is a monotonic optimization where the objective function is a difference of monotonic functions. We then propose an efficient branch-and-bound algorithm for finding the best parameters that lead to a balance of clustering accuracy and decision tree explainability. 
Our experiments show that our method can improve the explainability of any clustering that fits in our framework.}, number={7}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hwang, Hyunseung and Whang, Steven Euijong}, year={2023}, month={Jun.}, pages={7962-7970} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25963/25735", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25963", + "pdf_size": 2469261, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17855930753613061947&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff_domain": "kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "article-26401", + "title": "XRand: Differentially Private Defense against Explanation-Guided Attacks", + "track": "main", + "status": "Technical", + "abstract": "Recent development in the field of explainable artificial intelligence (XAI) has helped improve trust in Machine-Learning-as-a-Service (MLaaS) systems, in which an explanation is provided together with the model prediction in response to each query. However, XAI also opens a door for adversaries to gain insights into the black-box models in MLaaS, thereby making the models more vulnerable to several attacks. For example, feature-based explanations (e.g., SHAP) could expose the top important features that a black-box model focuses on. Such disclosure has been exploited to craft effective backdoor triggers against malware classifiers. 
To address this trade-off, we introduce a new concept of achieving local differential privacy (LDP) in the explanations, and from that we establish a defense, called XRand, against such attacks. We show that our mechanism restricts the information that the adversary can learn about the top important features, while maintaining the faithfulness of the explanations.", + "primary_area": "philosophy and ethics of ai", + "author": "Truc Nguyen; Phung Lai; Hai Phan; My T. Thai", + "authorids": "", + "aff": "University of Florida; New Jersey Institute of Technology; New Jersey Institute of Technology; University of Florida", + "bibtex": "@article{Nguyen_Lai_Phan_Thai_2023, title={XRand: Differentially Private Defense against Explanation-Guided Attacks}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26401}, DOI={10.1609/aaai.v37i10.26401}, abstractNote={Recent development in the field of explainable artificial intelligence (XAI) has helped improve trust in Machine-Learning-as-a-Service (MLaaS) systems, in which an explanation is provided together with the model prediction in response to each query. However, XAI also opens a door for adversaries to gain insights into the black-box models in MLaaS, thereby making the models more vulnerable to several attacks. For example, feature-based explanations (e.g., SHAP) could expose the top important features that a black-box model focuses on. Such disclosure has been exploited to craft effective backdoor triggers against malware classifiers. To address this trade-off, we introduce a new concept of achieving local differential privacy (LDP) in the explanations, and from that we establish a defense, called XRand, against such attacks. 
We show that our mechanism restricts the information that the adversary can learn about the top important features, while maintaining the faithfulness of the explanations.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Nguyen, Truc and Lai, Phung and Phan, Hai and Thai, My T.}, year={2023}, month={Jun.}, pages={11873-11881} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26401/26173", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26401", + "pdf_size": 530966, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7802634909426855117&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff_domain": "ufl.edu;njit.edu;njit.edu;cise.ufl.edu", + "email": "ufl.edu;njit.edu;njit.edu;cise.ufl.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "University of Florida;New Jersey Institute of Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ufl.edu;https://www.njit.edu", + "aff_unique_abbr": "UF;NJIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26871", + "title": "Xaitk-Saliency: An Open Source Explainable AI Toolkit for Saliency", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Advances in artificial intelligence (AI) using techniques such as deep learning have fueled the recent progress in fields such as computer vision. However, these algorithms are still often viewed as \"black boxes\", which cannot easily explain how they arrived at their final output decisions. Saliency maps are one commonly used form of explainable AI (XAI), which indicate the input features an algorithm paid attention to during its decision process. Here, we introduce the open source xaitk-saliency package, an XAI framework and toolkit for saliency. 
We demonstrate its modular and flexible nature by highlighting two example use cases for saliency maps: (1) object detection model comparison and (2) doppelganger saliency for person re-identification. We also show how the xaitk-saliency package can be paired with visualization tools to support the interactive exploration of saliency maps. Our results suggest that saliency maps may play a critical role in the verification and validation of AI models, ensuring their trusted use and deployment. The code is publicly available at: https://github.com/xaitk/xaitk-saliency.", + "primary_area": "innovative tools for enabling ai application", + "author": "Brian Hu; Paul Tunison; Brandon RichardWebster; Anthony Hoogs", + "authorids": "", + "aff": "Kitware, Inc.; Kitware, Inc.; Kitware, Inc.; Kitware, Inc.", + "bibtex": "@article{Hu_Tunison_RichardWebster_Hoogs_2024, title={Xaitk-Saliency: An Open Source Explainable AI Toolkit for Saliency}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26871}, DOI={10.1609/aaai.v37i13.26871}, abstractNote={Advances in artificial intelligence (AI) using techniques such as deep learning have fueled the recent progress in fields such as computer vision. However, these algorithms are still often viewed as "black boxes", which cannot easily explain how they arrived at their final output decisions. Saliency maps are one commonly used form of explainable AI (XAI), which indicate the input features an algorithm paid attention to during its decision process. Here, we introduce the open source xaitk-saliency package, an XAI framework and toolkit for saliency. We demonstrate its modular and flexible nature by highlighting two example use cases for saliency maps: (1) object detection model comparison and (2) doppelganger saliency for person re-identification. We also show how the xaitk-saliency package can be paired with visualization tools to support the interactive exploration of saliency maps. 
Our results suggest that saliency maps may play a critical role in the verification and validation of AI models, ensuring their trusted use and deployment. The code is publicly available at: https://github.com/xaitk/xaitk-saliency.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Hu, Brian and Tunison, Paul and RichardWebster, Brandon and Hoogs, Anthony}, year={2024}, month={Jul.}, pages={15760-15766} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26871/26643", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26871", + "pdf_size": 9766631, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10700928883480664493&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "kitware.com;kitware.com;kitware.com;kitware.com", + "email": "kitware.com;kitware.com;kitware.com;kitware.com", + "github": "https://github.com/xaitk/xaitk-saliency", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Kitware, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kitware.com", + "aff_unique_abbr": "Kitware", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25320", + "title": "YOLOV: Making Still Image Object Detectors Great at Video Object Detection", + "track": "main", + "status": "Technical", + "abstract": "Video object detection (VID) is challenging because of the high variation of object appearance as well as the diverse deterioration in some frames. On the positive side, the detection in a certain frame of a video, compared with that in a still image, can draw support from other frames. Hence, how to aggregate features across different frames is pivotal to VID problem. Most of existing aggregation algorithms are customized for two-stage detectors. 
However, these detectors are usually computationally expensive due to their two-stage nature. This work proposes a simple yet effective strategy to address the above concerns, which costs marginal overheads with significant gains in accuracy. Concretely, different from traditional two-stage pipeline, we select important regions after the one-stage detection to avoid processing massive low-quality candidates. Besides, we evaluate the relationship between a target frame and reference frames to guide the aggregation. We conduct extensive experiments and ablation studies to verify the efficacy of our design, and reveal its superiority over other state-of-the-art VID approaches in both effectiveness and efficiency. Our YOLOX-based model can achieve promising performance (e.g., 87.5% AP50 at over 30 FPS on the ImageNet VID dataset on a single 2080Ti GPU), making it attractive for large-scale or real-time applications. The implementation is simple, we have made the demo codes and models available at https://github.com/YuHengsss/YOLOV.", + "primary_area": "computer vision ii", + "author": "Yuheng Shi; Naiyan Wang; Xiaojie Guo", + "authorids": "", + "aff": "Tianjin University; TuSimple; Tianjin University", + "bibtex": "@article{Shi_Wang_Guo_2023, title={YOLOV: Making Still Image Object Detectors Great at Video Object Detection}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25320}, DOI={10.1609/aaai.v37i2.25320}, abstractNote={Video object detection (VID) is challenging because of the high variation of object appearance as well as the diverse deterioration in some frames. On the positive side, the detection in a certain frame of a video, compared with that in a still image, can draw support from other frames. Hence, how to aggregate features across different frames is pivotal to VID problem. Most of existing aggregation algorithms are customized for two-stage detectors. 
However, these detectors are usually computationally expensive due to their two-stage nature. This work proposes a simple yet effective strategy to address the above concerns, which costs marginal overheads with significant gains in accuracy. Concretely, different from traditional two-stage pipeline, we select important regions after the one-stage detection to avoid processing massive low-quality candidates. Besides, we evaluate the relationship between a target frame and reference frames to guide the aggregation. We conduct extensive experiments and ablation studies to verify the efficacy of our design, and reveal its superiority over other state-of-the-art VID approaches in both effectiveness and efficiency. Our YOLOX-based model can achieve promising performance (e.g., 87.5% AP50 at over 30 FPS on the ImageNet VID dataset on a single 2080Ti GPU), making it attractive for large-scale or real-time applications. The implementation is simple, we have made the demo codes and models available at https://github.com/YuHengsss/YOLOV.}, number={2}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Shi, Yuheng and Wang, Naiyan and Guo, Xiaojie}, year={2023}, month={Jun.}, pages={2254-2262} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25320/25092", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25320", + "pdf_size": 495494, + "gs_citation": 66, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14786933666955212549&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff_domain": "tju.edu.cn;gmail.com;gmail.com", + "email": "tju.edu.cn;gmail.com;gmail.com", + "github": "https://github.com/YuHengsss/YOLOV", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Tianjin University;TuSimple", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.tju.edu.cn;https://www.tusimple.com", + "aff_unique_abbr": "TJU;TuSimple", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "article-25674", + "title": "Yet Another Traffic Classifier: A Masked Autoencoder Based Traffic Transformer with Multi-Level Flow Representation", + "track": "main", + "status": "Technical", + "abstract": "Traffic classification is a critical task in network security and management. Recent research has demonstrated the effectiveness of the deep learning-based traffic classification method. However,\u00a0the following limitations remain: (1) the traffic representation is simply generated from raw packet bytes, resulting in the absence of important information; (2) the model structure of directly applying deep learning algorithms does not take traffic characteristics into account; and (3) scenario-specific classifier training usually requires a labor-intensive and time-consuming process to label data. In this paper, we introduce a masked autoencoder (MAE) based traffic transformer with multi-level flow representation to tackle these problems. To model raw traffic data, we design\u00a0a formatted traffic representation matrix with hierarchical flow information. After that, we develop\u00a0an efficient Traffic Transformer, in which packet-level and flow-level attention mechanisms implement more efficient feature extraction with lower complexity. At last, we utilize the MAE paradigm to pre-train our classifier with a large amount of unlabeled data, and perform fine-tuning with a few labeled data for a series of traffic classification tasks. Experiment findings reveal that our method outperforms state-of-the-art methods on five real-world traffic datasets by a large margin. 
The code is available at https://github.com/NSSL-SJTU/YaTC.", + "primary_area": "domain s of application", + "author": "Ruijie Zhao; Mingwei Zhan; Xianwen Deng; Yanhao Wang; Yijun Wang; Guan Gui; Zhi Xue", + "authorids": "", + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; QI-ANXIN; Shanghai Jiao Tong University; NJUPT; Shanghai Jiao Tong University", + "bibtex": "@article{Zhao_Zhan_Deng_Wang_Wang_Gui_Xue_2023, title={Yet Another Traffic Classifier: A Masked Autoencoder Based Traffic Transformer with Multi-Level Flow Representation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25674}, DOI={10.1609/aaai.v37i4.25674}, abstractNote={Traffic classification is a critical task in network security and management. Recent research has demonstrated the effectiveness of the deep learning-based traffic classification method. However,\u00a0the following limitations remain: (1) the traffic representation is simply generated from raw packet bytes, resulting in the absence of important information; (2) the model structure of directly applying deep learning algorithms does not take traffic characteristics into account; and (3) scenario-specific classifier training usually requires a labor-intensive and time-consuming process to label data. In this paper, we introduce a masked autoencoder (MAE) based traffic transformer with multi-level flow representation to tackle these problems. To model raw traffic data, we design\u00a0a formatted traffic representation matrix with hierarchical flow information. After that, we develop\u00a0an efficient Traffic Transformer, in which packet-level and flow-level attention mechanisms implement more efficient feature extraction with lower complexity. At last, we utilize the MAE paradigm to pre-train our classifier with a large amount of unlabeled data, and perform fine-tuning with a few labeled data for a series of traffic classification tasks. 
Experiment findings reveal that our method outperforms state-of-the-art methods on five real-world traffic datasets by a large margin. The code is available at https://github.com/NSSL-SJTU/YaTC.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhao, Ruijie and Zhan, Mingwei and Deng, Xianwen and Wang, Yanhao and Wang, Yijun and Gui, Guan and Xue, Zhi}, year={2023}, month={Jun.}, pages={5420-5427} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25674/25446", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25674", + "pdf_size": 666283, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3153649153307625927&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com;sjtu.edu.cn;njupt.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;gmail.com;sjtu.edu.cn;njupt.edu.cn;sjtu.edu.cn", + "github": "https://github.com/NSSL-SJTU/YaTC", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;0;2;0", + "aff_unique_norm": "Shanghai Jiao Tong University;QI-ANXIN;Nanjing University of Posts and Telecommunications", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.sjtu.edu.cn;;https://www.njupt.edu.cn", + "aff_unique_abbr": "SJTU;;NJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26243", + "title": "Zero-Cost Operation Scoring in Differentiable Architecture Search", + "track": "main", + "status": "Technical", + "abstract": "We formalize and analyze a fundamental component of dif-\nferentiable neural architecture search (NAS): local \u201copera-\ntion scoring\u201d at each operation choice. We view existing\noperation scoring functions as inexact proxies for accuracy,\nand we find that they perform poorly when analyzed empir-\nically on NAS benchmarks. 
From this perspective, we intro-\nduce a novel perturbation-based zero-cost operation scor-\ning (Zero-Cost-PT) approach, which utilizes zero-cost prox-\nies that were recently studied in multi-trial NAS but de-\ngrade significantly on larger search spaces, typical for dif-\nferentiable NAS. We conduct a thorough empirical evalu-\nation on a number of NAS benchmarks and large search\nspaces, from NAS-Bench-201, NAS-Bench-1Shot1, NAS-\nBench-Macro, to DARTS-like and MobileNet-like spaces,\nshowing significant improvements in both search time and\naccuracy. On the ImageNet classification task on the DARTS\nsearch space, our approach improved accuracy compared to\nthe best current training-free methods (TE-NAS) while be-\ning over 10\u00d7 faster (total searching time 25 minutes on a\nsingle GPU), and observed significantly better transferabil-\nity on architectures searched on the CIFAR-10 dataset with\nan accuracy increase of 1.8 pp. Our code is available at:\nhttps://github.com/zerocostptnas/zerocost operation score.", + "primary_area": "machine learning iv", + "author": "Lichuan Xiang; Lukasz Dudziak; Mohamed S. Abdelfattah; Thomas Chau; Nicholas D. Lane; Hongkai Wen", + "authorids": "", + "aff": "University of Warwick; Samsung AI Center Cambridge; Cornell University; Samsung AI Center Cambridge; Samsung AI Center Cambridge + University of Cambridge; University of Warwick + Samsung AI Center Cambridge", + "bibtex": "@article{Xiang_Dudziak_Abdelfattah_Chau_Lane_Wen_2023, title={Zero-Cost Operation Scoring in Differentiable Architecture Search}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26243}, DOI={10.1609/aaai.v37i9.26243}, abstractNote={We formalize and analyze a fundamental component of dif-\nferentiable neural architecture search (NAS): local \u201copera-\ntion scoring\u201d at each operation choice. 
We view existing\noperation scoring functions as inexact proxies for accuracy,\nand we find that they perform poorly when analyzed empir-\nically on NAS benchmarks. From this perspective, we intro-\nduce a novel perturbation-based zero-cost operation scor-\ning (Zero-Cost-PT) approach, which utilizes zero-cost prox-\nies that were recently studied in multi-trial NAS but de-\ngrade significantly on larger search spaces, typical for dif-\nferentiable NAS. We conduct a thorough empirical evalu-\nation on a number of NAS benchmarks and large search\nspaces, from NAS-Bench-201, NAS-Bench-1Shot1, NAS-\nBench-Macro, to DARTS-like and MobileNet-like spaces,\nshowing significant improvements in both search time and\naccuracy. On the ImageNet classification task on the DARTS\nsearch space, our approach improved accuracy compared to\nthe best current training-free methods (TE-NAS) while be-\ning over 10\u00d7 faster (total searching time 25 minutes on a\nsingle GPU), and observed significantly better transferabil-\nity on architectures searched on the CIFAR-10 dataset with\nan accuracy increase of 1.8 pp. Our code is available at:\nhttps://github.com/zerocostptnas/zerocost operation score.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Xiang, Lichuan and Dudziak, Lukasz and Abdelfattah, Mohamed S. and Chau, Thomas and Lane, Nicholas D. 
and Wen, Hongkai}, year={2023}, month={Jun.}, pages={10453-10463} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26243/26015", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26243", + "pdf_size": 1260110, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13670892211175721729&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff_domain": "warwick.ac.uk;samsung.com;cornell.edu;samsung.com;samsung.com;warwick.ac.uk", + "email": "warwick.ac.uk;samsung.com;cornell.edu;samsung.com;samsung.com;warwick.ac.uk", + "github": "https://github.com/zerocostptnas/zerocost_operation_score", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;1;1+3;0+1", + "aff_unique_norm": "University of Warwick;Samsung AI Center;Cornell University;University of Cambridge", + "aff_unique_dep": ";AI Center;;", + "aff_unique_url": "https://www.warwick.ac.uk;https://www.samsung.com/global/research-innovation/ai-research-centers/samsung-ai-center-cambridge/;https://www.cornell.edu;https://www.cam.ac.uk", + "aff_unique_abbr": "Warwick;SAC Cambridge;Cornell;Cambridge", + "aff_campus_unique_index": "1;1;1+1;1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;0;1;0;0+0;0+0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "article-26410", + "title": "Zero-Knowledge Proofs for Classical Planning Problems", + "track": "main", + "status": "Technical", + "abstract": "In classical planning, the aim is to find a sequence of deterministic actions leading from the initial to a goal state. In this work, we consider the scenario where a party who knows the solution to a planning task, called the prover, wants to convince a second party, the verifier, that it has the solution without revealing any information about the solution itself. 
This is relevant in domains where privacy is important, for example when plans contain sensitive information or when the solution should not be revealed upfront. We achieve this by introducing a zero-knowledge protocol for plan existence. By restricting ourselves to tasks with polynomially-bounded plan length, we are able to construct a protocol that can be run efficiently by both the prover and verifier. The resulting protocol does not rely on any reduction, has a constant number of rounds, and runs in time polynomial in the size of the task.", + "primary_area": "planning routing and scheduling", + "author": "Augusto B. Corr\u00eaa; Clemens B\u00fcchner; Remo Christen", + "authorids": "", + "aff": "University of Basel, Switzerland; University of Basel, Switzerland; University of Basel, Switzerland", + "bibtex": "@article{Corr\u00eaa_B\u00fcchner_Christen_2023, title={Zero-Knowledge Proofs for Classical Planning Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26410}, DOI={10.1609/aaai.v37i10.26410}, abstractNote={In classical planning, the aim is to find a sequence of deterministic actions leading from the initial to a goal state. In this work, we consider the scenario where a party who knows the solution to a planning task, called the prover, wants to convince a second party, the verifier, that it has the solution without revealing any information about the solution itself. This is relevant in domains where privacy is important, for example when plans contain sensitive information or when the solution should not be revealed upfront. We achieve this by introducing a zero-knowledge protocol for plan existence. By restricting ourselves to tasks with polynomially-bounded plan length, we are able to construct a protocol that can be run efficiently by both the prover and verifier. 
The resulting protocol does not rely on any reduction, has a constant number of rounds, and runs in time polynomial in the size of the task.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Corr\u00eaa, Augusto B. and B\u00fcchner, Clemens and Christen, Remo}, year={2023}, month={Jun.}, pages={11955-11962} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26410/26182", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26410", + "pdf_size": 145401, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1220984912497186703&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "unibas.ch;unibas.ch;unibas.ch", + "email": "unibas.ch;unibas.ch;unibas.ch", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Basel", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unibas.ch", + "aff_unique_abbr": "UniBas", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "article-26365", + "title": "Zero-Shot Assistance in Sequential Decision Problems", + "track": "main", + "status": "Technical", + "abstract": "We consider the problem of creating assistants that can help agents solve new sequential decision problems, assuming the agent is not able to specify the reward function explicitly to the assistant. Instead of acting in place of the agent as in current automation-based approaches, we give the assistant an advisory role and keep the agent in the loop as the main decision maker. The difficulty is that we must account for potential biases of the agent which may cause it to seemingly irrationally reject advice. To do this we introduce a novel formalization of assistance that models these biases, allowing the assistant to infer and adapt to them. 
We then introduce a new method for planning the assistant's actions which can scale to large decision making problems. We show experimentally that our approach adapts to these agent biases, and results in higher cumulative reward for the agent than automation-based alternatives. Lastly, we show that an approach combining advice and automation outperforms advice alone at the cost of losing some safety guarantees.", + "primary_area": "multiagent systems", + "author": "Sebastiaan De Peuter; Samuel Kaski", + "authorids": "", + "aff": "Department of Computer Science, Aalto University, Espoo, Finland + Department of Computer Science, University of Manchester, Manchester, UK; Department of Computer Science, Aalto University, Espoo, Finland + Department of Computer Science, University of Manchester, Manchester, UK", + "bibtex": "@article{De Peuter_Kaski_2023, title={Zero-Shot Assistance in Sequential Decision Problems}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26365}, DOI={10.1609/aaai.v37i10.26365}, abstractNote={We consider the problem of creating assistants that can help agents solve new sequential decision problems, assuming the agent is not able to specify the reward function explicitly to the assistant. Instead of acting in place of the agent as in current automation-based approaches, we give the assistant an advisory role and keep the agent in the loop as the main decision maker. The difficulty is that we must account for potential biases of the agent which may cause it to seemingly irrationally reject advice. To do this we introduce a novel formalization of assistance that models these biases, allowing the assistant to infer and adapt to them. We then introduce a new method for planning the assistant\u2019s actions which can scale to large decision making problems. We show experimentally that our approach adapts to these agent biases, and results in higher cumulative reward for the agent than automation-based alternatives. 
Lastly, we show that an approach combining advice and automation outperforms advice alone at the cost of losing some safety guarantees.}, number={10}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={De Peuter, Sebastiaan and Kaski, Samuel}, year={2023}, month={Jun.}, pages={11551-11559} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26365/26137", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26365", + "pdf_size": 200288, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14898019001288519718&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "aalto.fi;aalto.fi", + "email": "aalto.fi;aalto.fi", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "Aalto University;University of Manchester", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.aalto.fi;https://www.manchester.ac.uk", + "aff_unique_abbr": "Aalto;UoM", + "aff_campus_unique_index": "0+1;0+1", + "aff_campus_unique": "Espoo;Manchester", + "aff_country_unique_index": "0+1;0+1", + "aff_country_unique": "Finland;United Kingdom" + }, + { + "id": "article-26482", + "title": "Zero-Shot Cross-Lingual Event Argument Extraction with Language-Oriented Prefix-Tuning", + "track": "main", + "status": "Technical", + "abstract": "Event argument extraction (EAE) aims to identify the arguments of a given event, and classify the roles that those arguments play. Due to high data demands of training EAE models, zero-shot cross-lingual EAE has attracted increasing attention, as it greatly reduces human annotation effort. Some prior works indicate that generation-based methods have achieved promising performance for monolingual EAE. 
However, when applying existing generation-based methods to zero-shot cross-lingual EAE, we find two critical challenges, including Language Discrepancy and Template Construction. In this paper, we propose a novel method termed as Language-oriented Prefix-tuning Network (LAPIN) to address the above challenges. Specifically, we devise a Language-oriented Prefix Generator module to handle the discrepancies between source and target languages. Moreover, we leverage a Language-agnostic Template Constructor module to design templates that can be adapted to any language. Extensive experiments demonstrate that our proposed method achieves the best performance, outperforming the previous state-of-the-art model by 4.8% and 2.3% of the average F1-score on two multilingual EAE datasets.", + "primary_area": "speech natural language processing", + "author": "Pengfei Cao; Zhuoran Jin; Yubo Chen; Kang Liu; Jun Zhao", + "authorids": "", + "aff": "National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Beijing Academy of Artificial Intelligence, Beijing, 100084, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China", + "bibtex": "@article{Cao_Jin_Chen_Liu_Zhao_2023, title={Zero-Shot 
Cross-Lingual Event Argument Extraction with Language-Oriented Prefix-Tuning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26482}, DOI={10.1609/aaai.v37i11.26482}, abstractNote={Event argument extraction (EAE) aims to identify the arguments of a given event, and classify the roles that those arguments play. Due to high data demands of training EAE models, zero-shot cross-lingual EAE has attracted increasing attention, as it greatly reduces human annotation effort. Some prior works indicate that generation-based methods have achieved promising performance for monolingual EAE. However, when applying existing generation-based methods to zero-shot cross-lingual EAE, we find two critical challenges, including Language Discrepancy and Template Construction. In this paper, we propose a novel method termed as Language-oriented Prefix-tuning Network (LAPIN) to address the above challenges. Specifically, we devise a Language-oriented Prefix Generator module to handle the discrepancies between source and target languages. Moreover, we leverage a Language-agnostic Template Constructor module to design templates that can be adapted to any language. 
Extensive experiments demonstrate that our proposed method achieves the best performance, outperforming the previous state-of-the-art model by 4.8% and 2.3% of the average F1-score on two multilingual EAE datasets.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Cao, Pengfei and Jin, Zhuoran and Chen, Yubo and Liu, Kang and Zhao, Jun}, year={2023}, month={Jun.}, pages={12589-12597} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26482/26254", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26482", + "pdf_size": 404598, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9805880324747217875&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;0+1+2;0+1", + "aff_unique_norm": "National Laboratory of Pattern Recognition;University of Chinese Academy of Sciences;Beijing Academy of Artificial Intelligence", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;", + "aff_unique_url": ";http://www.ucas.ac.cn;https://www.baaic.cn", + "aff_unique_abbr": ";UCAS;BAAI", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26607", + "title": "Zero-Shot Face-Based Voice Conversion: Bottleneck-Free Speech Disentanglement in the Real-World Scenario", + "track": "main", + "status": "Technical", + "abstract": "Often a face has a voice. Appearance sometimes has a strong relationship with one's voice. In this work, we study how a face can be converted to a voice, which is a face-based voice conversion. 
Since there is no clean dataset that contains face and speech, voice conversion faces difficult learning and low-quality problems caused by background noise or echo. Too much redundant information for face-to-voice also causes synthesis of a general style of speech. Furthermore, previous work tried to disentangle speech with bottleneck adjustment. However, it is hard to decide on the size of the bottleneck. Therefore, we propose a bottleneck-free strategy for speech disentanglement. To avoid synthesizing the general style of speech, we utilize framewise facial embedding. It applied adversarial learning with a multi-scale discriminator for the model to achieve better quality. In addition, the self-attention module is added to focus on content-related features for in-the-wild data. Quantitative experiments show that our method outperforms previous work.", + "primary_area": "speech natural language processing", + "author": "Shao-En Weng; Hong-Han Shuai; Wen-Huang Cheng", + "authorids": "", + "aff": "National Yang Ming Chiao Tung University; National Yang Ming Chiao Tung University; National Yang Ming Chiao Tung University", + "bibtex": "@article{Weng_Shuai_Cheng_2023, title={Zero-Shot Face-Based Voice Conversion: Bottleneck-Free Speech Disentanglement in the Real-World Scenario}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26607}, DOI={10.1609/aaai.v37i11.26607}, abstractNote={Often a face has a voice. Appearance sometimes has a strong relationship with one\u2019s voice. In this work, we study how a face can be converted to a voice, which is a face-based voice conversion. Since there is no clean dataset that contains face and speech, voice conversion faces difficult learning and low-quality problems caused by background noise or echo. Too much redundant information for face-to-voice also causes synthesis of a general style of speech. Furthermore, previous work tried to disentangle speech with bottleneck adjustment. 
However, it is hard to decide on the size of the bottleneck. Therefore, we propose a bottleneck-free strategy for speech disentanglement. To avoid synthesizing the general style of speech, we utilize framewise facial embedding. It applied adversarial learning with a multi-scale discriminator for the model to achieve better quality. In addition, the self-attention module is added to focus on content-related features for in-the-wild data. Quantitative experiments show that our method outperforms previous work.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Weng, Shao-En and Shuai, Hong-Han and Cheng, Wen-Huang}, year={2023}, month={Jun.}, pages={13718-13726} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26607/26379", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26607", + "pdf_size": 5413290, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=620528419964003703&as_sdt=800005&sciodt=0,15&hl=en", + "gs_version_total": 3, + "aff_domain": "nycu.edu.tw;nycu.edu.tw;nycu.edu.tw", + "email": "nycu.edu.tw;nycu.edu.tw;nycu.edu.tw", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National Yang Ming Chiao Tung University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nycu.edu.tw", + "aff_unique_abbr": "NYCU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "article-25080", + "title": "Zero-Shot Linear Combinations of Grounded Social Interactions with Linear Social MDPs", + "track": "main", + "status": "Technical", + "abstract": "Humans and animals engage in rich social interactions. It is often theorized that a relatively small number of basic social interactions give rise to the full range of behavior observed. 
But no computational theory explaining how social interactions combine together has been proposed before. We do so here. We take a model, the Social MDP, which is able to express a range of social interactions, and extend it to represent linear combinations of social interactions. Practically for robotics applications, such models are now able to not just express that an agent should help another agent, but to express goal-centric social interactions. Perhaps an agent is helping someone get dressed, but preventing them from falling, and is happy to exchange stories in the meantime. How an agent responds socially, should depend on what it thinks the other agent is doing at that point in time. To encode this notion, we take linear combinations of social interactions as defined in Social MDPs, and compute the weights on those combinations on the fly depending on the estimated goals of other agents. This new model, the Linear Social MDP, enables zero-shot reasoning about complex social interactions, provides a mathematical basis for the long-standing intuition that social interactions should compose, and leads to interesting new behaviors that we validate using human observers. Complex social interactions are part of the future of intelligent agents, and having principled mathematical models built on a foundation like MDPs will make it possible to bring social interactions to every robotic application.", + "primary_area": "cognitive modeling cognitive systems", + "author": "Ravi Tejwani; Yen-Ling Kuo; Tianmin Shu; Bennett Stankovits; Dan Gutfreund; Joshua B. 
Tenenbaum; Boris Katz; Andrei Barbu", + "authorids": "", + "aff": "CSAIL & CBMM, MIT; CSAIL & CBMM, MIT; BCS & CBMM, MIT; CSAIL & CBMM, MIT; MIT-IBM Watson AI Lab; BCS & CBMM, MIT; CSAIL & CBMM, MIT; CSAIL & CBMM, MIT", + "bibtex": "@article{Tejwani_Kuo_Shu_Stankovits_Gutfreund_Tenenbaum_Katz_Barbu_2023, title={Zero-Shot Linear Combinations of Grounded Social Interactions with Linear Social MDPs}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25080}, DOI={10.1609/aaai.v37i1.25080}, abstractNote={Humans and animals engage in rich social interactions. It is often theorized that a relatively small number of basic social interactions give rise to the full range of behavior observed. But no computational theory explaining how social interactions combine together has been proposed before. We do so here. We take a model, the Social MDP, which is able to express a range of social interactions, and extend it to represent linear combinations of social interactions. Practically for robotics applications, such models are now able to not just express that an agent should help another agent, but to express goal-centric social interactions. Perhaps an agent is helping someone get dressed, but preventing them from falling, and is happy to exchange stories in the meantime. How an agent responds socially, should depend on what it thinks the other agent is doing at that point in time. To encode this notion, we take linear combinations of social interactions as defined in Social MDPs, and compute the weights on those combinations on the fly depending on the estimated goals of other agents. This new model, the Linear Social MDP, enables zero-shot reasoning about complex social interactions, provides a mathematical basis for the long-standing intuition that social interactions should compose, and leads to interesting new behaviors that we validate using human observers. 
Complex social interactions are part of the future of intelligent agents, and having principled mathematical models built on a foundation like MDPs will make it possible to bring social interactions to every robotic application.}, number={1}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Tejwani, Ravi and Kuo, Yen-Ling and Shu, Tianmin and Stankovits, Bennett and Gutfreund, Dan and Tenenbaum, Joshua B. and Katz, Boris and Barbu, Andrei}, year={2023}, month={Jun.}, pages={94-101} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25080/24852", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25080", + "pdf_size": 2780655, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9764027514170678427&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff_domain": "mit.edu;mit.edu;mit.edu;mit.edu;us.ibm.com;mit.edu;mit.edu;mit.edu", + "email": "mit.edu;mit.edu;mit.edu;mit.edu;us.ibm.com;mit.edu;mit.edu;mit.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory & Center for Brains, Minds, and Machines", + "aff_unique_url": "https://www.csail.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-25651", + "title": "Zero-Shot Rumor Detection with Propagation Structure via Prompt Learning", + "track": "main", + "status": "Technical", + "abstract": "The spread of rumors along with breaking events seriously hinders the truth in the era of social media. Previous studies reveal that due to the lack of annotated resources, rumors presented in minority languages are hard to be detected. 
Furthermore, the unforeseen breaking events not involved in yesterday's news exacerbate the scarcity of data resources. In this work, we propose a novel zero-shot framework based on prompt learning to detect rumors falling in different domains or presented in different languages. More specifically, we firstly represent rumor circulated on social media as diverse propagation threads, then design a hierarchical prompt encoding mechanism to learn language-agnostic contextual representations for both prompts and rumor data. To further enhance domain adaptation, we model the domain-invariant structural features from the propagation threads, to incorporate structural position representations of influential community response. In addition, a new virtual response augmentation method is used to improve model training. Extensive experiments conducted on three real-world datasets demonstrate that our proposed model achieves much better performance than state-of-the-art methods and exhibits a superior capacity for detecting rumors at early stages.", + "primary_area": "domain s of application", + "author": "Hongzhan Lin; Pengyao Yi; Jing Ma; Haiyun Jiang; Ziyang Luo; Shuming Shi; Ruifang Liu", + "authorids": "", + "aff": "Hong Kong Baptist University; Beijing University of Posts and Telecommunications; Fudan University; Tsinghua University; Hong Kong Baptist University; Tsinghua University; Beijing University of Posts and Telecommunications", + "bibtex": "@article{Lin_Yi_Ma_Jiang_Luo_Shi_Liu_2023, title={Zero-Shot Rumor Detection with Propagation Structure via Prompt Learning}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/25651}, DOI={10.1609/aaai.v37i4.25651}, abstractNote={The spread of rumors along with breaking events seriously hinders the truth in the era of social media. Previous studies reveal that due to the lack of annotated resources, rumors presented in minority languages are hard to be detected. 
Furthermore, the unforeseen breaking events not involved in yesterday\u2019s news exacerbate the scarcity of data resources. In this work, we propose a novel zero-shot framework based on prompt learning to detect rumors falling in different domains or presented in different languages. More specifically, we firstly represent rumor circulated on social media as diverse propagation threads, then design a hierarchical prompt encoding mechanism to learn language-agnostic contextual representations for both prompts and rumor data. To further enhance domain adaptation, we model the domain-invariant structural features from the propagation threads, to incorporate structural position representations of influential community response. In addition, a new virtual response augmentation method is used to improve model training. Extensive experiments conducted on three real-world datasets demonstrate that our proposed model achieves much better performance than state-of-the-art methods and exhibits a superior capacity for detecting rumors at early stages.}, number={4}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Lin, Hongzhan and Yi, Pengyao and Ma, Jing and Jiang, Haiyun and Luo, Ziyang and Shi, Shuming and Liu, Ruifang}, year={2023}, month={Jun.}, pages={5213-5221} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/25651/25423", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/25651", + "pdf_size": 436766, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6504804085140678914&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff_domain": "comp.hkbu.edu.hk;bupt.edu.cn;comp.hkbu.edu.hk;fudan.edu.cn;comp.hkbu.edu.hk;hotmail.com;bupt.edu.cn", + "email": "comp.hkbu.edu.hk;bupt.edu.cn;comp.hkbu.edu.hk;fudan.edu.cn;comp.hkbu.edu.hk;hotmail.com;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;0;3;1", + "aff_unique_norm": "Hong Kong 
Baptist University;Beijing University of Posts and Telecommunications;Fudan University;Tsinghua University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.hkbu.edu.hk;http://www.bupt.edu.cn/;https://www.fudan.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "HKBU;BUPT;Fudan;THU", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "article-26566", + "title": "Zero-Shot Slot Filling with Slot-Prefix Prompting and Attention Relationship Descriptor", + "track": "main", + "status": "Technical", + "abstract": "This paper addresses zero-shot slot filling, which tries to build a system that can generalize to unseen slot types without any training data. The key to zero-shot slot-filling is to match the tokens from the utterance with the semantic definition of the slot without training data in the target domain. This paper tackles this problem by devising a scheme to fully leverage pre-trained language models (PLMs). To this end, we propose a new prompting scheme that utilizes both learnable tokens and slot names to guide the model to focus on the relevant text spans for a given slot. Furthermore, we use attention values between tokens to form a feature descriptor for each token, which is motivated by the fact that the attention value in a PLM naturally characterizes various relationships, e.g., syntactic or semantic, between tokens. 
By further consolidating those features with an additional transformer-based aggregation module, we create a simple-but-effective zero-shot slot filling system that can achieve significantly better performance than the previous methods, as demonstrated by our experimental studies.", + "primary_area": "speech natural language processing", + "author": "Qiaoyang Luo; Lingqiao Liu", + "authorids": "", + "aff": "The University of Adelaide; The University of Adelaide", + "bibtex": "@article{Luo_Liu_2023, title={Zero-Shot Slot Filling with Slot-Prefix Prompting and Attention Relationship Descriptor}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26566}, DOI={10.1609/aaai.v37i11.26566}, abstractNote={This paper addresses zero-shot slot filling, which tries to build a system that can generalize to unseen slot types without any training data. The key to zero-shot slot-filling is to match the tokens from the utterance with the semantic definition of the slot without training data in the target domain. This paper tackles this problem by devising a scheme to fully leverage pre-trained language models (PLMs). To this end, we propose a new prompting scheme that utilizes both learnable tokens and slot names to guide the model to focus on the relevant text spans for a given slot. Furthermore, we use attention values between tokens to form a feature descriptor for each token, which is motivated by the fact that the attention value in a PLM naturally characterizes various relationships, e.g., syntactic or semantic, between tokens. 
By further consolidating those features with an additional transformer-based aggregation module, we create a simple-but-effective zero-shot slot filling system that can achieve significantly better performance than the previous methods, as demonstrated by our experimental studies.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Luo, Qiaoyang and Liu, Lingqiao}, year={2023}, month={Jun.}, pages={13344-13352} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26566/26338", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26566", + "pdf_size": 254753, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11710869742608609211&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff_domain": "adelaide.edu.au;adelaide.edu.au", + "email": "adelaide.edu.au;adelaide.edu.au", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Adelaide", + "aff_unique_dep": "", + "aff_unique_url": "https://www.adelaide.edu.au", + "aff_unique_abbr": "Adelaide", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Australia" + }, + { + "id": "article-26964", + "title": "eCDANs: Efficient Temporal Causal Discovery from Autocorrelated and Non-stationary Data (Student Abstract)", + "track": "aaai student abstract and poster program", + "status": "Technical", + "abstract": "Conventional temporal causal discovery (CD) methods suffer from high dimensionality, fail to identify lagged causal relationships, and often ignore dynamics in relations. In this study, we present a novel constraint-based CD approach for autocorrelated and non-stationary time series data (eCDANs) capable of detecting lagged and contemporaneous causal relationships along with temporal changes. 
eCDANs addresses high dimensionality by optimizing the conditioning sets while conducting conditional independence (CI) tests and identifies the changes in causal relations by introducing a surrogate variable to represent time dependency. Experiments on synthetic and real-world data show that eCDANs can identify time influence and outperform the baselines.", + "primary_area": "", + "author": "Muhammad Hasan Ferdous; Uzma Hasan; Md Osman Gani", + "authorids": "", + "aff": "Causal AI Lab, Department of Information Systems, University of Maryland, Baltimore County; Causal AI Lab, Department of Information Systems, University of Maryland, Baltimore County; Causal AI Lab, Department of Information Systems, University of Maryland, Baltimore County", + "bibtex": "@article{Ferdous_Hasan_Gani_2024, title={eCDANs: Efficient Temporal Causal Discovery from Autocorrelated and Non-stationary Data (Student Abstract)}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26964}, DOI={10.1609/aaai.v37i13.26964}, abstractNote={Conventional temporal causal discovery (CD) methods suffer from high dimensionality, fail to identify lagged causal relationships, and often ignore dynamics in relations. In this study, we present a novel constraint-based CD approach for autocorrelated and non-stationary time series data (eCDANs) capable of detecting lagged and contemporaneous causal relationships along with temporal changes. eCDANs addresses high dimensionality by optimizing the conditioning sets while conducting conditional independence (CI) tests and identifies the changes in causal relations by introducing a surrogate variable to represent time dependency. 
Experiments on synthetic and real-world data show that eCDANs can identify time influence and outperform the baselines.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Ferdous, Muhammad Hasan and Hasan, Uzma and Gani, Md Osman}, year={2024}, month={Jul.}, pages={16208-16209} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26964/26736", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26964", + "pdf_size": 133913, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8629744643036794512&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff_domain": "umbc.edu;umbc.edu;umbc.edu", + "email": "umbc.edu;umbc.edu;umbc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Maryland, Baltimore County", + "aff_unique_dep": "Department of Information Systems", + "aff_unique_url": "https://www.umbc.edu", + "aff_unique_abbr": "UMBC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Baltimore County", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26853", + "title": "eForecaster: Unifying Electricity Forecasting with Robust, Flexible, and Explainable Machine Learning Algorithms", + "track": "iaai technical track", + "status": "Technical", + "abstract": "Electricity forecasting is crucial in scheduling and planning of future electric load, so as to improve the reliability and safeness of the power grid. Despite recent developments of forecasting algorithms in the machine learning community, there is a lack of general and advanced algorithms specifically considering requirements from the power industry perspective. In this paper, we present eForecaster, a unified AI platform including robust, flexible, and explainable machine learning algorithms for diversified electricity forecasting applications. Since Oct. 
2021, multiple commercial bus load, system load, and renewable energy forecasting systems built upon eForecaster have been deployed in seven provinces of China. The deployed systems consistently reduce the average Mean Absolute Error (MAE) by 39.8% to 77.0%, with reduced manual work and explainable guidance. In particular, eForecaster also integrates multiple interpretation methods to uncover the working mechanism of the predictive models, which significantly improves forecasts adoption and user satisfaction.", + "primary_area": "deployed highly innovative applications of ai", + "author": "Zhaoyang Zhu; Weiqi Chen; Rui Xia; Tian Zhou; Peisong Niu; Bingqing Peng; Wenwei Wang; Hengbo Liu; Ziqing Ma; Qingsong Wen; Liang Sun", + "authorids": "", + "aff": "DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Hangzhou, China; DAMO Academy, Alibaba Group, Bellevue, WA, USA; DAMO Academy, Alibaba Group, Bellevue, WA, USA", + "bibtex": "@article{Zhu_Chen_Xia_Zhou_Niu_Peng_Wang_Liu_Ma_Wen_Sun_2024, title={eForecaster: Unifying Electricity Forecasting with Robust, Flexible, and Explainable Machine Learning Algorithms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26853}, DOI={10.1609/aaai.v37i13.26853}, abstractNote={Electricity forecasting is crucial in scheduling and planning of future electric load, so as to improve the reliability and safeness of the power grid. Despite recent developments of forecasting algorithms in the machine learning community, there is a lack of general and advanced algorithms specifically considering requirements from the power industry perspective. 
In this paper, we present eForecaster, a unified AI platform including robust, flexible, and explainable machine learning algorithms for diversified electricity forecasting applications. Since Oct. 2021, multiple commercial bus load, system load, and renewable energy forecasting systems built upon eForecaster have been deployed in seven provinces of China. The deployed systems consistently reduce the average Mean Absolute Error (MAE) by 39.8% to 77.0%, with reduced manual work and explainable guidance. In particular, eForecaster also integrates multiple interpretation methods to uncover the working mechanism of the predictive models, which significantly improves forecasts adoption and user satisfaction.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhu, Zhaoyang and Chen, Weiqi and Xia, Rui and Zhou, Tian and Niu, Peisong and Peng, Bingqing and Wang, Wenwei and Liu, Hengbo and Ma, Ziqing and Wen, Qingsong and Sun, Liang}, year={2024}, month={Jul.}, pages={15630-15638} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26853/26625", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26853", + "pdf_size": 661885, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16223293955440090266&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "DAMO Academy", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", 
+ "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;1;1", + "aff_campus_unique": "Hangzhou;Bellevue", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "article-27066", + "title": "edBB-Demo: Biometrics and Behavior Analysis for Online Educational Platforms", + "track": "demonstrations", + "status": "Technical", + "abstract": "We present edBB-Demo, a demonstrator of an AI-powered research platform for student monitoring in remote education. The edBB platform aims to study the challenges associated to user recognition and behavior understanding in digital platforms. This platform has been developed for data collection, acquiring signals from a variety of sensors including keyboard, mouse, webcam, microphone, smartwatch, and an Electroencephalography band. The information captured from the sensors during the student sessions is modelled in a multimodal learning framework. The demonstrator includes: i) Biometric user authentication in an unsupervised environment; ii) Human action recognition based on remote video analysis; iii) Heart rate estimation from webcam video; and iv) Attention level estimation from facial expression analysis.", + "primary_area": "", + "author": "Roberto Daza; Aythami Morales; Ruben Tolosana; Luis F. 
Gomez; Julian Fierrez; Javier Ortega-Garcia", + "authorids": "", + "aff": "School of Engineering, Autonomous University of Madrid; School of Engineering, Autonomous University of Madrid; School of Engineering, Autonomous University of Madrid; School of Engineering, Autonomous University of Madrid; School of Engineering, Autonomous University of Madrid; School of Engineering, Autonomous University of Madrid", + "bibtex": "@article{Daza_Morales_Tolosana_Gomez_Fierrez_Ortega-Garcia_2024, title={edBB-Demo: Biometrics and Behavior Analysis for Online Educational Platforms}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27066}, DOI={10.1609/aaai.v37i13.27066}, abstractNote={We present edBB-Demo, a demonstrator of an AI-powered research platform for student monitoring in remote education. The edBB platform aims to study the challenges associated to user recognition and behavior understanding in digital platforms. This platform has been developed for data collection, acquiring signals from a variety of sensors including keyboard, mouse, webcam, microphone, smartwatch, and an Electroencephalography band. The information captured from the sensors during the student sessions is modelled in a multimodal learning framework. The demonstrator includes: i) Biometric user authentication in an unsupervised environment; ii) Human action recognition based on remote video analysis; iii) Heart rate estimation from webcam video; and iv) Attention level estimation from facial expression analysis.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Daza, Roberto and Morales, Aythami and Tolosana, Ruben and Gomez, Luis F. 
and Fierrez, Julian and Ortega-Garcia, Javier}, year={2024}, month={Jul.}, pages={16422-16424} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27066/26838", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27066", + "pdf_size": 314794, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12965951469028923575&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 7, + "aff_domain": "uam.es;uam.es;uam.es;uam.es;uam.es;uam.es", + "email": "uam.es;uam.es;uam.es;uam.es;uam.es;uam.es", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Autonomous University of Madrid", + "aff_unique_dep": "School of Engineering", + "aff_unique_url": "https://www.uam.es", + "aff_unique_abbr": "UAM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "article-26605", + "title": "fmLRE: A Low-Resource Relation Extraction Model Based on Feature Mapping Similarity Calculation", + "track": "main", + "status": "Technical", + "abstract": "Low-resource relation extraction (LRE) aims to extract relations from limited labeled corpora. Existing work takes advantages of self-training or distant supervision to expand the limited labeled data in the data-driven approaches, while the selection bias of pseudo labels may cause the error accumulation in subsequent relation classification. To address this issue, this paper proposes fmLRE, an iterative feedback method based on feature mapping similarity calculation to improve the accuracy of pseudo labels. First, it calculates the similarities between pseudo-label and real-label data of the same category in a feature mapping space based on semantic features of labeled dataset after feature projection. Then, it fine-tunes initial model according to the iterative process of reinforcement learning. 
Finally, the similarity is used as a threshold for screening high-precision pseudo-labels and the basis for setting different rewards, which also acts as a penalty term for the loss function of relation classifier. Experimental results demonstrate that fmLRE achieves the state-of-the-art performance compared with strong baselines on two public datasets.", + "primary_area": "speech natural language processing", + "author": "Peng Wang; Tong Shao; Ke Ji; Guozheng Li; Wenjun Ke", + "authorids": "", + "aff": "Southeast University; Southeast University; Southeast University; Southeast University; Southeast University+Beijing Institute of Computer Technology and Application", + "bibtex": "@article{Wang_Shao_Ji_Li_Ke_2023, title={fmLRE: A Low-Resource Relation Extraction Model Based on Feature Mapping Similarity Calculation}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26605}, DOI={10.1609/aaai.v37i11.26605}, abstractNote={Low-resource relation extraction (LRE) aims to extract relations from limited labeled corpora. Existing work takes advantages of self-training or distant supervision to expand the limited labeled data in the data-driven approaches, while the selection bias of pseudo labels may cause the error accumulation in subsequent relation classification. To address this issue, this paper proposes fmLRE, an iterative feedback method based on feature mapping similarity calculation to improve the accuracy of pseudo labels. First, it calculates the similarities between pseudo-label and real-label data of the same category in a feature mapping space based on semantic features of labeled dataset after feature projection. Then, it fine-tunes initial model according to the iterative process of reinforcement learning. Finally, the similarity is used as a threshold for screening high-precision pseudo-labels and the basis for setting different rewards, which also acts as a penalty term for the loss function of relation classifier. 
Experimental results demonstrate that fmLRE achieves the state-of-the-art performance compared with strong baselines on two public datasets.}, number={11}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Wang, Peng and Shao, Tong and Ji, Ke and Li, Guozheng and Ke, Wenjun}, year={2023}, month={Jun.}, pages={13700-13708} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26605/26377", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26605", + "pdf_size": 298230, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13073347327072811136&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff_domain": "seu.edu.cn;stu.hit.edu.cn;seu.edu.cn;seu.edu.cn;163.com", + "email": "seu.edu.cn;stu.hit.edu.cn;seu.edu.cn;seu.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+1", + "aff_unique_norm": "Southeast University;Beijing Institute of Computer Technology and Application", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.seu.edu.cn/;", + "aff_unique_abbr": "SEU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "article-26290", + "title": "i-Code: An Integrative and Composable Multimodal Learning Framework", + "track": "main", + "status": "Technical", + "abstract": "Human intelligence is multimodal; we integrate visual, linguistic, and acoustic signals to maintain a holistic worldview. Most current pretraining methods, however, are limited to one or two modalities. We present i-Code, a self-supervised pretraining framework where users may flexibly combine the modalities of vision, speech, and language into unified and general-purpose vector representations. In this framework, data from each modality are first given to pretrained single-modality encoders. 
The encoder outputs are then integrated with a multimodal fusion network, which uses novel merge- and co-attention mechanisms to effectively combine information from the different modalities. The entire system is pretrained end-to-end with new objectives including masked modality unit modeling and cross-modality contrastive learning. Unlike previous research using only video for pretraining, the i-Code framework can dynamically process single, dual, and triple-modality data during training and inference, flexibly projecting different combinations of modalities into a single representation space. Experimental results demonstrate how i-Code can outperform state-of-the-art techniques on five multimodal understanding tasks and single-modality benchmarks, improving by as much as 11% and demonstrating the power of integrative multimodal pretraining.", + "primary_area": "machine learning iv", + "author": "Ziyi Yang; Yuwei Fang; Chenguang Zhu; Reid Pryzant; DongDong Chen; Yu Shi; Yichong Xu; Yao Qian; Mei Gao; Yi-Ling Chen; Liyang Lu; Yujia Xie; Robert Gmyr; Noel Codella; Naoyuki Kanda; Bin Xiao; Lu Yuan; Takuya Yoshioka; Michael Zeng; Xuedong Huang", + "authorids": "", + "aff": "Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services 
Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research", + "bibtex": "@article{Yang_Fang_Zhu_Pryzant_Chen_Shi_Xu_Qian_Gao_Chen_Lu_Xie_Gmyr_Codella_Kanda_Xiao_Yuan_Yoshioka_Zeng_Huang_2023, title={i-Code: An Integrative and Composable Multimodal Learning Framework}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26290}, DOI={10.1609/aaai.v37i9.26290}, abstractNote={Human intelligence is multimodal; we integrate visual, linguistic, and acoustic signals to maintain a holistic worldview. Most current pretraining methods, however, are limited to one or two modalities. We present i-Code, a self-supervised pretraining framework where users may flexibly combine the modalities of vision, speech, and language into unified and general-purpose vector representations. In this framework, data from each modality are first given to pretrained single-modality encoders. The encoder outputs are then integrated with a multimodal fusion network, which uses novel merge- and co-attention mechanisms to effectively combine information from the different modalities. The entire system is pretrained end-to-end with new objectives including masked modality unit modeling and cross-modality contrastive learning. Unlike previous research using only video for pretraining, the i-Code framework can dynamically process single, dual, and triple-modality data during training and inference, flexibly projecting different combinations of modalities into a single representation space. 
Experimental results demonstrate how i-Code can outperform state-of-the-art techniques on five multimodal understanding tasks and single-modality benchmarks, improving by as much as 11% and demonstrating the power of integrative multimodal pretraining.}, number={9}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Yang, Ziyi and Fang, Yuwei and Zhu, Chenguang and Pryzant, Reid and Chen, DongDong and Shi, Yu and Xu, Yichong and Qian, Yao and Gao, Mei and Chen, Yi-Ling and Lu, Liyang and Xie, Yujia and Gmyr, Robert and Codella, Noel and Kanda, Naoyuki and Xiao, Bin and Yuan, Lu and Yoshioka, Takuya and Zeng, Michael and Huang, Xuedong}, year={2023}, month={Jun.}, pages={10880-10890} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26290/26062", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26290", + "pdf_size": 291927, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6634247564406147298&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 5, + "aff_domain": "microsoft.com;microsoft.com;microsoft.com; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ", + "email": "microsoft.com;microsoft.com;microsoft.com; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 20, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Microsoft", + "aff_unique_dep": "Azure Cognitive Services Research", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-27082", + "title": "nBIIG: A Neural BI Insights Generation System for Table Reporting", + "track": "demonstrations", + "status": "Technical", + "abstract": "We present nBIIG, a neural Business Intelligence (BI) Insights Generation system. 
Given a table, our system applies various analyses to create corresponding RDF representations, and then uses a neural model to generate fluent textual insights out of these representations. The generated insights can be used by an analyst, via a human-in-the-loop paradigm, to enhance the task of creating compelling table reports. The underlying generative neural model is trained over large and carefully distilled data, curated from multiple BI domains. Thus, the system can generate faithful and fluent insights over open-domain tables, making it practical and useful.", + "primary_area": "", + "author": "Yotam Perlitz; Dafna Sheinwald; Noam Slonim; Michal Shmueli-Scheuer", + "authorids": "", + "aff": "IBM Research; IBM Research; IBM Research; IBM Research", + "bibtex": "@article{Perlitz_Sheinwald_Slonim_Shmueli-Scheuer_2024, title={nBIIG: A Neural BI Insights Generation System for Table Reporting}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/27082}, DOI={10.1609/aaai.v37i13.27082}, abstractNote={We present nBIIG, a neural Business Intelligence (BI) Insights Generation system. Given a table, our system applies various analyses to create corresponding RDF representations, and then uses a neural model to generate fluent textual insights out of these representations. The generated insights can be used by an analyst, via a human-in-the-loop paradigm, to enhance the task of creating compelling table reports. The underlying generative neural model is trained over large and carefully distilled data, curated from multiple BI domains. 
Thus, the system can generate faithful and fluent insights over open-domain tables, making it practical and useful.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Perlitz, Yotam and Sheinwald, Dafna and Slonim, Noam and Shmueli-Scheuer, Michal}, year={2024}, month={Jul.}, pages={16470-16472} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/27082/26854", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/27082", + "pdf_size": 478788, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4311230777124272695&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "ibm.com;il.ibm.com;il.ibm.com;il.ibm.com", + "email": "ibm.com;il.ibm.com;il.ibm.com;il.ibm.com", + "github": "", + "project": "https://ibm.biz/BIIG VIDEO; https://ibm.biz/nBIIG dataset", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26902", + "title": "\u201cHow Can I Code A.I. Responsibly?\u201d: The Effect of Computational Action on K-12 Students Learning and Creating Socially Responsible A.I.", + "track": "eaai symposium resources for teaching ai in k 12", + "status": "Technical", + "abstract": "Teaching young people about artificial intelligence (A.I.) is recognized globally as an important education effort by organizations and programs such as UNICEF, OECD, Elements of A.I., and AI4K12. A common theme among K-12 A.I. education programs is teaching how A.I. can impact society in both positive and negative ways. We present an effective tool that teaches young people about the societal impact of A.I. 
that goes one step further: empowering K-12 students to use tools and frameworks to create socially responsible A.I. The computational action process is a curriculum and toolkit that gives students the lessons and tools to evaluate positive and negative impacts of A.I. and consider how they can create beneficial solutions that involve A.I. and computing technology. In a human-subject research study, 101 U.S. and international students between ages 9 and 18 participated in a one-day workshop to learn and practice the computational action process. Pre-post questionnaires measured on the Likert scale students\u2019 perception of A.I. in society and students' desire to use A.I. in their projects. Analysis of the results shows that students who identified as female agreed more strongly with having a concern about the impacts of A.I. than those who identified as male. Students also wrote open-ended responses to questions about what socially responsible technology means to them pre- and post-study. Analysis shows that post-intervention, students were more aware of ethical considerations and what tools they can use to code A.I. responsibly. In addition, students engaged actively with tools in the computational action toolkit, specifically the novel impact matrix, to describe the positive and negative impacts of A.I. technologies like facial recognition. Students demonstrated breadth and depth of discussion of various A.I. technologies' far-reaching positive and negative impacts. These promising results indicate that the computational action process can be a helpful addition to A.I. education programs in furnishing tools for students to analyze the effects of A.I. on society and plan how they can create and use socially responsible A.I.", + "primary_area": "", + "author": "H. 
Nicole Pang; Robert Parks; Cynthia Breazeal; Hal Abelson", + "authorids": "", + "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", + "bibtex": "@article{Pang_Parks_Breazeal_Abelson_2024, title={\u201cHow Can I Code A.I. Responsibly?\u201d: The Effect of Computational Action on K-12 Students Learning and Creating Socially Responsible A.I.}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26902}, DOI={10.1609/aaai.v37i13.26902}, abstractNote={Teaching young people about artificial intelligence (A.I.) is recognized globally as an important education effort by organizations and programs such as UNICEF, OECD, Elements of A.I., and AI4K12. A common theme among K-12 A.I. education programs is teaching how A.I. can impact society in both positive and negative ways. We present an effective tool that teaches young people about the societal impact of A.I. that goes one step further: empowering K-12 students to use tools and frameworks to create socially responsible A.I. The computational action process is a curriculum and toolkit that gives students the lessons and tools to evaluate positive and negative impacts of A.I. and consider how they can create beneficial solutions that involve A.I. and computing technology. In a human-subject research study, 101 U.S. and international students between ages 9 and 18 participated in a one-day workshop to learn and practice the computational action process. Pre-post questionnaires measured on the Likert scale students\u2019 perception of A.I. in society and students\u2019 desire to use A.I. in their projects. Analysis of the results shows that students who identified as female agreed more strongly with having a concern about the impacts of A.I. than those who identified as male. 
Students also wrote open-ended responses to questions about what socially responsible technology means to them pre- and post-study. Analysis shows that post-intervention, students were more aware of ethical considerations and what tools they can use to code A.I. responsibly. In addition, students engaged actively with tools in the computational action toolkit, specifically the novel impact matrix, to describe the positive and negative impacts of A.I. technologies like facial recognition. Students demonstrated breadth and depth of discussion of various A.I. technologies\u2019 far-reaching positive and negative impacts. These promising results indicate that the computational action process can be a helpful addition to A.I. education programs in furnishing tools for students to analyze the effects of A.I. on society and plan how they can create and use socially responsible A.I.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Pang, H. Nicole and Parks, Robert and Breazeal, Cynthia and Abelson, Hal}, year={2024}, month={Jul.}, pages={16017-16024} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26902/26674", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26902", + "pdf_size": 1146717, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12261125980576491239&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff_domain": "alum.mit.edu;mit.edu;media.mit.edu;mit.edu", + "email": "alum.mit.edu;mit.edu;media.mit.edu;mit.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://web.mit.edu", + "aff_unique_abbr": "MIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "article-26665", + "title": "\u201cNothing 
Abnormal\u201d: Disambiguating Medical Reports via Contrastive Knowledge Infusion", + "track": "aaai special track", + "status": "Technical", + "abstract": "Sharing medical reports is essential for patient-centered care. A recent line of work has focused on automatically generating reports with NLP methods. However, different audiences have different purposes when writing/reading medical reports \u2013 for example, healthcare professionals care more about pathology, whereas patients are more concerned with the diagnosis (\"Is there any abnormality?\"). The expectation gap results in a common situation where patients find their medical reports to be ambiguous and therefore unsure about the next steps. In this work, we explore the audience expectation gap in healthcare and summarize common ambiguities that lead patients to be confused about their diagnosis into three categories: medical jargon, contradictory findings, and misleading grammatical errors. Based on our analysis, we define a disambiguation rewriting task to regenerate an input to be unambiguous while preserving information about the original content. We further propose a rewriting algorithm based on contrastive pretraining and perturbation-based rewriting. In addition, we create two datasets, OpenI-Annotated based on chest reports and VA-Annotated based on general medical reports, with available binary labels for ambiguity and abnormality presence annotated by radiology specialists. Experimental results on these datasets show that our proposed algorithm effectively rewrites input sentences in a less ambiguous way with high content fidelity. 
Our code and annotated data will be released to facilitate future research.", + "primary_area": "ai for social impact", + "author": "Zexue He; An Yan; Amilcare Gentili; Julian McAuley; Chun-Nan Hsu", + "authorids": "", + "aff": "University of California, San Diego, La Jolla, CA; University of California, San Diego, La Jolla, CA; University of California, San Diego, La Jolla, CA + V A San Diego Healthcare System, San Diego, CA; University of California, San Diego, La Jolla, CA; University of California, San Diego, La Jolla, CA + V A San Diego Healthcare System, San Diego, CA + V A National AI Institute, Washington, DC", + "bibtex": "@article{He_Yan_Gentili_McAuley_Hsu_2023, title={\u201cNothing Abnormal\u201d: Disambiguating Medical Reports via Contrastive Knowledge Infusion}, volume={37}, url={https://ojs.aaai.org/index.php/AAAI/article/view/26665}, DOI={10.1609/aaai.v37i12.26665}, abstractNote={Sharing medical reports is essential for patient-centered care. A recent line of work has focused on automatically generating reports with NLP methods. However, different audiences have different purposes when writing/reading medical reports \u2013 for example, healthcare professionals care more about pathology, whereas patients are more concerned with the diagnosis ("Is there any abnormality?"). The expectation gap results in a common situation where patients find their medical reports to be ambiguous and therefore unsure about the next steps. In this work, we explore the audience expectation gap in healthcare and summarize common ambiguities that lead patients to be confused about their diagnosis into three categories: medical jargon, contradictory findings, and misleading grammatical errors. Based on our analysis, we define a disambiguation rewriting task to regenerate an input to be unambiguous while preserving information about the original content. We further propose a rewriting algorithm based on contrastive pretraining and perturbation-based rewriting. 
In addition, we create two datasets, OpenI-Annotated based on chest reports and VA-Annotated based on general medical reports, with available binary labels for ambiguity and abnormality presence annotated by radiology specialists. Experimental results on these datasets show that our proposed algorithm effectively rewrites input sentences in a less ambiguous way with high content fidelity. Our code and annotated data will be released to facilitate future research.}, number={12}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={He, Zexue and Yan, An and Gentili, Amilcare and McAuley, Julian and Hsu, Chun-Nan}, year={2023}, month={Jun.}, pages={14232-14240} }", + "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/26665/26437", + "site": "https://ojs.aaai.org/index.php/AAAI/article/view/26665", + "pdf_size": 354783, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2796777392011541474&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff_domain": "eng.ucsd.edu;eng.ucsd.edu;va.gov;eng.ucsd.edu;ucsd.edu", + "email": "eng.ucsd.edu;eng.ucsd.edu;va.gov;eng.ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;0;0+1+2", + "aff_unique_norm": "University of California, San Diego;VA San Diego Healthcare System;V A National AI Institute", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucsd.edu;https://www.sandiego.va.gov;", + "aff_unique_abbr": "UCSD;VASDHS;", + "aff_campus_unique_index": "0;0;0+1;0;0+1+2", + "aff_campus_unique": "La Jolla;San Diego;Washington, DC", + "aff_country_unique_index": "0;0;0+0;0;0+0+0", + "aff_country_unique": "United States" + } +] \ No newline at end of file